From a5b93cf0957607b658648f51b17859477e516659 Mon Sep 17 00:00:00 2001
From: Cebtenzzre
Date: Wed, 4 Oct 2023 10:39:14 -0400
Subject: [PATCH] more accurate fallback descriptions

---
 gpt4all-chat/chatllm.cpp | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 60dce440..c7e1208d 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -282,9 +282,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             }
         }
 
-        if (!device) {
-            emit reportFallbackReason("<br>Using CPU: device not found");
-        } else if (!m_llModelInfo.model->initializeGPUDevice(*device)) {
+        if (!device || !m_llModelInfo.model->initializeGPUDevice(*device)) {
             emit reportFallbackReason("<br>Using CPU: failed to init device");
         } else {
             actualDevice = QString::fromStdString(device->name);
@@ -300,9 +298,8 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         // we asked llama.cpp to use the CPU
     } else if (!success) {
         // llama_init_from_file returned nullptr
-        // this may happen because ggml_metal_add_buffer failed
         emit reportDevice("CPU");
-        emit reportFallbackReason("<br>Using CPU: llama_init_from_file failed");
+        emit reportFallbackReason("<br>Using CPU: loading failed (out of VRAM?)");
         success = m_llModelInfo.model->loadModel(filePath.toStdString());
     } else if (!m_llModelInfo.model->usingGPUDevice()) {
         // ggml_vk_init was not called in llama.cpp
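
The following is a minimal, standalone sketch of the fallback flow the patch produces: a single combined check that falls back to the CPU when no GPU device is found or when initializing it fails. findGpuDevice(), initGpuDevice(), and loadOnCpu() are hypothetical stand-ins, not gpt4all or llama.cpp APIs; only the merged condition and the fallback-reason strings mirror the diff above.

#include <iostream>
#include <optional>
#include <string>

struct GpuDevice { std::string name; };

// Hypothetical stand-ins for the device discovery / init / load calls in the diff.
std::optional<GpuDevice> findGpuDevice() { return std::nullopt; } // pretend no device was found
bool initGpuDevice(const GpuDevice &) { return false; }
bool loadOnCpu() { return true; }

int main() {
    std::string actualDevice = "CPU";
    std::string fallbackReason;

    std::optional<GpuDevice> device = findGpuDevice();
    // Post-patch shape: one combined check instead of separate
    // "device not found" and "failed to init device" branches.
    if (!device || !initGpuDevice(*device)) {
        fallbackReason = "Using CPU: failed to init device";
    } else {
        actualDevice = device->name;
    }

    if (actualDevice == "CPU") {
        std::cout << fallbackReason << '\n';
        // Separate failure mode reported by the second hunk: the model
        // itself failed to load (possibly out of VRAM), so retry on CPU.
        if (!loadOnCpu())
            std::cout << "Using CPU: loading failed (out of VRAM?)\n";
    } else {
        std::cout << "Using GPU: " << actualDevice << '\n';
    }
    return 0;
}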