diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 60dce440..c7e1208d 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -282,9 +282,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
                     }
                 }
 
-                if (!device) {
-                    emit reportFallbackReason("<br>Using CPU: device not found");
-                } else if (!m_llModelInfo.model->initializeGPUDevice(*device)) {
+                if (!device || !m_llModelInfo.model->initializeGPUDevice(*device)) {
                     emit reportFallbackReason("<br>Using CPU: failed to init device");
                 } else {
                     actualDevice = QString::fromStdString(device->name);
@@ -300,9 +298,8 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             // we asked llama.cpp to use the CPU
         } else if (!success) {
             // llama_init_from_file returned nullptr
-            // this may happen because ggml_metal_add_buffer failed
             emit reportDevice("CPU");
-            emit reportFallbackReason("<br>Using CPU: llama_init_from_file failed");
+            emit reportFallbackReason("<br>Using CPU: loading failed (out of VRAM?)");
             success = m_llModelInfo.model->loadModel(filePath.toStdString());
         } else if (!m_llModelInfo.model->usingGPUDevice()) {
             // ggml_vk_init was not called in llama.cpp
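For reference, a minimal standalone sketch of the fallback flow after this patch. The stub types, the hard-coded return values, and main() are illustrative assumptions, not gpt4all code; only the merged `!device || !initializeGPUDevice(*device)` check, the retry of loadModel() on CPU, and the two fallback strings come from the diff.

    // Sketch only: GPUDevice/StubModel stand in for LLModel's types, and
    // std::cout stands in for the reportDevice/reportFallbackReason signals.
    #include <iostream>
    #include <optional>
    #include <string>

    struct GPUDevice { std::string name; };

    struct StubModel {
        // Assumed behavior for the sketch: GPU init fails, CPU load succeeds.
        bool initializeGPUDevice(const GPUDevice &) { return false; }
        bool loadModel(const std::string &) { return true; }
    };

    int main() {
        StubModel model;
        std::optional<GPUDevice> device; // empty: takes the same path as a failed init

        std::string actualDevice = "CPU";
        if (!device || !model.initializeGPUDevice(*device)) {
            // After the patch, "device not found" and "failed to init device"
            // collapse into a single fallback reason.
            std::cout << "Using CPU: failed to init device\n";
        } else {
            actualDevice = device->name;
        }

        bool success = model.loadModel("model.gguf");
        if (actualDevice != "CPU" && !success) {
            // llama_init_from_file returned nullptr; report the friendlier
            // message and retry on CPU, as the second hunk does.
            std::cout << "Using CPU: loading failed (out of VRAM?)\n";
            success = model.loadModel("model.gguf");
        }
        std::cout << (success ? "loaded" : "failed") << '\n';
    }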