more accurate fallback descriptions

Author: Cebtenzzre, 2023-10-04 10:39:14 -04:00 (committed by Adam Treat)
parent 75deee9adb
commit a5b93cf095

@@ -282,9 +282,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
                     }
                 }
 
-                if (!device) {
-                    emit reportFallbackReason("<br>Using CPU: device not found");
-                } else if (!m_llModelInfo.model->initializeGPUDevice(*device)) {
+                if (!device || !m_llModelInfo.model->initializeGPUDevice(*device)) {
                     emit reportFallbackReason("<br>Using CPU: failed to init device");
                 } else {
                     actualDevice = QString::fromStdString(device->name);
@@ -300,9 +298,8 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             // we asked llama.cpp to use the CPU
         } else if (!success) {
             // llama_init_from_file returned nullptr
-            // this may happen because ggml_metal_add_buffer failed
             emit reportDevice("CPU");
-            emit reportFallbackReason("<br>Using CPU: llama_init_from_file failed");
+            emit reportFallbackReason("<br>Using CPU: loading failed (out of VRAM?)");
             success = m_llModelInfo.model->loadModel(filePath.toStdString());
         } else if (!m_llModelInfo.model->usingGPUDevice()) {
             // ggml_vk_init was not called in llama.cpp
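Read together, the two hunks leave loadModel with three CPU-fallback cases: the requested GPU device is missing or fails to initialize, the model load returns null (most likely VRAM exhaustion), or llama.cpp silently declines to use the GPU. Below is a minimal standalone sketch of that decision chain after this commit; the GPUDevice struct and the findRequestedDevice / initializeGPUDevice / loadModel / usingGPUDevice / report* helpers are hypothetical stand-ins for the LLModel API and the Qt signals, not code from this commit.

// Standalone sketch of the post-commit fallback chain in ChatLLM::loadModel.
// Everything here except the reported strings and the branch structure is a
// hypothetical stand-in for the Qt signals and the LLModel API.
#include <iostream>
#include <optional>
#include <string>

struct GPUDevice { std::string name; };  // stand-in for LLModel::GPUDevice

// Hypothetical stand-ins for the model API calls visible in the hunks.
std::optional<GPUDevice> findRequestedDevice() { return std::nullopt; }
bool initializeGPUDevice(const GPUDevice &) { return true; }
bool loadModel()      { return true; }   // can fail, e.g. when VRAM runs out
bool usingGPUDevice() { return false; }

// Stand-ins for the reportDevice / reportFallbackReason Qt signals.
void reportDevice(const std::string &d)         { std::cout << "device: "   << d << '\n'; }
void reportFallbackReason(const std::string &r) { std::cout << "fallback: " << r << '\n'; }

int main() {
    std::string actualDevice = "CPU";

    // Hunk 1: "device not found" and "failed to init device" are now one
    // branch, reported with a single message.
    std::optional<GPUDevice> device = findRequestedDevice();
    if (!device || !initializeGPUDevice(*device)) {
        reportFallbackReason("<br>Using CPU: failed to init device");
    } else {
        actualDevice = device->name;
    }

    reportDevice(actualDevice);
    bool success = loadModel();

    if (actualDevice == "CPU") {
        // we asked llama.cpp to use the CPU, so there is nothing to report
    } else if (!success) {
        // Hunk 2: llama_init_from_file returned nullptr; the message now names
        // the likely cause (VRAM exhaustion) instead of the internal function.
        reportDevice("CPU");
        reportFallbackReason("<br>Using CPU: loading failed (out of VRAM?)");
        success = loadModel();  // retry the load on the CPU
    } else if (!usingGPUDevice()) {
        // ggml_vk_init was not called in llama.cpp; the diff ends here, so the
        // message reported for this branch is not shown above.
        reportDevice("CPU");
    }
    return success ? 0 : 1;
}

Collapsing the first two branches trades a slightly less specific message for a single code path, while the second hunk swaps an internal function name for a hint the user can act on, matching the commit title.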