From 21a3244645352e86d45b102575c777d550bad0c7 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Wed, 13 Sep 2023 19:30:27 -0400
Subject: [PATCH] Fix a bug where we're not properly falling back to CPU.

---
 gpt4all-chat/chatllm.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index afdf6bdc..b10eb6ec 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -275,8 +275,8 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         if (requestedDevice != "CPU") {
             const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString());
             std::vector<LLModel::GPUDevice> availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory);
-            if (!availableDevices.empty() && requestedDevice == "Auto" && devices.front().type == 2 /*a discrete gpu*/) {
-                m_llModelInfo.model->initializeGPUDevice(devices.front());
+            if (!availableDevices.empty() && requestedDevice == "Auto" && availableDevices.front().type == 2 /*a discrete gpu*/) {
+                m_llModelInfo.model->initializeGPUDevice(availableDevices.front());
             } else {
                 for (LLModel::GPUDevice &d : availableDevices) {
                     if (QString::fromStdString(d.name) == requestedDevice) {
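
Note for reviewers: below is a minimal, standalone sketch of the selection logic after this fix, assuming only the GPUDevice shape visible in the diff (a numeric type where 2 marks a discrete GPU, plus a name string). The selectDevice helper and the struct are hypothetical illustrations for this note, not gpt4all's actual API; the real code calls initializeGPUDevice on the model and falls back to CPU when no device is chosen.

    #include <cstdio>
    #include <optional>
    #include <string>
    #include <vector>

    // Shape implied by the diff; the real struct is LLModel::GPUDevice.
    struct GPUDevice {
        int type;          // the diff's comment marks type == 2 as "a discrete gpu"
        std::string name;
    };

    // Hypothetical helper mirroring the fixed logic: choose a device from the
    // list that was already filtered by required memory, or signal CPU fallback
    // by returning nothing.
    std::optional<GPUDevice> selectDevice(const std::vector<GPUDevice> &availableDevices,
                                          const std::string &requestedDevice)
    {
        if (requestedDevice == "CPU")
            return std::nullopt;  // user explicitly asked for CPU

        // The bug fixed above: this branch previously inspected a different,
        // unfiltered list ("devices"), so "Auto" could select a GPU without
        // enough memory for the model instead of reaching the CPU fallback.
        if (!availableDevices.empty() && requestedDevice == "Auto"
                && availableDevices.front().type == 2 /* discrete GPU */)
            return availableDevices.front();

        // Otherwise honor an exact device-name request, if it survived filtering.
        for (const GPUDevice &d : availableDevices)
            if (d.name == requestedDevice)
                return d;

        return std::nullopt;  // nothing suitable: caller falls back to CPU
    }

    int main()
    {
        std::vector<GPUDevice> gpus = {{2, "SomeDiscreteGPU"}};
        auto pick = selectDevice(gpus, "Auto");
        std::printf("%s\n", pick ? pick->name.c_str() : "CPU fallback");
    }

The point of the fix is visible in the "Auto" branch: it must consult availableDevices, the list already filtered by requiredMem(), rather than an unfiltered devices list; otherwise "Auto" can claim a GPU that cannot hold the model and the CPU fallback never triggers.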