diff --git a/gpt4all-backend/llama.cpp-mainline b/gpt4all-backend/llama.cpp-mainline
index 2f7732b6..e5ab32aa 160000
--- a/gpt4all-backend/llama.cpp-mainline
+++ b/gpt4all-backend/llama.cpp-mainline
@@ -1 +1 @@
-Subproject commit 2f7732b667b5c7786da0fa59fd612cc87b04b325
+Subproject commit e5ab32aab84c9252e865114483dbd7505e5caabb
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 4ae8c843..afdf6bdc 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -279,8 +279,10 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             m_llModelInfo.model->initializeGPUDevice(devices.front());
         } else {
             for (LLModel::GPUDevice &d : availableDevices) {
-                if (QString::fromStdString(d.name) == requestedDevice)
+                if (QString::fromStdString(d.name) == requestedDevice) {
                     m_llModelInfo.model->initializeGPUDevice(d);
+                    break;
+                }
             }
         }
     }