Fix a bug where we're not properly falling back to CPU.
parent 0458c9b4e6
commit 21a3244645
@@ -275,8 +275,8 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         if (requestedDevice != "CPU") {
             const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString());
             std::vector<LLModel::GPUDevice> availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory);
-            if (!availableDevices.empty() && requestedDevice == "Auto" && devices.front().type == 2 /*a discrete gpu*/) {
-                m_llModelInfo.model->initializeGPUDevice(devices.front());
+            if (!availableDevices.empty() && requestedDevice == "Auto" && availableDevices.front().type == 2 /*a discrete gpu*/) {
+                m_llModelInfo.model->initializeGPUDevice(availableDevices.front());
             } else {
                 for (LLModel::GPUDevice &d : availableDevices) {
                     if (QString::fromStdString(d.name) == requestedDevice) {
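For context: the old code read from `devices`, while `availableDevices` is the list already filtered by the model's required memory. Presumably, indexing the unfiltered list could select a GPU without enough memory, so the load would fail later instead of falling back to CPU as the commit title describes. A minimal, self-contained sketch of the corrected selection shape follows; pickDevice and the simplified GPUDevice struct are hypothetical stand-ins for illustration, not the actual ChatLLM/LLModel API.

// Sketch only: hypothetical names, not the real gpt4all code.
#include <optional>
#include <string>
#include <vector>

struct GPUDevice {
    int type = 0;          // the real code treats type == 2 as a discrete GPU
    std::string name;
};

// Returns the device to initialize, or std::nullopt to fall back to CPU.
// Every lookup goes through availableDevices (already filtered by required
// memory); reading an unfiltered list here is the kind of bug this commit fixes.
std::optional<GPUDevice> pickDevice(const std::vector<GPUDevice> &availableDevices,
                                    const std::string &requestedDevice)
{
    if (requestedDevice == "CPU")
        return std::nullopt;                       // user explicitly chose CPU

    // "Auto": take the first device if it is a discrete GPU with enough memory.
    if (!availableDevices.empty() && requestedDevice == "Auto"
            && availableDevices.front().type == 2 /*a discrete gpu*/)
        return availableDevices.front();

    // Otherwise search for the device the user named, by exact name match.
    for (const GPUDevice &d : availableDevices)
        if (d.name == requestedDevice)
            return d;

    return std::nullopt;                           // nothing suitable: CPU fallback
}

With this shape, an empty availableDevices list (no GPU meets the memory requirement) naturally yields std::nullopt, which is the CPU fallback the commit title refers to.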