mirror of
https://github.com/nomic-ai/gpt4all
synced 2024-11-02 09:40:42 +00:00
backend: do not use Vulkan with non-LLaMA models
This commit is contained in:
parent
672cb850f9
commit
1534df3e9f
@@ -309,8 +309,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             // We might have had to fallback to CPU after load if the model is not possible to accelerate
             // for instance if the quantization method is not supported on Vulkan yet
             emit reportDevice("CPU");
-            // TODO(cebtenzzre): report somewhere if llamamodel decided the model was not supported
-            emit reportFallbackReason("<br>Using CPU: unsupported quantization type");
+            emit reportFallbackReason("<br>Using CPU: unsupported model or quant");
         }

         MySettings::globalInstance()->setAttemptModelLoad(QString());
|
Loading…
Reference in New Issue
Block a user