backend: do not use Vulkan with non-LLaMA models

Branch: gguf_latest_llama
Author: Cebtenzzre (committed by Adam Treat, 11 months ago)
Parent: 672cb850f9
Commit: 1534df3e9f

@@ -309,8 +309,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             // We might have had to fallback to CPU after load if the model is not possible to accelerate
             // for instance if the quantization method is not supported on Vulkan yet
             emit reportDevice("CPU");
-            // TODO(cebtenzzre): report somewhere if llamamodel decided the model was not supported
-            emit reportFallbackReason("<br>Using CPU: unsupported quantization type");
+            emit reportFallbackReason("<br>Using CPU: unsupported model or quant");
         }
         MySettings::globalInstance()->setAttemptModelLoad(QString());
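
The hunk above only changes the user-facing reason string that is emitted when the backend falls back to the CPU. As a minimal standalone sketch of that fallback-reporting pattern (hypothetical names and plain stdout in place of the Qt signals used in ChatLLM, not the gpt4all API itself):

```cpp
// Hypothetical sketch: after a load attempt, if the accelerated (e.g. Vulkan)
// path could not be used, report the actual device plus a short reason string.
#include <iostream>
#include <string>

struct LoadResult {
    bool usedGpu;            // whether the GPU (Vulkan) path ended up being used
    bool archSupported;      // model architecture supported by the GPU backend
    bool quantSupported;     // quantization type supported by the GPU backend
};

// Hypothetical helper: derive the reason string shown to the user.
std::string fallbackReason(const LoadResult &r) {
    if (!r.archSupported || !r.quantSupported)
        return "Using CPU: unsupported model or quant";  // wording matches the new diff line
    return "Using CPU: GPU loading failed";
}

void reportAfterLoad(const LoadResult &r, bool gpuRequested) {
    if (gpuRequested && !r.usedGpu) {
        // In ChatLLM these are Qt signals (reportDevice / reportFallbackReason);
        // plain output keeps this sketch self-contained and runnable.
        std::cout << "device: CPU\n";
        std::cout << "reason: " << fallbackReason(r) << "\n";
    }
}

int main() {
    // Example: a non-LLaMA architecture that the Vulkan backend cannot accelerate.
    reportAfterLoad({/*usedGpu=*/false, /*archSupported=*/false, /*quantSupported=*/true},
                    /*gpuRequested=*/true);
}
```

The design choice reflected in the commit is to fold both failure modes (unsupported architecture and unsupported quantization) into one message emitted after the load attempt, instead of leaving the architecture case as a TODO.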
