chatllm: grammar fix

gguf_latest_llama
Cebtenzzre 1 year ago committed by Adam Treat
parent d5d72f0361
commit a49a1dcdf4

@@ -227,7 +227,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         if (!m_isServer)
             LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
         m_llModelInfo = LLModelInfo();
-        emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to out of memory. You should either remove this model or decrease your system RAM by closing other applications.").arg(modelInfo.filename()));
+        emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to insufficient memory. You should either remove this model or decrease your system RAM by closing other applications.").arg(modelInfo.filename()));
     }
     if (fileInfo.exists()) {
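
For context, a minimal sketch (not part of this commit) of how a caller might consume the modelLoadingError signal whose message text the changed line emits; the free function name and the chatLLM parameter are assumptions for illustration, and the sketch assumes ChatLLM declares modelLoadingError(const QString &) as shown in the diff.

#include <QDebug>
#include <QObject>
#include "chatllm.h"   // assumed header for the ChatLLM class touched by this diff

// Illustrative sketch: log the reworded error message whenever the signal fires.
void logModelLoadingErrors(ChatLLM *chatLLM)
{
    QObject::connect(chatLLM, &ChatLLM::modelLoadingError,
                     [](const QString &message) {
                         qWarning().noquote() << message; // surface the text to the user/log
                     });
}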
