diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 6f008062..3212d51c 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -227,7 +227,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) if (!m_isServer) LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store m_llModelInfo = LLModelInfo(); - emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to out of memory. You should either remove this model or decrease your system RAM by closing other applications.").arg(modelInfo.filename())); + emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to insufficient memory. You should either remove this model or free up your system RAM by closing other applications.").arg(modelInfo.filename())); } if (fileInfo.exists()) {