Mirror of https://github.com/nomic-ai/gpt4all (synced 2024-11-02 09:40:42 +00:00)
Provide a guardrail for OOM errors.
commit 88bbe30952 (parent 9ef53163dd)
@@ -211,6 +211,15 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
     // Store the file info in the modelInfo in case we have an error loading
     m_llModelInfo.fileInfo = fileInfo;

+    // Check if we've previously tried to load this file and failed/crashed
+    if (MySettings::globalInstance()->attemptModelLoad() == filePath) {
+        MySettings::globalInstance()->setAttemptModelLoad(QString()); // clear the flag
+        if (!m_isServer)
+            LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
+        m_llModelInfo = LLModelInfo();
+        emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to out of memory. You should either remove this model or decrease your system RAM by closing other applications.").arg(modelInfo.filename()));
+    }
+
     if (fileInfo.exists()) {
         if (isChatGPT) {
             QString apiKey;
@@ -239,7 +248,9 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
 #endif

         if (m_llModelInfo.model) {
+            MySettings::globalInstance()->setAttemptModelLoad(filePath);
             bool success = m_llModelInfo.model->loadModel(filePath.toStdString());
+            MySettings::globalInstance()->setAttemptModelLoad(QString());
             if (!success) {
                 delete std::exchange(m_llModelInfo.model, nullptr);
                 if (!m_isServer)
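Taken together, these two hunks implement a simple crash sentinel: the model's path is written to persistent settings immediately before the risky loadModel() call and cleared right after it returns. If the process dies mid-load (for example, the OS kills it for exhausting memory), the flag survives on disk, so the next launch finds its own path still recorded and refuses to retry, surfacing an error instead. Below is a minimal standalone sketch of the same pattern, using QSettings directly rather than the codebase's MySettings wrapper; guardedLoad and riskyLoad are illustrative names, not from the commit (only the "attemptModelLoad" key is):

#include <QDebug>
#include <QSettings>
#include <QString>

// Illustrative stand-in for the expensive, possibly-crashing model load.
static bool riskyLoad(const QString &path) { Q_UNUSED(path); return true; }

// Sketch of the guardrail, assuming QCoreApplication organization/application
// names have been set so the default QSettings constructor works.
bool guardedLoad(const QString &path)
{
    QSettings settings;
    // If our own path is still recorded, the previous run never reached
    // the "clear" step below -- i.e. it crashed mid-load.
    if (settings.value("attemptModelLoad").toString() == path) {
        settings.remove("attemptModelLoad");
        settings.sync();
        qWarning() << "Previous load of" << path << "crashed; not retrying.";
        return false;
    }
    settings.setValue("attemptModelLoad", path);
    settings.sync();              // flush to disk *before* the risky call
    bool ok = riskyLoad(path);    // a hard crash here leaves the flag set
    settings.remove("attemptModelLoad");
    settings.sync();
    return ok;
}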
@@ -163,7 +163,7 @@ Window {
                 + "<i>\"" + currentChat.modelLoadingError + "\"</i>"
                 + qsTr("<br><br>Model loading failures can happen for a variety of reasons, but the most common "
                 + "causes include a bad file format, an incomplete or corrupted download, the wrong file "
-                + "type or an incompatible model type. Here are some suggestions for resolving the problem:"
+                + "type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:"
                 + "<br><ul>"
                 + "<li>Ensure the model file has a compatible ggml format and type"
                 + "<li>Check the model file is complete in the download folder"
@@ -608,3 +608,24 @@ void MySettings::setNetworkUsageStatsActive(bool b)
     setting.sync();
     emit networkUsageStatsActiveChanged();
 }
+
+QString MySettings::attemptModelLoad() const
+{
+    QSettings setting;
+    setting.sync();
+    return setting.value("attemptModelLoad", QString()).toString();
+}
+
+void MySettings::setAttemptModelLoad(const QString &modelFile)
+{
+    if (attemptModelLoad() == modelFile)
+        return;
+
+    QSettings setting;
+    if (modelFile.isEmpty())
+        setting.remove("attemptModelLoad");
+    else
+        setting.setValue("attemptModelLoad", modelFile);
+    setting.sync();
+    emit attemptModelLoadChanged();
+}
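A note on the design: QSettings may buffer writes and flush them lazily, so setAttemptModelLoad() calls setting.sync() to push the flag to permanent storage right away. That ordering is what makes the guardrail work: if the subsequent loadModel() call takes the process down, the flag must already be on disk for the next launch to see it. The sync() in the getter likewise reloads any value persisted by an earlier (crashed) run before reading it.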
@@ -93,6 +93,9 @@ public:
     bool networkUsageStatsActive() const;
     void setNetworkUsageStatsActive(bool b);

+    QString attemptModelLoad() const;
+    void setAttemptModelLoad(const QString &modelFile);
+
 Q_SIGNALS:
     void nameChanged(const ModelInfo &model);
     void filenameChanged(const ModelInfo &model);
@@ -119,6 +122,7 @@ Q_SIGNALS:
     void networkAttributionChanged();
     void networkIsActiveChanged();
     void networkUsageStatsActiveChanged();
+    void attemptModelLoadChanged();

 private:
     bool m_forceMetal;
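As with the other settings, the new flag gets a companion change signal so interested C++ code or QML bindings can react when it flips. A hypothetical observer (not part of the commit; MySettings::globalInstance() and the signal are from the codebase, the rest is illustrative) could hook it in the usual Qt way:

#include <QDebug>
#include <QObject>
#include "mysettings.h"

// Hypothetical observer: log whenever the crash-guard flag changes.
void watchAttemptModelLoad()
{
    QObject::connect(MySettings::globalInstance(), &MySettings::attemptModelLoadChanged, [] {
        qDebug() << "attemptModelLoad is now"
                 << MySettings::globalInstance()->attemptModelLoad();
    });
}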