https://github.com/nomic-ai/gpt4all
Always try and load default model first. Groovy is the default default.
commit 62a885de40
parent 97baf3d486

 download.cpp | 15
@@ -9,6 +9,7 @@
 #include <QUrl>
 #include <QDir>
 #include <QStandardPaths>
+#include <QSettings>
 
 class MyDownload: public Download { };
 Q_GLOBAL_STATIC(MyDownload, downloadInstance)
@@ -198,6 +199,7 @@ void Download::parseJsonFile(const QByteArray &jsonData)
         return;
     }
 
+    QString defaultModel;
     QJsonArray jsonArray = document.array();
 
     m_modelMap.clear();
@@ -208,7 +210,8 @@ void Download::parseJsonFile(const QByteArray &jsonData)
         QString modelFilesize = obj["filesize"].toString();
         QByteArray modelMd5sum = obj["md5sum"].toString().toLatin1().constData();
         bool isDefault = obj.contains("isDefault") && obj["isDefault"] == QString("true");
+        if (isDefault)
+            defaultModel = modelFilename;
         quint64 sz = modelFilesize.toULongLong();
         if (sz < 1024) {
             modelFilesize = QString("%1 bytes").arg(sz);
@@ -231,6 +234,16 @@ void Download::parseJsonFile(const QByteArray &jsonData)
         m_modelMap.insert(modelInfo.filename, modelInfo);
     }
 
+    // remove ggml- prefix and .bin suffix
+    Q_ASSERT(defaultModel.startsWith("ggml-"));
+    defaultModel = defaultModel.remove(0, 5);
+    Q_ASSERT(defaultModel.endsWith(".bin"));
+    defaultModel.chop(4);
+
+    QSettings settings;
+    settings.sync();
+    settings.setValue("defaultModel", defaultModel);
+    settings.sync();
     emit modelListChanged();
 }
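
The download.cpp change persists the default model under the "defaultModel" key in QSettings after stripping the "ggml-" prefix and ".bin" suffix from its filename, so "ggml-gpt4all-j-v1.3-groovy.bin" is stored as "gpt4all-j-v1.3-groovy". A minimal standalone sketch of that normalization follows; the helper names are hypothetical, and unlike the commit (which uses Q_ASSERT) it checks for the prefix and suffix defensively:

// Sketch only: normalize a model filename the way parseJsonFile does
// before persisting it, e.g. "ggml-gpt4all-j-v1.3-groovy.bin" ->
// "gpt4all-j-v1.3-groovy". Helper names are illustrative, not from the commit.
#include <QSettings>
#include <QString>

static QString normalizedDefaultModel(QString filename)
{
    if (filename.startsWith(QStringLiteral("ggml-")))
        filename.remove(0, 5);   // drop the "ggml-" prefix
    if (filename.endsWith(QStringLiteral(".bin")))
        filename.chop(4);        // drop the ".bin" suffix
    return filename;
}

static void saveDefaultModel(const QString &filename)
{
    QSettings settings;
    settings.setValue("defaultModel", normalizedDefaultModel(filename));
    settings.sync();             // flush so the setting is visible immediately
}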

 llm.cpp | 8
@@ -7,6 +7,7 @@
 #include <QFile>
 #include <QProcess>
 #include <QResource>
+#include <QSettings>
 #include <fstream>
 
 class MyLLM: public LLM { };
@@ -61,7 +62,12 @@ bool LLMObject::loadModel()
         return false;
     }
 
-    return loadModelPrivate(models.first());
+    QSettings settings;
+    settings.sync();
+    QString defaultModel = settings.value("defaultModel", "gpt4all-j-v1.3-groovy").toString();
+    if (defaultModel.isEmpty() || !models.contains(defaultModel))
+        defaultModel = models.first();
+    return loadModelPrivate(defaultModel);
 }
 
 bool LLMObject::loadModelPrivate(const QString &modelName)
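
On the llm.cpp side, loadModel() now prefers the persisted default ("gpt4all-j-v1.3-groovy" when nothing is stored) and only falls back to the first installed model when the setting is empty or no longer matches an installed file. A minimal sketch of that selection logic as a free function; pickModelToLoad is a hypothetical name, and the commit performs these steps inline in LLMObject::loadModel():

// Sketch only: choose which model to load. Assumes the caller has already
// verified that `models` (the installed model names) is non-empty, as
// loadModel() does before this point.
#include <QSettings>
#include <QString>
#include <QStringList>

static QString pickModelToLoad(const QStringList &models)
{
    QSettings settings;
    settings.sync();
    const QString defaultModel =
        settings.value("defaultModel", "gpt4all-j-v1.3-groovy").toString();
    if (!defaultModel.isEmpty() && models.contains(defaultModel))
        return defaultModel;     // persisted default is installed, use it
    return models.first();       // otherwise fall back to the first model
}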