Mirror of https://github.com/nomic-ai/gpt4all (synced 2024-11-08 07:10:32 +00:00)
Always try to load the default model first. Groovy is the default default.
parent 83fb05345e
commit c6c5e0bb4f
download.cpp (15 lines changed)
@@ -9,6 +9,7 @@
 #include <QUrl>
 #include <QDir>
 #include <QStandardPaths>
+#include <QSettings>
 
 class MyDownload: public Download { };
 Q_GLOBAL_STATIC(MyDownload, downloadInstance)
@@ -198,6 +199,7 @@ void Download::parseJsonFile(const QByteArray &jsonData)
         return;
     }
 
+    QString defaultModel;
     QJsonArray jsonArray = document.array();
 
     m_modelMap.clear();
@@ -208,7 +210,8 @@ void Download::parseJsonFile(const QByteArray &jsonData)
         QString modelFilesize = obj["filesize"].toString();
         QByteArray modelMd5sum = obj["md5sum"].toString().toLatin1().constData();
         bool isDefault = obj.contains("isDefault") && obj["isDefault"] == QString("true");
-
+        if (isDefault)
+            defaultModel = modelFilename;
         quint64 sz = modelFilesize.toULongLong();
         if (sz < 1024) {
             modelFilesize = QString("%1 bytes").arg(sz);
@@ -231,6 +234,16 @@ void Download::parseJsonFile(const QByteArray &jsonData)
         m_modelMap.insert(modelInfo.filename, modelInfo);
     }
 
+    // remove ggml- prefix and .bin suffix
+    Q_ASSERT(defaultModel.startsWith("ggml-"));
+    defaultModel = defaultModel.remove(0, 5);
+    Q_ASSERT(defaultModel.endsWith(".bin"));
+    defaultModel.chop(4);
+
+    QSettings settings;
+    settings.sync();
+    settings.setValue("defaultModel", defaultModel);
+    settings.sync();
     emit modelListChanged();
 }
 
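The new code at the end of parseJsonFile turns the advertised default model's filename into the value stored under the "defaultModel" key. Below is a minimal standalone sketch of that transformation, not code from this commit: the filename "ggml-gpt4all-j-v1.3-groovy.bin" is an assumed example, and the QSettings organization/application names are placeholders.

// Sketch only: derives the settings value the same way the asserts above do,
// assuming model files follow the "ggml-<name>.bin" naming convention.
#include <QSettings>
#include <QString>
#include <QDebug>

static QString defaultModelKey(QString filename)
{
    Q_ASSERT(filename.startsWith("ggml-"));
    filename.remove(0, 5);   // drop the "ggml-" prefix
    Q_ASSERT(filename.endsWith(".bin"));
    filename.chop(4);        // drop the ".bin" suffix
    return filename;         // e.g. "gpt4all-j-v1.3-groovy"
}

int main()
{
    // Explicit organization/application names are placeholders so the sketch
    // runs without a QCoreApplication configured the way the real app is.
    QSettings settings("nomic.ai", "GPT4All");
    settings.setValue("defaultModel", defaultModelKey("ggml-gpt4all-j-v1.3-groovy.bin"));
    settings.sync();
    qDebug() << settings.value("defaultModel").toString();  // "gpt4all-j-v1.3-groovy"
    return 0;
}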
llm.cpp (8 lines changed)
@@ -7,6 +7,7 @@
 #include <QFile>
 #include <QProcess>
 #include <QResource>
+#include <QSettings>
 #include <fstream>
 
 class MyLLM: public LLM { };
@@ -61,7 +62,12 @@ bool LLMObject::loadModel()
         return false;
     }
 
-    return loadModelPrivate(models.first());
+    QSettings settings;
+    settings.sync();
+    QString defaultModel = settings.value("defaultModel", "gpt4all-j-v1.3-groovy").toString();
+    if (defaultModel.isEmpty() || !models.contains(defaultModel))
+        defaultModel = models.first();
+    return loadModelPrivate(defaultModel);
 }
 
 bool LLMObject::loadModelPrivate(const QString &modelName)
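Taken together, the two files implement the behaviour described in the commit message: download.cpp records which model the models JSON marks as default, and loadModel() in llm.cpp prefers that model, falling back to gpt4all-j-v1.3-groovy and finally to the first installed model. A condensed sketch of that selection order follows; pickModel and installedModels are hypothetical names, not code from this commit.

// Selection order introduced by this commit: the stored "defaultModel"
// setting, else the hard-coded Groovy default, else the first available model.
#include <QSettings>
#include <QString>
#include <QStringList>

static QString pickModel(const QStringList &installedModels)
{
    // Assumes QCoreApplication organization/application names are set, as in
    // the real application, and that the list is non-empty (loadModel()
    // already returns early when no models are found).
    QSettings settings;
    settings.sync();
    QString defaultModel = settings.value("defaultModel", "gpt4all-j-v1.3-groovy").toString();
    if (defaultModel.isEmpty() || !installedModels.contains(defaultModel))
        defaultModel = installedModels.first();
    return defaultModel;
}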