From 64e98b8ea9665cacf9ef5493bfffae97160d7d44 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Mon, 26 Jun 2023 09:35:29 -0400
Subject: [PATCH] Fix bug with model loading on initial load.

---
 gpt4all-chat/chat.cpp              | 19 ++++++++++++++++++
 gpt4all-chat/chat.h                |  2 ++
 gpt4all-chat/download.cpp          |  8 +++++---
 gpt4all-chat/main.qml              | 32 +++++++++++++++++++++---------
 gpt4all-chat/modellist.cpp         | 14 +++++++++++++
 gpt4all-chat/modellist.h           |  7 ++++++-
 gpt4all-chat/qml/StartupDialog.qml |  6 ++----
 7 files changed, 71 insertions(+), 17 deletions(-)

diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp
index fb20e218..89b1ccbc 100644
--- a/gpt4all-chat/chat.cpp
+++ b/gpt4all-chat/chat.cpp
@@ -17,6 +17,7 @@ Chat::Chat(QObject *parent)
     , m_isServer(false)
     , m_shouldDeleteLater(false)
     , m_isModelLoaded(false)
+    , m_shouldLoadModelWhenInstalled(false)
 {
     connectLLM();
 }
@@ -33,6 +34,7 @@ Chat::Chat(bool isServer, QObject *parent)
     , m_isServer(true)
     , m_shouldDeleteLater(false)
     , m_isModelLoaded(false)
+    , m_shouldLoadModelWhenInstalled(false)
 {
     connectLLM();
 }
@@ -65,6 +67,9 @@ void Chat::connectLLM()
     connect(this, &Chat::regenerateResponseRequested, m_llmodel, &ChatLLM::regenerateResponse, Qt::QueuedConnection);
     connect(this, &Chat::resetResponseRequested, m_llmodel, &ChatLLM::resetResponse, Qt::QueuedConnection);
     connect(this, &Chat::resetContextRequested, m_llmodel, &ChatLLM::resetContext, Qt::QueuedConnection);
+
+    connect(ModelList::globalInstance()->installedModels(), &InstalledModels::countChanged,
+        this, &Chat::handleModelInstalled, Qt::QueuedConnection);
 }
 
 void Chat::reset()
@@ -311,9 +316,23 @@ void Chat::unloadModel()
 
 void Chat::reloadModel()
 {
+    // If the installed model list is empty, then we mark a special flag and monitor for when a model
+    // is installed
+    if (!ModelList::globalInstance()->installedModels()->count()) {
+        m_shouldLoadModelWhenInstalled = true;
+        return;
+    }
     m_llmodel->setShouldBeLoaded(true);
 }
 
+void Chat::handleModelInstalled()
+{
+    if (!m_shouldLoadModelWhenInstalled)
+        return;
+    m_shouldLoadModelWhenInstalled = false;
+    reloadModel();
+}
+
 void Chat::generatedNameChanged(const QString &name)
 {
     // Only use the first three words maximum and remove newlines and extra spaces
diff --git a/gpt4all-chat/chat.h b/gpt4all-chat/chat.h
index 82251038..834abb28 100644
--- a/gpt4all-chat/chat.h
+++ b/gpt4all-chat/chat.h
@@ -130,6 +130,7 @@ private Q_SLOTS:
     void handleTokenSpeedChanged(const QString &tokenSpeed);
     void handleDatabaseResultsChanged(const QList<ResultInfo> &results);
     void handleModelInfoChanged(const ModelInfo &modelInfo);
+    void handleModelInstalled();
 
 private:
     QString m_id;
@@ -150,6 +151,7 @@ private:
     bool m_isServer;
     bool m_shouldDeleteLater;
    bool m_isModelLoaded;
+    bool m_shouldLoadModelWhenInstalled;
 };
 
 #endif // CHAT_H
diff --git a/gpt4all-chat/download.cpp b/gpt4all-chat/download.cpp
index 0ff5b4ca..457448dc 100644
--- a/gpt4all-chat/download.cpp
+++ b/gpt4all-chat/download.cpp
@@ -513,7 +513,7 @@ void HashAndSaveFile::hashAndSave(const QString &expectedHash, const QString &sa
     } else {
         QFile::FileError error = file.error();
         const QString errorString
-            = QString("ERROR: Could not save model to location: %1 failed with code %1").arg(saveFilePath).arg(error);
+            = QString("ERROR: Could not save model to location: %1 failed with code %2").arg(saveFilePath).arg(error);
         qWarning() << errorString;
         tempFile->close();
         emit hashAndSaveFinished(false, errorString, tempFile, modelReply);
@@ -532,11 +532,13 @@ void Download::handleModelDownloadFinished()
     m_activeDownloads.remove(modelReply);
 
     if (modelReply->error()) {
-        qWarning() << "ERROR: downloading:" << modelReply->errorString();
+        const QString errorString
+            = QString("ERROR: Downloading failed with code %1 \"%2\"").arg(modelReply->error()).arg(modelReply->errorString());
+        qWarning() << errorString;
         modelReply->deleteLater();
         tempFile->deleteLater();
         ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadingRole, false);
-        ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadErrorRole, modelReply->errorString());
+        ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadErrorRole, errorString);
         return;
     }
 
diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml
index 140083e1..83b7d7c7 100644
--- a/gpt4all-chat/main.qml
+++ b/gpt4all-chat/main.qml
@@ -50,11 +50,7 @@ Window {
 
     // Startup code
     Component.onCompleted: {
-        if (!LLM.compatHardware) {
-            Network.sendNonCompatHardware();
-            errorCompatHardware.open();
-        } else
-            startupDialogs();
+        startupDialogs();
     }
 
     Connections {
@@ -91,6 +87,12 @@ Window {
     }
 
     function startupDialogs() {
+        if (!LLM.compatHardware) {
+            Network.sendNonCompatHardware();
+            errorCompatHardware.open();
+            return;
+        }
+
         // check for first time start of this version
         if (Download.isFirstStart()) {
             firstStartDialog.open();
@@ -98,7 +100,7 @@ Window {
         }
 
         // check for any current models and if not, open download dialog
-        if (ModelList.count === 0 && !firstStartDialog.opened) {
+        if (ModelList.installedModels.count === 0 && !firstStartDialog.opened) {
             downloadNewModels.open();
             return;
         }
@@ -117,7 +119,14 @@ Window {
         shouldShowBusy: false
         closePolicy: Popup.NoAutoClose
         modal: true
-        text: qsTr("Incompatible hardware detected. Your hardware does not meet the minimal requirements to run GPT4All. In particular, it does not seem to support AVX intrinsics. See here for more: https://en.wikipedia.org/wiki/Advanced_Vector_Extensions")
+        text: qsTr("<h3>Encountered an error starting up:</h3><br>")
+            + qsTr("<i>\"Incompatible hardware detected.\"</i>")
+            + qsTr("<br><br>Unfortunately, your CPU does not meet the minimal requirements to run ")
+            + qsTr("this program. In particular, it does not support AVX intrinsics which this ")
+            + qsTr("program requires to successfully run a modern large language model. ")
+            + qsTr("The only solution at this time is to upgrade your hardware to a more modern CPU.")
+            + qsTr("<br><br>See here for more information: ")
+            + qsTr("https://en.wikipedia.org/wiki/Advanced_Vector_Extensions")
     }
 
     StartupDialog {
@@ -205,9 +214,11 @@ Window {
                 anchors.horizontalCenter: parent.horizontalCenter
                 leftPadding: 10
                 rightPadding: 20
-                text: currentChat.modelLoadingError !== "" ? qsTr("Model loading error...")
+                text: ModelList.installedModels.count
+                    ? currentChat.modelLoadingError !== "" ? qsTr("Model loading error...")
                     : (comboBox.textAt(comboBox.currentIndex) !== "" ? comboBox.textAt(comboBox.currentIndex)
                     : comboBox.valueAt(comboBox.currentIndex))
+                    : ""
                 font: comboBox.font
                 color: theme.textColor
                 verticalAlignment: Text.AlignVCenter
@@ -241,7 +252,10 @@ Window {
 
     Item {
         anchors.centerIn: parent
-        visible: !currentChat.isModelLoaded && currentChat.modelLoadingError === "" && !currentChat.isServer
+        visible: ModelList.installedModels.count
+            && !currentChat.isModelLoaded
+            && currentChat.modelLoadingError === ""
+            && !currentChat.isServer
         width: childrenRect.width
         height: childrenRect.height
         Row {
diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp
index 0943d713..c806686d 100644
--- a/gpt4all-chat/modellist.cpp
+++ b/gpt4all-chat/modellist.cpp
@@ -2,6 +2,15 @@
 
 #include <algorithm>
 
+InstalledModels::InstalledModels(QObject *parent)
+    : QSortFilterProxyModel(parent)
+{
+    connect(this, &InstalledModels::rowsInserted, this, &InstalledModels::countChanged);
+    connect(this, &InstalledModels::rowsRemoved, this, &InstalledModels::countChanged);
+    connect(this, &InstalledModels::modelReset, this, &InstalledModels::countChanged);
+    connect(this, &InstalledModels::layoutChanged, this, &InstalledModels::countChanged);
+}
+
 bool InstalledModels::filterAcceptsRow(int sourceRow,
                                        const QModelIndex &sourceParent) const
 {
@@ -10,6 +19,11 @@ bool InstalledModels::filterAcceptsRow(int sourceRow,
     return isInstalled;
 }
 
+int InstalledModels::count() const
+{
+    return rowCount();
+}
+
 DownloadableModels::DownloadableModels(QObject *parent)
     : QSortFilterProxyModel(parent)
     , m_expanded(false)
diff --git a/gpt4all-chat/modellist.h b/gpt4all-chat/modellist.h
index 50e8e78b..de42014d 100644
--- a/gpt4all-chat/modellist.h
+++ b/gpt4all-chat/modellist.h
@@ -69,8 +69,13 @@ Q_DECLARE_METATYPE(ModelInfo)
 class InstalledModels : public QSortFilterProxyModel
 {
     Q_OBJECT
+    Q_PROPERTY(int count READ count NOTIFY countChanged)
 public:
-    explicit InstalledModels(QObject *parent) : QSortFilterProxyModel(parent) {}
+    explicit InstalledModels(QObject *parent);
+    int count() const;
+
+Q_SIGNALS:
+    void countChanged();
 
 protected:
     bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;
diff --git a/gpt4all-chat/qml/StartupDialog.qml b/gpt4all-chat/qml/StartupDialog.qml
index fabc02ef..3cad47bc 100644
--- a/gpt4all-chat/qml/StartupDialog.qml
+++ b/gpt4all-chat/qml/StartupDialog.qml
@@ -146,13 +146,12 @@ model release that uses your data!")
                 Layout.alignment: Qt.AlignVCenter
                 Layout.row: 0
                 Layout.column: 1
-                property bool defaultChecked: Network.usageStatsActive
                 property alias checked: optInStatisticsRadioYes.checked
                 property bool choiceMade: optInStatisticsRadioYes.checked || optInStatisticsRadioNo.checked
 
                 RadioButton {
                     id: optInStatisticsRadioYes
-                    checked: optInStatisticsRadio.defaultChecked
+                    checked: false
                     text: qsTr("Yes")
                     Accessible.role: Accessible.RadioButton
                     Accessible.name: qsTr("Opt-in for anonymous usage statistics")
@@ -258,13 +257,12 @@ model release that uses your data!")
                 Layout.alignment: Qt.AlignVCenter
                 Layout.row: 1
                 Layout.column: 1
-                property bool defaultChecked: Network.isActive
                 property alias checked: optInNetworkRadioYes.checked
                 property bool choiceMade: optInNetworkRadioYes.checked || optInNetworkRadioNo.checked
 
                 RadioButton {
                     id: optInNetworkRadioYes
-                    checked: optInNetworkRadio.defaultChecked
+                    checked: false
                     text: qsTr("Yes")
                     Accessible.role: Accessible.RadioButton
                     Accessible.name: qsTr("Opt-in for network")
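
Note (appended after the patch, not applied by it): the fix turns on the new
InstalledModels::count property. Chat::reloadModel() now defers loading while
count is zero, and handleModelInstalled() completes the deferred load on the
next countChanged. The following is a self-contained sketch of that pattern,
assuming Qt 5 or 6 built with automoc; CountingProxy, Loader, and their member
names are illustrative stand-ins, not identifiers from this patch.

// deferred_load_sketch.cpp - build with automoc (CMake AUTOMOC or qmake)
#include <QCoreApplication>
#include <QDebug>
#include <QSortFilterProxyModel>
#include <QStringListModel>

class CountingProxy : public QSortFilterProxyModel
{
    Q_OBJECT
    Q_PROPERTY(int count READ count NOTIFY countChanged)
public:
    explicit CountingProxy(QObject *parent = nullptr)
        : QSortFilterProxyModel(parent)
    {
        // Any structural change can alter the row count, so funnel them all
        // into a single countChanged signal, as the patch does.
        connect(this, &CountingProxy::rowsInserted, this, &CountingProxy::countChanged);
        connect(this, &CountingProxy::rowsRemoved, this, &CountingProxy::countChanged);
        connect(this, &CountingProxy::modelReset, this, &CountingProxy::countChanged);
        connect(this, &CountingProxy::layoutChanged, this, &CountingProxy::countChanged);
    }
    int count() const { return rowCount(); }

Q_SIGNALS:
    void countChanged();
};

class Loader : public QObject
{
    Q_OBJECT
public:
    explicit Loader(CountingProxy *models, QObject *parent = nullptr)
        : QObject(parent)
        , m_models(models)
    {
        connect(models, &CountingProxy::countChanged, this, &Loader::handleModelInstalled);
    }

    void reloadModel()
    {
        // Mirrors Chat::reloadModel(): with no installed models yet, remember
        // the request instead of failing, and wait for countChanged.
        if (!m_models->count()) {
            m_shouldLoadWhenInstalled = true;
            return;
        }
        qDebug() << "loading model";
    }

private Q_SLOTS:
    void handleModelInstalled()
    {
        if (!m_shouldLoadWhenInstalled)
            return;
        m_shouldLoadWhenInstalled = false;
        reloadModel();
    }

private:
    CountingProxy *m_models;
    bool m_shouldLoadWhenInstalled = false;
};

#include "deferred_load_sketch.moc"

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    QStringListModel source;               // stands in for the full model list
    CountingProxy installed;
    installed.setSourceModel(&source);

    Loader loader(&installed);
    loader.reloadModel();                  // deferred: count() is still 0

    source.setStringList({ "model.bin" }); // "install" a model; load fires now
    return 0;
}

Running the sketch prints "loading model" only after the source model gains its
first row, which is the behavior the patch wants on a fresh install: the chat
waits until the first model finishes downloading, then loads it.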