From 44717682a7adc6c9fcf65e3fae73bfb9b2f21fec Mon Sep 17 00:00:00 2001
From: Jared Van Bortel <jared@nomic.ai>
Date: Wed, 6 Mar 2024 17:14:54 -0500
Subject: [PATCH] chat: implement display of model loading warnings (#2034)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
---
 gpt4all-chat/chat.cpp      |  4 +++-
 gpt4all-chat/chat.h        |  1 +
 gpt4all-chat/chatgpt.cpp   |  6 +++---
 gpt4all-chat/chatllm.cpp   | 11 +++++++++--
 gpt4all-chat/chatllm.h     |  1 +
 gpt4all-chat/main.qml      | 12 ++++++++++++
 gpt4all-chat/modellist.cpp |  4 ++--
 gpt4all-chat/network.cpp   |  4 ++--
 8 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp
index 62c33a1a..86ea1a94 100644
--- a/gpt4all-chat/chat.cpp
+++ b/gpt4all-chat/chat.cpp
@@ -46,6 +46,7 @@ void Chat::connectLLM()
     connect(m_llmodel, &ChatLLM::promptProcessing, this, &Chat::promptProcessing, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::responseStopped, this, &Chat::responseStopped, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::modelLoadingError, this, &Chat::handleModelLoadingError, Qt::QueuedConnection);
+    connect(m_llmodel, &ChatLLM::modelLoadingWarning, this, &Chat::modelLoadingWarning, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::recalcChanged, this, &Chat::handleRecalculating, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::generatedNameChanged, this, &Chat::generatedNameChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::reportSpeed, this, &Chat::handleTokenSpeedChanged, Qt::QueuedConnection);
@@ -333,7 +334,8 @@ void Chat::handleRecalculating()
 
 void Chat::handleModelLoadingError(const QString &error)
 {
-    qWarning() << "ERROR:" << qPrintable(error) << "id" << id();
+    auto stream = qWarning().noquote() << "ERROR:" << error << "id";
+    stream.quote() << id();
     m_modelLoadingError = error;
     emit modelLoadingErrorChanged();
 }
diff --git a/gpt4all-chat/chat.h b/gpt4all-chat/chat.h
index 7edbfba2..0f8b537a 100644
--- a/gpt4all-chat/chat.h
+++ b/gpt4all-chat/chat.h
@@ -113,6 +113,7 @@ Q_SIGNALS:
     void chatModelChanged();
     void isModelLoadedChanged();
     void modelLoadingPercentageChanged();
+    void modelLoadingWarning(const QString &warning);
     void responseChanged();
     void responseInProgressChanged();
     void responseStateChanged();
diff --git a/gpt4all-chat/chatgpt.cpp b/gpt4all-chat/chatgpt.cpp
index 264b3849..e2486e66 100644
--- a/gpt4all-chat/chatgpt.cpp
+++ b/gpt4all-chat/chatgpt.cpp
@@ -142,7 +142,7 @@ void ChatGPT::prompt(const std::string &prompt,
     QJsonDocument doc(root);
 
 #if defined(DEBUG)
-    qDebug() << "ChatGPT::prompt begin network request" << qPrintable(doc.toJson());
+    qDebug().noquote() << "ChatGPT::prompt begin network request" << doc.toJson();
 #endif
 
     m_responseCallback = responseCallback;
@@ -231,7 +231,7 @@ void ChatGPTWorker::handleReadyRead()
         int code = response.toInt(&ok);
         if (!ok || code != 200) {
             m_chat->callResponse(-1, QString("\nERROR: 2 ChatGPT responded with error code \"%1-%2\" %3\n")
-                .arg(code).arg(reply->errorString()).arg(qPrintable(reply->readAll())).toStdString());
+                .arg(code).arg(reply->errorString()).arg(reply->readAll()).toStdString());
             emit finished();
             return;
         }
@@ -246,7 +246,7 @@ void ChatGPTWorker::handleReadyRead()
         if (jsonData == "[DONE]")
             continue;
 #if defined(DEBUG)
-        qDebug() << "line" << qPrintable(jsonData);
+        qDebug().noquote() << "line" << jsonData;
 #endif
         QJsonParseError err;
         const QJsonDocument document = QJsonDocument::fromJson(jsonData.toUtf8(), &err);
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index fa2d5539..5650b43d 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -308,7 +308,14 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
 
         if (m_llModelInfo.model) {
             if (m_llModelInfo.model->isModelBlacklisted(filePath.toStdString())) {
-                // TODO(cebtenzzre): warn that this model is out-of-date
+                static QSet<QString> warned;
+                auto fname = modelInfo.filename();
+                if (!warned.contains(fname)) {
+                    emit modelLoadingWarning(QString(
+                        "%1 is known to be broken. Please get a replacement via the download dialog."
+                    ).arg(fname));
+                    warned.insert(fname); // don't warn again until restart
+                }
             }
 
             m_llModelInfo.model->setProgressCallback([this](float progress) -> bool {
@@ -996,7 +1003,7 @@ void ChatLLM::restoreState()
         m_llModelInfo.model->restoreState(static_cast<const uint8_t *>(reinterpret_cast<void *>(m_state.data())));
         m_processedSystemPrompt = true;
     } else {
-        qWarning() << "restoring state from text because" << m_llModelInfo.model->stateSize() << "!=" << m_state.size() << "\n";
+        qWarning() << "restoring state from text because" << m_llModelInfo.model->stateSize() << "!=" << m_state.size();
         m_restoreStateFromText = true;
     }
 
diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h
index f4015d1b..03e6edc6 100644
--- a/gpt4all-chat/chatllm.h
+++ b/gpt4all-chat/chatllm.h
@@ -120,6 +120,7 @@ Q_SIGNALS:
     void recalcChanged();
     void modelLoadingPercentageChanged(float);
     void modelLoadingError(const QString &error);
+    void modelLoadingWarning(const QString &warning);
     void responseChanged(const QString &response);
     void promptProcessing();
     void responseStopped();
diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml
index 54375a28..f1691b8e 100644
--- a/gpt4all-chat/main.qml
+++ b/gpt4all-chat/main.qml
@@ -93,6 +93,9 @@ Window {
             if (currentChat.modelLoadingError !== "")
                 modelLoadingErrorPopup.open()
         }
+        function onModelLoadingWarning(warning) {
+            modelLoadingWarningPopup.open_(warning)
+        }
     }
 
     property bool hasShownModelDownload: false
@@ -213,6 +216,15 @@
             + "<li>Check out our discord channel for help")
     }
 
+    PopupDialog {
+        id: modelLoadingWarningPopup
+        property string message
+        anchors.centerIn: parent
+        shouldTimeOut: false
+        text: qsTr("<h3>Warning</h3><p>%1</p>").arg(message)
+        function open_(msg) { message = msg; open(); }
+    }
+
     Rectangle {
         id: accentRibbon
         anchors.left: parent.left
diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp
index ecd2592e..2df9cf58 100644
--- a/gpt4all-chat/modellist.cpp
+++ b/gpt4all-chat/modellist.cpp
@@ -1311,7 +1311,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
         if (!file.open(QIODeviceBase::WriteOnly)) {
             qWarning() << "ERROR: Couldn't write models config file: " << modelsConfig;
         } else {
-            file.write(jsonData.constData());
+            file.write(jsonData);
             file.close();
         }
     }
@@ -1328,7 +1328,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
         QString requiresVersion = obj["requires"].toString();
         QString versionRemoved = obj["removedIn"].toString();
         QString url = obj["url"].toString();
-        QByteArray modelHash = obj["md5sum"].toString().toLatin1().constData();
+        QByteArray modelHash = obj["md5sum"].toString().toLatin1();
         bool isDefault = obj.contains("isDefault") && obj["isDefault"] == QString("true");
         bool disableGUI = obj.contains("disableGUI") && obj["disableGUI"] == QString("true");
         QString description = obj["description"].toString();
diff --git a/gpt4all-chat/network.cpp b/gpt4all-chat/network.cpp
index 6ca2a643..596d4eec 100644
--- a/gpt4all-chat/network.cpp
+++ b/gpt4all-chat/network.cpp
@@ -462,9 +462,9 @@ void Network::handleIpifyFinished()
         qWarning() << "ERROR: ipify invalid response.";
     if (code != 200)
         qWarning() << "ERROR: ipify response != 200 code:" << code;
-    m_ipify = qPrintable(reply->readAll());
+    m_ipify = reply->readAll();
 #if defined(DEBUG)
-    printf("ipify finished %s\n", m_ipify.toLatin1().constData());
+    printf("ipify finished %s\n", qPrintable(m_ipify));
     fflush(stdout);
 #endif
     reply->deleteLater();