diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp
index 9f69500b..79f9d601 100644
--- a/gpt4all-chat/chat.cpp
+++ b/gpt4all-chat/chat.cpp
@@ -63,10 +63,10 @@ void Chat::connectLLM()
     connect(this, &Chat::loadDefaultModelRequested, m_llmodel, &ChatLLM::loadDefaultModel, Qt::QueuedConnection);
     connect(this, &Chat::loadModelRequested, m_llmodel, &ChatLLM::loadModel, Qt::QueuedConnection);
     connect(this, &Chat::generateNameRequested, m_llmodel, &ChatLLM::generateName, Qt::QueuedConnection);
+    connect(this, &Chat::regenerateResponseRequested, m_llmodel, &ChatLLM::regenerateResponse, Qt::QueuedConnection);
 
     // The following are blocking operations and will block the gui thread, therefore must be fast
     // to respond to
-    connect(this, &Chat::regenerateResponseRequested, m_llmodel, &ChatLLM::regenerateResponse, Qt::BlockingQueuedConnection);
     connect(this, &Chat::resetResponseRequested, m_llmodel, &ChatLLM::resetResponse, Qt::BlockingQueuedConnection);
     connect(this, &Chat::resetContextRequested, m_llmodel, &ChatLLM::resetContext, Qt::BlockingQueuedConnection);
 }
@@ -151,7 +151,7 @@ void Chat::handleLocalDocsRetrieved(const QString &uid, const QList
 
 void Chat::regenerateResponse()
 {
-    emit regenerateResponseRequested(); // blocking queued connection
+    emit regenerateResponseRequested();
 }
 
 void Chat::stopGenerating()
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 1073af66..ff3c8331 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -146,7 +146,7 @@ bool ChatLLM::loadModel(const QString &modelName)
     // We have a live model, but it isn't the one we want
     bool alreadyAcquired = isModelLoaded();
     if (alreadyAcquired) {
-        resetContextProtected();
+        resetContext();
 #if defined(DEBUG_MODEL_LOADING)
         qDebug() << "already acquired model deleted" << m_chat->id() << m_modelInfo.model;
 #endif
@@ -301,12 +301,6 @@ void ChatLLM::resetResponse()
 }
 
 void ChatLLM::resetContext()
-{
-    resetContextProtected();
-    emit sendResetContext();
-}
-
-void ChatLLM::resetContextProtected()
 {
     regenerateResponse();
     m_ctx = LLModel::PromptContext();
diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h
index bf617f2d..d2bd6a82 100644
--- a/gpt4all-chat/chatllm.h
+++ b/gpt4all-chat/chatllm.h
@@ -81,14 +81,12 @@ Q_SIGNALS:
     void recalcChanged();
     void sendStartup();
     void sendModelLoaded();
-    void sendResetContext();
     void generatedNameChanged();
     void stateChanged();
     void threadStarted();
     void shouldBeLoadedChanged();
 
 protected:
-    void resetContextProtected();
     bool handlePrompt(int32_t token);
     bool handleResponse(int32_t token, const std::string &response);
     bool handleRecalculate(bool isRecalc);
diff --git a/gpt4all-chat/server.cpp b/gpt4all-chat/server.cpp
index c816f16c..9211bf8a 100644
--- a/gpt4all-chat/server.cpp
+++ b/gpt4all-chat/server.cpp
@@ -286,7 +286,7 @@ QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &re
     }
 
     // don't remember any context
-    resetContextProtected();
+    resetContext();
 
     QSettings settings;
     settings.sync();