diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp
index 7b6cca79..3ae25616 100644
--- a/gpt4all-chat/chat.cpp
+++ b/gpt4all-chat/chat.cpp
@@ -96,10 +96,22 @@ bool Chat::isModelLoaded() const
     return m_llmodel->isModelLoaded();
 }
 
+void Chat::resetResponseState()
+{
+    if (m_responseInProgress && m_responseState == Chat::LocalDocsRetrieval)
+        return;
+
+    m_responseInProgress = true;
+    m_responseState = Chat::LocalDocsRetrieval;
+    emit responseInProgressChanged();
+    emit responseStateChanged();
+}
+
 void Chat::prompt(const QString &prompt, const QString &prompt_template, int32_t n_predict,
     int32_t top_k, float top_p, float temp, int32_t n_batch, float repeat_penalty,
     int32_t repeat_penalty_tokens)
 {
+    resetResponseState();
     emit promptRequested(
         prompt,
         prompt_template,
@@ -115,6 +127,8 @@ void Chat::prompt(const QString &prompt, const QString &prompt_template, int32_t
 
 void Chat::regenerateResponse()
 {
+    const int index = m_chatModel->count() - 1;
+    m_chatModel->updateReferences(index, QString(), QList<QString>());
     emit regenerateResponseRequested();
 }
 
@@ -234,10 +248,7 @@ void Chat::setModelName(const QString &modelName)
 
 void Chat::newPromptResponsePair(const QString &prompt)
 {
-    m_responseInProgress = true;
-    m_responseState = Chat::LocalDocsRetrieval;
-    emit responseInProgressChanged();
-    emit responseStateChanged();
+    resetResponseState();
     m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
     m_chatModel->appendPrompt(tr("Prompt: "), prompt);
     m_chatModel->appendResponse(tr("Response: "), prompt);
@@ -246,11 +257,8 @@ void Chat::newPromptResponsePair(const QString &prompt)
 
 void Chat::serverNewPromptResponsePair(const QString &prompt)
 {
-    m_responseInProgress = true;
-    m_responseState = Chat::LocalDocsRetrieval;
-    emit responseInProgressChanged();
-    emit responseStateChanged();
-    m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
+    resetResponseState();
+    m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
     m_chatModel->appendPrompt(tr("Prompt: "), prompt);
     m_chatModel->appendResponse(tr("Response: "), prompt);
 }
diff --git a/gpt4all-chat/chat.h b/gpt4all-chat/chat.h
index be885a9e..72dcccdb 100644
--- a/gpt4all-chat/chat.h
+++ b/gpt4all-chat/chat.h
@@ -87,6 +87,7 @@ public:
     Q_INVOKABLE bool hasCollection(const QString &collection) const;
     Q_INVOKABLE void addCollection(const QString &collection);
     Q_INVOKABLE void removeCollection(const QString &collection);
+    void resetResponseState();
 
 public Q_SLOTS:
     void serverNewPromptResponsePair(const QString &prompt);