From c0d4a9d426c322fe7419d890b0b71a1dda8be20a Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Mon, 1 May 2023 12:30:54 -0400
Subject: [PATCH] Continue to shrink the API space for qml and the backend.

---
 chat.cpp    | 12 +++++++-----
 chat.h      |  2 +-
 chatmodel.h |  4 ++--
 main.qml    |  4 +---
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/chat.cpp b/chat.cpp
index f9fcbdb9..0db67f4b 100644
--- a/chat.cpp
+++ b/chat.cpp
@@ -55,11 +55,6 @@ void Chat::regenerateResponse()
     emit regenerateResponseRequested(); // blocking queued connection
 }
 
-void Chat::resetResponse()
-{
-    emit resetResponseRequested(); // blocking queued connection
-}
-
 void Chat::stopGenerating()
 {
     m_llmodel->stopGenerating();
@@ -109,6 +104,13 @@ int32_t Chat::threadCount()
     return m_llmodel->threadCount();
 }
 
+void Chat::newPromptResponsePair(const QString &prompt)
+{
+    m_chatModel->appendPrompt(tr("Prompt: "), prompt);
+    m_chatModel->appendResponse(tr("Response: "), prompt);
+    emit resetResponseRequested(); // blocking queued connection
+}
+
 bool Chat::isRecalc() const
 {
     return m_llmodel->isRecalc();
diff --git a/chat.h b/chat.h
index b68fb74c..abc60897 100644
--- a/chat.h
+++ b/chat.h
@@ -34,11 +34,11 @@ public:
     Q_INVOKABLE void prompt(const QString &prompt, const QString &prompt_template, int32_t n_predict, int32_t top_k, float top_p,
         float temp, int32_t n_batch, float repeat_penalty, int32_t repeat_penalty_tokens);
     Q_INVOKABLE void regenerateResponse();
-    Q_INVOKABLE void resetResponse();
     Q_INVOKABLE void stopGenerating();
     Q_INVOKABLE void syncThreadCount();
     Q_INVOKABLE void setThreadCount(int32_t n_threads);
     Q_INVOKABLE int32_t threadCount();
+    Q_INVOKABLE void newPromptResponsePair(const QString &prompt);
     QString response() const;
     bool responseInProgress() const { return m_responseInProgress; }
diff --git a/chatmodel.h b/chatmodel.h
index c393d347..e5be2719 100644
--- a/chatmodel.h
+++ b/chatmodel.h
@@ -101,7 +101,7 @@ public:
         return roles;
     }
 
-    Q_INVOKABLE void appendPrompt(const QString &name, const QString &value)
+    void appendPrompt(const QString &name, const QString &value)
     {
         ChatItem item;
         item.name = name;
@@ -112,7 +112,7 @@ public:
         emit countChanged();
     }
 
-    Q_INVOKABLE void appendResponse(const QString &name, const QString &prompt)
+    void appendResponse(const QString &name, const QString &prompt)
     {
         ChatItem item;
         item.id = m_chatItems.count(); // This is only relevant for responses
diff --git a/main.qml b/main.qml
index 6d35c781..772e727a 100644
--- a/main.qml
+++ b/main.qml
@@ -949,9 +949,7 @@ Window {
                         chatModel.updateCurrentResponse(index, false);
                         chatModel.updateValue(index, LLM.currentChat.response);
                     }
-                    chatModel.appendPrompt(qsTr("Prompt: "), textInput.text);
-                    chatModel.appendResponse(qsTr("Response: "), textInput.text);
-                    LLM.currentChat.resetResponse()
+                    LLM.currentChat.newPromptResponsePair(textInput.text);
                     LLM.currentChat.prompt(textInput.text, settingsDialog.promptTemplate,
                         settingsDialog.maxLength, settingsDialog.topK,
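
Note (illustration, not part of the patch): the change consolidates three QML-facing calls (appendPrompt, appendResponse, resetResponse) into a single invokable backend method. The following is a minimal, self-contained C++ sketch of that call pattern; ChatSketch, ChatItem and main() are made up for this example and are not the actual gpt4all/Qt classes.

// Illustrative sketch only -- models the pattern the patch introduces:
// the UI layer makes one call (newPromptResponsePair) instead of three.
#include <iostream>
#include <string>
#include <vector>

struct ChatItem {
    int id = 0;
    std::string name;   // "Prompt: " or "Response: "
    std::string value;  // prompt text, or the (initially empty) response
};

class ChatSketch {
public:
    // Single entry point for the UI: append both sides of the exchange and
    // reset the in-progress response, mirroring Chat::newPromptResponsePair.
    void newPromptResponsePair(const std::string &prompt) {
        appendPrompt("Prompt: ", prompt);
        appendResponse("Response: ");
        resetResponse();
    }

    void dump() const {
        for (const auto &item : m_items)
            std::cout << item.id << " | " << item.name << item.value << "\n";
    }

private:
    // In the patch these lose their Q_INVOKABLE markers so QML can no longer
    // call them directly; here they are simply private helpers.
    void appendPrompt(const std::string &name, const std::string &value) {
        m_items.push_back({static_cast<int>(m_items.size()), name, value});
    }
    void appendResponse(const std::string &name) {
        // The response value is filled in later as generation streams tokens.
        m_items.push_back({static_cast<int>(m_items.size()), name, ""});
    }
    void resetResponse() { m_inProgressResponse.clear(); }

    std::vector<ChatItem> m_items;
    std::string m_inProgressResponse;
};

int main() {
    ChatSketch chat;
    chat.newPromptResponsePair("Why consolidate the chat API?");
    chat.dump(); // prints "0 | Prompt: ..." and "1 | Response: "
    return 0;
}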