diff --git a/CMakeLists.txt b/CMakeLists.txt
index cb086a06..78d38cd7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -47,7 +47,6 @@ set(CPACK_GENERATOR "IFW")
 
 if(${CMAKE_SYSTEM_NAME} MATCHES Linux)
     find_program(LINUXDEPLOYQT linuxdeployqt HINTS ${_qt_bin_dir})
-    message(AUTHOR_WARNING "heeeeeee ${LINUXDEPLOYQT}")
     configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-linux.cmake.in"
         "${CMAKE_BINARY_DIR}/cmake/deploy-qt-linux.cmake" @ONLY)
     set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-linux.cmake)
@@ -59,6 +58,11 @@ elseif(${CMAKE_SYSTEM_NAME} MATCHES Windows)
     set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-windows.cmake)
     set(CPACK_IFW_ROOT "C:/Qt/Tools/QtInstallerFramework/4.5")
 elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
+    find_program(MACDEPLOYQT macdeployqt HINTS ${_qt_bin_dir})
+    configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-mac.cmake.in"
+        "${CMAKE_BINARY_DIR}/cmake/deploy-qt-mac.cmake" @ONLY)
+    set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-mac.cmake)
+    set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.5")
 endif()
 
 set(CPACK_PACKAGE_VERSION_MAJOR "0")
diff --git a/llm.cpp b/llm.cpp
index acd8cefd..c8c92b7d 100644
--- a/llm.cpp
+++ b/llm.cpp
@@ -13,6 +13,8 @@ LLM *LLM::globalInstance()
     return llmInstance();
 }
 
+static GPTJ::PromptContext s_ctx;
+
 GPTJObject::GPTJObject()
     : QObject{nullptr}
     , m_gptj(new GPTJ)
@@ -51,6 +53,11 @@ void GPTJObject::resetResponse()
     m_response = std::string();
 }
 
+void GPTJObject::resetContext()
+{
+    s_ctx = GPTJ::PromptContext();
+}
+
 QString GPTJObject::response() const
 {
     return QString::fromStdString(m_response);
@@ -75,8 +82,7 @@ bool GPTJObject::prompt(const QString &prompt)
     m_stopGenerating = false;
     auto func = std::bind(&GPTJObject::handleResponse, this, std::placeholders::_1);
     emit responseStarted();
-    static GPTJ::PromptContext ctx;
-    m_gptj->prompt(prompt.toStdString(), func, ctx, 4096 /*number of chars to predict*/);
+    m_gptj->prompt(prompt.toStdString(), func, s_ctx, 4096 /*number of chars to predict*/);
     emit responseStopped();
     return true;
 }
@@ -93,6 +99,7 @@ LLM::LLM()
 
     connect(this, &LLM::promptRequested, m_gptj, &GPTJObject::prompt, Qt::QueuedConnection);
     connect(this, &LLM::resetResponseRequested, m_gptj, &GPTJObject::resetResponse, Qt::BlockingQueuedConnection);
+    connect(this, &LLM::resetContextRequested, m_gptj, &GPTJObject::resetContext, Qt::BlockingQueuedConnection);
 }
 
 bool LLM::isModelLoaded() const
@@ -110,6 +117,11 @@ void LLM::resetResponse()
 {
     emit resetResponseRequested(); // blocking queued connection
 }
+void LLM::resetContext()
+{
+    emit resetContextRequested(); // blocking queued connection
+}
+
 void LLM::stopGenerating()
 {
     m_gptj->stopGenerating();
diff --git a/llm.h b/llm.h
index 40da30f6..05013d94 100644
--- a/llm.h
+++ b/llm.h
@@ -18,6 +18,7 @@ public:
     bool loadModel();
     bool isModelLoaded() const;
     void resetResponse();
+    void resetContext();
     void stopGenerating() { m_stopGenerating = true; }
 
     QString response() const;
@@ -53,6 +54,7 @@ public:
 
     Q_INVOKABLE bool isModelLoaded() const;
     Q_INVOKABLE void prompt(const QString &prompt);
+    Q_INVOKABLE void resetContext();
     Q_INVOKABLE void resetResponse();
     Q_INVOKABLE void stopGenerating();
 
@@ -65,6 +67,7 @@ Q_SIGNALS:
     void responseInProgressChanged();
     void promptRequested(const QString &prompt);
    void resetResponseRequested();
+    void resetContextRequested();
 
 private Q_SLOTS:
     void responseStarted();
diff --git a/main.qml b/main.qml
index 55aca913..b8cf677f 100644
--- a/main.qml
+++ b/main.qml
@@ -187,7 +187,7 @@ Window {
         TextField {
             id: textInput
             anchors.left: parent.left
-            anchors.right: parent.right
+            anchors.right: resetContextButton.left
             anchors.bottom: parent.bottom
             anchors.margins: 30
             color: "#dadadc"
@@ -239,5 +239,25 @@ Window {
                 }
             }
         }
+
+        Button {
+            id: resetContextButton
+            anchors.right: parent.right
+            anchors.rightMargin: 30
+            anchors.verticalCenter: textInput.verticalCenter
+            width: 40
+            height: 40
+
+            background: Image {
+                anchors.fill: parent
+                source: "qrc:/gpt4all-chat/icons/regenerate.svg"
+            }
+
+            onClicked: {
+                LLM.stopGenerating()
+                LLM.resetContext()
+                chatModel.clear()
+            }
+        }
     }
 }
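A note on the threading pattern the llm.cpp/llm.h changes rely on: resetContext() is wired with Qt::BlockingQueuedConnection, so the emit in LLM::resetContext() (on the GUI thread) does not return until GPTJObject::resetContext() has finished running on the worker thread. That guarantees the prompt context is actually cleared before the UI queues the next prompt. Below is a minimal, self-contained sketch of the same pattern, not code from this patch; the Worker/Controller names are illustrative, and it assumes a single-file Qt console build with AUTOMOC enabled.

// main.cpp -- standalone sketch of the BlockingQueuedConnection reset pattern.
// Assumes Qt 5/6 with CMake AUTOMOC (hence the "main.moc" include at the end).
#include <QCoreApplication>
#include <QDebug>
#include <QThread>

class Worker : public QObject {
    Q_OBJECT
public Q_SLOTS:
    // Runs on the worker thread. With Qt::BlockingQueuedConnection the
    // emitting thread is suspended until this slot returns, so the caller
    // can rely on the reset having completed -- the same guarantee
    // GPTJObject::resetContext() gives LLM::resetContext() above.
    void reset() { qDebug() << "state cleared on" << QThread::currentThread(); }
};

class Controller : public QObject {
    Q_OBJECT
public:
    Controller() {
        m_worker.moveToThread(&m_thread);
        // BlockingQueuedConnection is safe here because the receiver lives
        // on a different thread; using it when sender and receiver share a
        // thread would deadlock.
        connect(this, &Controller::resetRequested,
                &m_worker, &Worker::reset, Qt::BlockingQueuedConnection);
        m_thread.start();
    }
    ~Controller() { m_thread.quit(); m_thread.wait(); }
    void reset() { emit resetRequested(); } // blocks, like LLM::resetContext()
Q_SIGNALS:
    void resetRequested();
private:
    QThread m_thread;
    Worker m_worker;
};

int main(int argc, char *argv[]) {
    QCoreApplication app(argc, argv);
    Controller controller;
    controller.reset(); // returns only after Worker::reset() has run
    return 0;
}

#include "main.moc"

The same reasoning explains why the patch also uses BlockingQueuedConnection for resetResponse but plain QueuedConnection for prompt: a prompt may run for a long time and must not stall the GUI thread, whereas the reset operations are cheap and the caller needs them finished before proceeding.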