From 968868415e4ee6631977576e0a4f6ff26c1e68c1 Mon Sep 17 00:00:00 2001 From: Adam Treat <treat.adam@gmail.com> Date: Tue, 20 Jun 2023 17:14:11 -0400 Subject: [PATCH] Move saving chats to a thread and display what we're doing to the user. --- gpt4all-chat/chatlistmodel.cpp | 30 +++++++++++++++++++++++++++--- gpt4all-chat/chatlistmodel.h | 22 +++++++++++++++++++++- gpt4all-chat/llm.cpp | 8 -------- gpt4all-chat/llm.h | 3 --- gpt4all-chat/main.qml | 27 +++++++++++++++++++++++++++ 5 files changed, 75 insertions(+), 15 deletions(-) diff --git a/gpt4all-chat/chatlistmodel.cpp b/gpt4all-chat/chatlistmodel.cpp index 510d9dbc..e1c184ec 100644 --- a/gpt4all-chat/chatlistmodel.cpp +++ b/gpt4all-chat/chatlistmodel.cpp @@ -63,11 +63,17 @@ void ChatListModel::removeChatFile(Chat *chat) const qWarning() << "ERROR: Couldn't remove chat file:" << file.fileName(); } -void ChatListModel::saveChats() const +ChatSaver::ChatSaver() + : QObject(nullptr) +{ + moveToThread(&m_thread); + m_thread.start(); +} + +void ChatListModel::saveChats() { - QElapsedTimer timer; - timer.start(); const QString savePath = Download::globalInstance()->downloadLocalModelsPath(); + QVector<Chat*> toSave; for (Chat *chat : m_chats) { if (chat == m_serverChat) continue; @@ -76,6 +82,23 @@ void ChatListModel::saveChats() const continue; if (isChatGPT && !m_shouldSaveChatGPTChats) continue; + toSave.append(chat); + } + if (toSave.isEmpty()) + return; + + ChatSaver *saver = new ChatSaver; + connect(this, &ChatListModel::requestSaveChats, saver, &ChatSaver::saveChats, Qt::QueuedConnection); + connect(saver, &ChatSaver::saveChatsFinished, this, &ChatListModel::saveChatsFinished, Qt::QueuedConnection); + emit requestSaveChats(toSave); +} + +void ChatSaver::saveChats(const QVector<Chat*> &chats) +{ + QElapsedTimer timer; + timer.start(); + const QString savePath = Download::globalInstance()->downloadLocalModelsPath(); + for (Chat *chat : chats) { QString fileName = "gpt4all-" + chat->id() + ".chat"; QFile file(savePath + "/" + fileName); bool success = 
file.open(QIODevice::WriteOnly); @@ -98,6 +121,7 @@ void ChatListModel::saveChats() const } qint64 elapsedTime = timer.elapsed(); qDebug() << "serializing chats took:" << elapsedTime << "ms"; + emit saveChatsFinished(); } void ChatsRestoreThread::run() diff --git a/gpt4all-chat/chatlistmodel.h b/gpt4all-chat/chatlistmodel.h index a66aa3fe..10129011 100644 --- a/gpt4all-chat/chatlistmodel.h +++ b/gpt4all-chat/chatlistmodel.h @@ -14,6 +14,23 @@ Q_SIGNALS: void chatRestored(Chat *chat); }; +class ChatSaver : public QObject +{ + Q_OBJECT +public: + explicit ChatSaver(); + void stop(); + +Q_SIGNALS: + void saveChatsFinished(); + +public Q_SLOTS: + void saveChats(const QVector<Chat*> &chats); + +private: + QThread m_thread; +}; + class ChatListModel : public QAbstractListModel { Q_OBJECT @@ -190,7 +207,7 @@ public: int count() const { return m_chats.size(); } void removeChatFile(Chat *chat) const; - void saveChats() const; + Q_INVOKABLE void saveChats(); void restoreChat(Chat *chat); void chatsRestoredFinished(); @@ -202,6 +219,9 @@ Q_SIGNALS: void currentChatChanged(); void shouldSaveChatsChanged(); void shouldSaveChatGPTChatsChanged(); + void chatsSavedFinished(); + void requestSaveChats(const QVector<Chat*> &); + void saveChatsFinished(); private Q_SLOTS: void newChatCountChanged() diff --git a/gpt4all-chat/llm.cpp b/gpt4all-chat/llm.cpp index c998d5f8..03a44555 100644 --- a/gpt4all-chat/llm.cpp +++ b/gpt4all-chat/llm.cpp @@ -38,9 +38,6 @@ LLM::LLM() llmodelSearchPaths += ";" + frameworksDir; #endif LLModel::setImplementationsSearchPath(llmodelSearchPaths.toStdString()); - - connect(QCoreApplication::instance(), &QCoreApplication::aboutToQuit, - this, &LLM::aboutToQuit); connect(this, &LLM::serverEnabledChanged, m_chatListModel, &ChatListModel::handleServerEnabledChanged); @@ -123,8 +120,3 @@ void LLM::setServerEnabled(bool enabled) m_serverEnabled = enabled; emit serverEnabledChanged(); } - -void LLM::aboutToQuit() -{ - m_chatListModel->saveChats(); -} diff --git 
a/gpt4all-chat/llm.h b/gpt4all-chat/llm.h index d0e88bc9..fe7c8646 100644 --- a/gpt4all-chat/llm.h +++ b/gpt4all-chat/llm.h @@ -34,9 +34,6 @@ Q_SIGNALS: void serverEnabledChanged(); void compatHardwareChanged(); -private Q_SLOTS: - void aboutToQuit(); - private: ChatListModel *m_chatListModel; int32_t m_threadCount; diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml index 20d4a7ed..89adada8 100644 --- a/gpt4all-chat/main.qml +++ b/gpt4all-chat/main.qml @@ -24,6 +24,25 @@ Window { property var currentChat: LLM.chatListModel.currentChat property var chatModel: currentChat.chatModel + property bool hasSaved: false + + onClosing: function(close) { + if (window.hasSaved) + return; + + savingPopup.open(); + LLM.chatListModel.saveChats(); + close.accepted = false + } + + Connections { + target: LLM.chatListModel + function onSaveChatsFinished() { + window.hasSaved = true; + savingPopup.close(); + window.close() + } + } color: theme.backgroundDarkest @@ -406,6 +425,14 @@ Window { } } + PopupDialog { + id: savingPopup + anchors.centerIn: parent + shouldTimeOut: false + shouldShowBusy: true + text: qsTr("Saving chats.") + } + MyToolButton { id: copyButton anchors.right: settingsButton.left