From 7b66cb711999a1c3e9437d92f81536dc34c16d36 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Sun, 7 May 2023 11:24:07 -0400
Subject: [PATCH] Add debug for chatllm model loading and fix order of getting
 rid of the dummy chat when no models are restored.

---
 chatlistmodel.cpp |  7 ++++++-
 chatllm.cpp       | 29 ++++++++++++++++++++++++++++-
 llmodel/gptj.cpp  |  1 -
 3 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/chatlistmodel.cpp b/chatlistmodel.cpp
index 3ba80df5..cd0ef0c5 100644
--- a/chatlistmodel.cpp
+++ b/chatlistmodel.cpp
@@ -231,8 +231,13 @@ void ChatListModel::restoreChat(Chat *chat)
 void ChatListModel::chatsRestoredFinished()
 {
     if (m_dummyChat) {
-        removeChat(m_dummyChat);
+        beginResetModel();
+        Chat *dummy = m_dummyChat;
         m_dummyChat = nullptr;
+        m_chats.clear();
+        addChat();
+        delete dummy;
+        endResetModel();
     }
 
     if (m_chats.isEmpty())
diff --git a/chatllm.cpp b/chatllm.cpp
index 7be888b1..7b2d89d6 100644
--- a/chatllm.cpp
+++ b/chatllm.cpp
@@ -100,6 +100,13 @@ bool ChatLLM::loadModel(const QString &modelName)
         m_llmodel->loadModel(filePath.toStdString());
     }
 
+    restoreState();
+
+#if defined(DEBUG)
+    qDebug() << "chatllm modelLoadedChanged" << m_chat->id();
+    fflush(stdout);
+#endif
+
     emit isModelLoadedChanged();
 
     if (isFirstLoad)
@@ -200,6 +207,9 @@ bool ChatLLM::handlePrompt(int32_t token)
 {
     // m_promptResponseTokens and m_responseLogits are related to last prompt/response not
     // the entire context window which we can reset on regenerate prompt
+#if defined(DEBUG)
+    qDebug() << "chatllm prompt process" << m_chat->id() << token;
+#endif
     ++m_promptResponseTokens;
     return !m_stopGenerating;
 }
@@ -280,6 +290,9 @@ bool ChatLLM::prompt(const QString &prompt, const QString &prompt_template, int3
 
 void ChatLLM::unloadModel()
 {
+#if defined(DEBUG)
+    qDebug() << "chatllm unloadModel" << m_chat->id();
+#endif
     saveState();
     delete m_llmodel;
     m_llmodel = nullptr;
@@ -288,12 +301,14 @@ void ChatLLM::unloadModel()
 
 void ChatLLM::reloadModel(const QString &modelName)
 {
+#if defined(DEBUG)
+    qDebug() << "chatllm reloadModel" << m_chat->id();
+#endif
     if (modelName.isEmpty()) {
         loadDefaultModel();
     } else {
         loadModel(modelName);
     }
-    restoreState();
 }
 
 void ChatLLM::generateName()
@@ -367,6 +382,9 @@ bool ChatLLM::serialize(QDataStream &stream)
     saveState();
     QByteArray compressed = qCompress(m_state);
     stream << compressed;
+#if defined(DEBUG)
+    qDebug() << "chatllm serialize" << m_chat->id() << m_state.size();
+#endif
     return stream.status() == QDataStream::Ok;
 }
 
@@ -392,6 +410,9 @@ bool ChatLLM::deserialize(QDataStream &stream)
     QByteArray compressed;
     stream >> compressed;
     m_state = qUncompress(compressed);
+#if defined(DEBUG)
+    qDebug() << "chatllm deserialize" << m_chat->id();
+#endif
     return stream.status() == QDataStream::Ok;
 }
 
@@ -402,6 +423,9 @@ void ChatLLM::saveState()
 
     const size_t stateSize = m_llmodel->stateSize();
     m_state.resize(stateSize);
+#if defined(DEBUG)
+    qDebug() << "chatllm saveState" << m_chat->id() << "size:" << m_state.size();
+#endif
     m_llmodel->saveState(static_cast<uint8_t*>(reinterpret_cast<void*>(m_state.data())));
 }
 
@@ -410,5 +434,8 @@ void ChatLLM::restoreState()
     if (!isModelLoaded() || m_state.isEmpty())
         return;
 
+#if defined(DEBUG)
+    qDebug() << "chatllm restoreState" << m_chat->id() << "size:" << m_state.size();
+#endif
     m_llmodel->restoreState(static_cast<uint8_t*>(reinterpret_cast<void*>(m_state.data())));
 }
diff --git a/llmodel/gptj.cpp b/llmodel/gptj.cpp
index 74977854..d7057bd7 100644
--- a/llmodel/gptj.cpp
+++ b/llmodel/gptj.cpp
@@ -60,7 +60,6 @@ struct gptj_buffer {
     }
 
     ~gptj_buffer() {
-        std::cout << "yes we are cleaning up" << std::endl;
         fflush(stdout);
         delete[] addr;
     }