diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index aa19b696..d0c9d33b 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -672,7 +672,11 @@ void ChatLLM::unloadModel()
     if (!isModelLoaded() || m_isServer)
         return;
 
-    emit modelLoadingPercentageChanged(0.0f);
+    if (!m_forceUnloadModel || !m_shouldBeLoaded)
+        emit modelLoadingPercentageChanged(0.0f);
+    else
+        emit modelLoadingPercentageChanged(std::numeric_limits<float>::min()); // small non-zero positive value
+
     saveState();
 #if defined(DEBUG_MODEL_LOADING)
     qDebug() << "unloadModel" << m_llmThread.objectName() << m_llModelInfo.model;
diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml
index 7bacb6cb..70fe6dae 100644
--- a/gpt4all-chat/main.qml
+++ b/gpt4all-chat/main.qml
@@ -463,7 +463,7 @@ Window {
 
         MyMiniButton {
             id: ejectButton
-            visible: currentChat.isModelLoaded
+            visible: currentChat.isModelLoaded && !window.isCurrentlyLoading
             z: 500
             anchors.right: parent.right
             anchors.rightMargin: 50