diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp
index 8730adbc..62c33a1a 100644
--- a/gpt4all-chat/chat.cpp
+++ b/gpt4all-chat/chat.cpp
@@ -243,7 +243,7 @@ void Chat::setModelInfo(const ModelInfo &modelInfo)
     if (m_modelInfo == modelInfo && isModelLoaded())
         return;
 
-    m_modelLoadingPercentage = std::numeric_limits<float>::min();
+    m_modelLoadingPercentage = std::numeric_limits<float>::min(); // small non-zero positive value
     emit isModelLoadedChanged();
     m_modelLoadingError = QString();
     emit modelLoadingErrorChanged();
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 4b456e34..bf3f6253 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -222,7 +222,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
 #endif
         delete m_llModelInfo.model;
         m_llModelInfo.model = nullptr;
-        emit modelLoadingPercentageChanged(std::numeric_limits<float>::min());
+        emit modelLoadingPercentageChanged(std::numeric_limits<float>::min()); // small non-zero positive value
     } else if (!m_isServer) {
         // This is a blocking call that tries to retrieve the model we need from the model store.
         // If it succeeds, then we just have to restore state. If the store has never had a model
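
The comment added by this patch rests on a detail of the standard library: for floating-point types, `std::numeric_limits<T>::min()` is the smallest positive normalized value (about 1.18e-38 for `float`), not the most negative representable value, which is `lowest()`. That is what makes it usable here as a "non-zero but effectively 0%" loading sentinel. A minimal standalone sketch of the distinction (illustrative only, not part of the patch):

```cpp
#include <iostream>
#include <limits>

int main() {
    // For float, min() is the smallest positive normalized value,
    // so it compares greater than 0.0f while still printing as ~0.
    std::cout << std::numeric_limits<float>::min()    << '\n'; // ~1.17549e-38
    std::cout << std::numeric_limits<float>::lowest() << '\n'; // ~-3.40282e+38
}
```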