mirror of https://github.com/nomic-ai/gpt4all (synced 2024-11-18 03:25:46 +00:00)

commit 59f3c093cb
parent e2458454d3

    Stop generating anything on shutdown.
@@ -90,6 +90,7 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer)
 
 ChatLLM::~ChatLLM()
 {
+    m_stopGenerating = true;
     m_llmThread.quit();
     m_llmThread.wait();
 
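The ordering in this hunk is the point of the commit: the stop flag is raised before the worker thread is quit and joined, so a generation in flight bails out instead of blocking shutdown. A minimal standard-library sketch of the same pattern, not part of the commit (Worker and its members are hypothetical; the real code uses QThread::quit()/wait()):

#include <atomic>
#include <chrono>
#include <thread>

struct Worker {
    std::atomic<bool> stopGenerating{false};
    std::thread thread;

    Worker() {
        thread = std::thread([this] {
            // Simulated token loop: checks the flag between "tokens".
            while (!stopGenerating.load())
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
        });
    }

    ~Worker() {
        stopGenerating = true; // ask the loop to bail out first...
        thread.join();         // ...then block until it actually exits
    }
};

int main() { Worker w; } // destructor returns promptly instead of hanging

Set the flag after the join and the destructor would wait for the full response to finish; set it without joining and the thread could outlive the object it captures.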
@@ -588,7 +589,7 @@ bool ChatLLM::handleNamePrompt(int32_t token)
 {
     Q_UNUSED(token);
     qt_noop();
-    return true;
+    return !m_stopGenerating;
 }
 
 bool ChatLLM::handleNameResponse(int32_t token, const std::string &response)
@@ -606,28 +607,26 @@ bool ChatLLM::handleNameRecalculate(bool isRecalc)
 {
     Q_UNUSED(isRecalc);
     Q_UNREACHABLE();
-    return true;
+    return !m_stopGenerating;
 }
 
 bool ChatLLM::handleSystemPrompt(int32_t token)
 {
     Q_UNUSED(token);
-    qt_noop();
-    return true;
+    return !m_stopGenerating;
 }
 
 bool ChatLLM::handleSystemResponse(int32_t token, const std::string &response)
 {
     Q_UNUSED(token);
     Q_UNUSED(response);
-    return false;
+    return !m_stopGenerating;
 }
 
 bool ChatLLM::handleSystemRecalculate(bool isRecalc)
 {
     Q_UNUSED(isRecalc);
-    Q_UNREACHABLE();
-    return true;
+    return !m_stopGenerating;
 }
 
 bool ChatLLM::serialize(QDataStream &stream, int version)
@@ -757,6 +756,7 @@ void ChatLLM::processSystemPrompt()
     if (!isModelLoaded() || m_processedSystemPrompt || m_isServer)
         return;
 
+    m_stopGenerating = false;
     auto promptFunc = std::bind(&ChatLLM::handleSystemPrompt, this, std::placeholders::_1);
     auto responseFunc = std::bind(&ChatLLM::handleSystemResponse, this, std::placeholders::_1,
         std::placeholders::_2);
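The remaining hunks make every prompt/response handler report !m_stopGenerating instead of a constant, and processSystemPrompt() clears the flag before each run, so the flag only applies to the generation it was raised against. A sketch of how such callback return values are typically consumed, under the assumption that returning false aborts generation (promptLoop and TokenCallback are hypothetical stand-ins for the llmodel backend's prompt loop):

#include <atomic>
#include <cstdint>
#include <functional>

using TokenCallback = std::function<bool(int32_t)>;

static std::atomic<bool> stopGenerating{false};

// Hypothetical stand-in for the backend's token-by-token prompt loop.
void promptLoop(const TokenCallback &onToken) {
    for (int32_t token = 0; token < 1000; ++token) {
        if (!onToken(token))
            return; // callback said stop: abandon the rest of the response
    }
}

int main() {
    stopGenerating = false;            // reset before each run, as in processSystemPrompt()
    promptLoop([](int32_t token) {
        if (token == 5)
            stopGenerating = true;     // e.g. the destructor or a Stop button fired
        return !stopGenerating.load(); // mirrors handleSystemPrompt() in this commit
    });
}

With every handler wired this way, raising the flag turns each token callback into a cancellation point, which is what lets the destructor above shut down mid-response.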