Mirror of https://github.com/nomic-ai/gpt4all
Get rid of blocking behavior for regenerate response.
parent 337c7fecac
commit 912cb2a842
@@ -63,10 +63,10 @@ void Chat::connectLLM()
     connect(this, &Chat::loadDefaultModelRequested, m_llmodel, &ChatLLM::loadDefaultModel, Qt::QueuedConnection);
     connect(this, &Chat::loadModelRequested, m_llmodel, &ChatLLM::loadModel, Qt::QueuedConnection);
     connect(this, &Chat::generateNameRequested, m_llmodel, &ChatLLM::generateName, Qt::QueuedConnection);
+    connect(this, &Chat::regenerateResponseRequested, m_llmodel, &ChatLLM::regenerateResponse, Qt::QueuedConnection);
 
     // The following are blocking operations and will block the gui thread, therefore must be fast
     // to respond to
-    connect(this, &Chat::regenerateResponseRequested, m_llmodel, &ChatLLM::regenerateResponse, Qt::BlockingQueuedConnection);
     connect(this, &Chat::resetResponseRequested, m_llmodel, &ChatLLM::resetResponse, Qt::BlockingQueuedConnection);
     connect(this, &Chat::resetContextRequested, m_llmodel, &ChatLLM::resetContext, Qt::BlockingQueuedConnection);
 }
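Background on the two Qt connection types in the hunk above: with Qt::BlockingQueuedConnection the emitting (GUI) thread is suspended until the slot has finished on the worker thread, whereas with Qt::QueuedConnection the emit merely posts an event and returns at once. Regenerating a response is slow model work, so a blocking connection froze the GUI for its duration; moving that signal to a queued connection is the point of this commit. A minimal, self-contained sketch of the difference (not part of the commit; the worker object, lambdas, and sleeps are invented for illustration):

// queued_vs_blocking.cpp - Qt::QueuedConnection vs Qt::BlockingQueuedConnection
// (uses the functor overload of QMetaObject::invokeMethod, available since Qt 5.10)
#include <QCoreApplication>
#include <QDebug>
#include <QMetaObject>
#include <QObject>
#include <QThread>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    // A plain QObject living on a worker thread, standing in for ChatLLM.
    QThread workerThread;
    QObject worker;
    worker.moveToThread(&workerThread);
    workerThread.start();

    // Qt::QueuedConnection: the call returns immediately; the lambda runs later
    // on workerThread. This is what regenerateResponseRequested now uses.
    QMetaObject::invokeMethod(&worker, [] {
        QThread::sleep(1);                       // stand-in for slow model work
        qDebug() << "queued work finished on" << QThread::currentThread();
    }, Qt::QueuedConnection);
    qDebug() << "queued invoke returned without waiting";

    // Qt::BlockingQueuedConnection: the calling (GUI) thread sleeps until the
    // lambda has run on workerThread, which is why slots connected this way
    // "must be fast to respond to".
    QMetaObject::invokeMethod(&worker, [] {
        qDebug() << "blocking work finished on" << QThread::currentThread();
    }, Qt::BlockingQueuedConnection);
    qDebug() << "blocking invoke returned only after the work was done";

    workerThread.quit();
    workerThread.wait();
    return 0;
}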
@@ -151,7 +151,7 @@ void Chat::handleLocalDocsRetrieved(const QString &uid, const QList<ResultInfo>
 
 void Chat::regenerateResponse()
 {
-    emit regenerateResponseRequested(); // blocking queued connection
+    emit regenerateResponseRequested();
 }
 
 void Chat::stopGenerating()
@@ -146,7 +146,7 @@ bool ChatLLM::loadModel(const QString &modelName)
     // We have a live model, but it isn't the one we want
     bool alreadyAcquired = isModelLoaded();
     if (alreadyAcquired) {
-        resetContextProtected();
+        resetContext();
 #if defined(DEBUG_MODEL_LOADING)
         qDebug() << "already acquired model deleted" << m_chat->id() << m_modelInfo.model;
 #endif
@@ -301,12 +301,6 @@ void ChatLLM::resetResponse()
 }
 
-void ChatLLM::resetContext()
-{
-    resetContextProtected();
-    emit sendResetContext();
-}
-
-void ChatLLM::resetContextProtected()
+void ChatLLM::resetContext()
 {
     regenerateResponse();
     m_ctx = LLModel::PromptContext();
@@ -81,14 +81,12 @@ Q_SIGNALS:
     void recalcChanged();
     void sendStartup();
     void sendModelLoaded();
-    void sendResetContext();
     void generatedNameChanged();
     void stateChanged();
     void threadStarted();
     void shouldBeLoadedChanged();
 
 protected:
-    void resetContextProtected();
     bool handlePrompt(int32_t token);
     bool handleResponse(int32_t token, const std::string &response);
     bool handleRecalculate(bool isRecalc);
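The ChatLLM hunks above and below are the cleanup this change enables. Before it, the public ChatLLM::resetContext() just called resetContextProtected() and emitted sendResetContext(), while internal callers such as ChatLLM::loadModel() and the server's completion handler (next hunk) invoked the protected helper directly. With the sendResetContext() signal deleted, the two-function split buys nothing, so the helper is folded into resetContext() and the internal call sites are updated. A self-contained sketch of the same wrapper-folding pattern (only resetContext, resetContextProtected, sendResetContext, and loadModel are names taken from the diff; the classes and everything else are invented for illustration):

// fold_wrapper.cpp - before/after sketch of collapsing a public slot + protected helper
#include <QCoreApplication>
#include <QDebug>
#include <QObject>

class LlmBefore : public QObject {
    Q_OBJECT
public:
    void loadModel() { resetContextProtected(); }   // internal caller skips the signal
public slots:
    void resetContext() { resetContextProtected(); emit sendResetContext(); }
signals:
    void sendResetContext();
protected:
    void resetContextProtected() { qDebug() << "context cleared (before)"; }
};

class LlmAfter : public QObject {
    Q_OBJECT
public:
    void loadModel() { resetContext(); }            // internal caller uses the slot directly
public slots:
    void resetContext() { qDebug() << "context cleared (after)"; } // signal and helper are gone
};

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    LlmBefore before;
    before.loadModel();
    before.resetContext();
    LlmAfter after;
    after.loadModel();
    after.resetContext();
    return 0;
}

#include "fold_wrapper.moc"   // Q_OBJECT classes defined in a .cpp need their moc output included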
@@ -286,7 +286,7 @@ QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &re
     }
 
     // don't remember any context
-    resetContextProtected();
+    resetContext();
 
     QSettings settings;
     settings.sync();