Generate the new prompt/response pair before model loading in server mode.

pull/595/head^2
Adam Treat, 1 year ago, committed by AT
parent f931de21c5
commit 748e7977ca

@@ -465,7 +465,7 @@ void ChatLLM::forceUnloadModel()
 void ChatLLM::unloadModel()
 {
-    if (!isModelLoaded() || m_isServer) // FIXME: What if server switches models?
+    if (!isModelLoaded() || m_isServer)
         return;
     saveState();
@@ -479,7 +479,7 @@ void ChatLLM::unloadModel()
 void ChatLLM::reloadModel()
 {
-    if (isModelLoaded() || m_isServer) // FIXME: What if server switches models?
+    if (isModelLoaded() || m_isServer)
         return;
 #if defined(DEBUG_MODEL_LOADING)
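
Note: the FIXME comments are dropped rather than deferred — with this commit, server chats handle their own model loading per-request inside Server::handleCompletionRequest (see the hunks below), so unloadModel() and reloadModel() can simply skip server chats.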

@@ -170,18 +170,6 @@ QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &re
         }
     }
-    setShouldBeLoaded(true);
-    if (!foundModel) {
-        if (!loadDefaultModel()) {
-            std::cerr << "ERROR: couldn't load default model " << model.toStdString() << std::endl;
-            return QHttpServerResponse(QHttpServerResponder::StatusCode::BadRequest);
-        }
-    } else if (!loadModel(model)) {
-        std::cerr << "ERROR: couldn't load model " << model.toStdString() << std::endl;
-        return QHttpServerResponse(QHttpServerResponder::StatusCode::InternalServerError);
-    }
     // We only support one prompt for now
     QList<QString> prompts;
     if (body.contains("prompt")) {
@@ -284,6 +272,19 @@ QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &re
     // adds prompt/response items to GUI
     emit requestServerNewPromptResponsePair(actualPrompt); // blocks
+    // load the new model if necessary
+    setShouldBeLoaded(true);
+    if (!foundModel) {
+        if (!loadDefaultModel()) {
+            std::cerr << "ERROR: couldn't load default model " << model.toStdString() << std::endl;
+            return QHttpServerResponse(QHttpServerResponder::StatusCode::BadRequest);
+        }
+    } else if (!loadModel(model)) {
+        std::cerr << "ERROR: couldn't load model " << model.toStdString() << std::endl;
+        return QHttpServerResponse(QHttpServerResponder::StatusCode::InternalServerError);
+    }
     // don't remember any context
     resetContextProtected();
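
For readability, below is a condensed sketch of the handler's control flow after this commit — not the literal file contents; the parsing and generation steps outside the diff are paraphrased as comments, and `model`, `foundModel`, and `actualPrompt` are assumed to have been filled in by the elided request parsing. The point of the reorder is that the prompt/response pair appears in the GUI before any (potentially slow) model load begins.

// Condensed sketch of Server::handleCompletionRequest after this commit.
QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &request)
{
    // 1. Parse the JSON body and resolve the requested model name
    //    (sets `model` and `foundModel`; elided here).

    // 2. Extract the prompt and echo it into the GUI *first*, so the new
    //    prompt/response pair exists before any model loading starts.
    emit requestServerNewPromptResponsePair(actualPrompt); // blocks

    // 3. Only now load the (possibly different) model.
    setShouldBeLoaded(true);
    if (!foundModel) {
        if (!loadDefaultModel())
            return QHttpServerResponse(QHttpServerResponder::StatusCode::BadRequest);
    } else if (!loadModel(model)) {
        return QHttpServerResponse(QHttpServerResponder::StatusCode::InternalServerError);
    }

    // 4. Start from a clean slate and run the prompt.
    resetContextProtected();
    // ... generate the completion and build the JSON response (elided).
}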
