chatllm: use a better prompt for the generated chat name (#2322)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel 2024-05-09 09:38:19 -04:00 committed by GitHub
parent f26e8d0d87
commit 5fb9d17c00

@@ -782,13 +782,13 @@ void ChatLLM::generateName()
     if (!isModelLoaded())
         return;
-    std::string instructPrompt("### Instruction:\n%1\n### Response:\n"); // standard Alpaca
+    auto promptTemplate = MySettings::globalInstance()->modelPromptTemplate(m_modelInfo);
     auto promptFunc = std::bind(&ChatLLM::handleNamePrompt, this, std::placeholders::_1);
     auto responseFunc = std::bind(&ChatLLM::handleNameResponse, this, std::placeholders::_1, std::placeholders::_2);
     auto recalcFunc = std::bind(&ChatLLM::handleNameRecalculate, this, std::placeholders::_1);
     LLModel::PromptContext ctx = m_ctx;
-    m_llModelInfo.model->prompt("Describe response above in three words.", instructPrompt, promptFunc, responseFunc,
-        recalcFunc, ctx);
+    m_llModelInfo.model->prompt("Describe the above conversation in three words or less.",
+        promptTemplate.toStdString(), promptFunc, responseFunc, recalcFunc, ctx);
     std::string trimmed = trim_whitespace(m_nameResponse);
     if (trimmed != m_nameResponse) {
         m_nameResponse = trimmed;
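
For context: both the removed hardcoded Alpaca string and the per-model template fetched from MySettings are instruction-style wrappers in which "%1" marks where the prompt text is substituted. Below is a minimal, self-contained sketch of that substitution step; the applyTemplate helper is hypothetical and not part of this commit, it only illustrates the placeholder mechanism.

#include <iostream>
#include <string>

// Hypothetical helper: substitute the prompt into the "%1" placeholder,
// mirroring the Qt-style "%1" templates seen in the diff above.
std::string applyTemplate(std::string tmpl, const std::string &prompt) {
    auto pos = tmpl.find("%1");
    if (pos != std::string::npos)
        tmpl.replace(pos, 2, prompt);
    return tmpl;
}

int main() {
    // The old hardcoded Alpaca template, used here only as sample input.
    std::string alpaca = "### Instruction:\n%1\n### Response:\n";
    std::cout << applyTemplate(alpaca,
        "Describe the above conversation in three words or less.");
    // Prints:
    // ### Instruction:
    // Describe the above conversation in three words or less.
    // ### Response:
}

The practical effect of the commit is that name generation now reuses whatever template the user configured for the model, instead of always forcing the Alpaca format, so chat-name prompts match the format the model was actually tuned for.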