mirror of
https://github.com/nomic-ai/gpt4all
synced 2024-11-02 09:40:42 +00:00
Remove these, as they are mitigated by the repeat penalty and models really should train this out.
This commit is contained in:
parent
0f195eae48
commit
037a9a6ec5
llm.cpp (5 lines changed)
@@ -297,11 +297,6 @@ bool LLMObject::handleResponse(int32_t token, const std::string &response)
     Q_ASSERT(!response.empty());
     m_response.append(response);
     emit responseChanged();
-
-    // Stop generation if we encounter prompt or response tokens
-    QString r = QString::fromStdString(m_response);
-    if (r.contains("### Prompt:") || r.contains("### Response:"))
-        return false;
     return !m_stopGenerating;
 }
 
Loading…
Reference in New Issue
Block a user