Remove these stop-token checks, as the issue is mitigated by the repeat penalty, and models really should be trained not to emit these tokens in the first place.

pull/520/head
Adam Treat 1 year ago
parent ef2e1bd4fe
commit 9f323759ce

@ -297,11 +297,6 @@ bool LLMObject::handleResponse(int32_t token, const std::string &response)
Q_ASSERT(!response.empty());
m_response.append(response);
emit responseChanged();
// Stop generation if we encounter prompt or response tokens
QString r = QString::fromStdString(m_response);
if (r.contains("### Prompt:") || r.contains("### Response:"))
return false;
return !m_stopGenerating;
}

Loading…
Cancel
Save