diff --git a/gpt4all-backend/gptj.cpp b/gpt4all-backend/gptj.cpp
index 786967c6..946fdeb6 100644
--- a/gpt4all-backend/gptj.cpp
+++ b/gpt4all-backend/gptj.cpp
@@ -983,7 +983,7 @@ void GPTJ::prompt(const std::string &prompt,
     std::string cachedResponse;
     std::vector cachedTokens;
     std::unordered_set<std::string> reversePrompts
-        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant" };
+        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant", "### Context" };
 
     // predict next tokens
     int32_t totalPredictions = 0;
diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index b7cf9780..3149af82 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -179,7 +179,7 @@ void LLamaModel::prompt(const std::string &prompt,
     std::string cachedResponse;
     std::vector cachedTokens;
     std::unordered_set<std::string> reversePrompts
-        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant" };
+        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant", "### Context" };
 
     // predict next tokens
     int32_t totalPredictions = 0;
diff --git a/gpt4all-backend/mpt.cpp b/gpt4all-backend/mpt.cpp
index e002b63c..61e71cc4 100644
--- a/gpt4all-backend/mpt.cpp
+++ b/gpt4all-backend/mpt.cpp
@@ -908,7 +908,7 @@ void MPT::prompt(const std::string &prompt,
     std::string cachedResponse;
     std::vector cachedTokens;
     std::unordered_set<std::string> reversePrompts
-        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant" };
+        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant", "### Context" };
 
     // predict next tokens
     int32_t totalPredictions = 0;