From 9bfff8bfcb7d3121dc350c4e372cfff4790b0123 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Sat, 20 May 2023 20:02:14 -0400
Subject: [PATCH] Add new reverse prompt for new localdocs context feature.

---
 gpt4all-backend/gptj.cpp       | 2 +-
 gpt4all-backend/llamamodel.cpp | 2 +-
 gpt4all-backend/mpt.cpp        | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/gpt4all-backend/gptj.cpp b/gpt4all-backend/gptj.cpp
index 786967c6..946fdeb6 100644
--- a/gpt4all-backend/gptj.cpp
+++ b/gpt4all-backend/gptj.cpp
@@ -983,7 +983,7 @@ void GPTJ::prompt(const std::string &prompt,
     std::string cachedResponse;
     std::vector<gpt_vocab::id> cachedTokens;
     std::unordered_set<std::string> reversePrompts
-        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant" };
+        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant", "### Context" };
 
     // predict next tokens
     int32_t totalPredictions = 0;
diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index b7cf9780..3149af82 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -179,7 +179,7 @@ void LLamaModel::prompt(const std::string &prompt,
     std::string cachedResponse;
     std::vector<llama_token> cachedTokens;
     std::unordered_set<std::string> reversePrompts
-        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant" };
+        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant", "### Context" };
 
     // predict next tokens
     int32_t totalPredictions = 0;
diff --git a/gpt4all-backend/mpt.cpp b/gpt4all-backend/mpt.cpp
index e002b63c..61e71cc4 100644
--- a/gpt4all-backend/mpt.cpp
+++ b/gpt4all-backend/mpt.cpp
@@ -908,7 +908,7 @@ void MPT::prompt(const std::string &prompt,
     std::string cachedResponse;
     std::vector<gpt_vocab::id> cachedTokens;
     std::unordered_set<std::string> reversePrompts
-        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant" };
+        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant", "### Context" };
 
     // predict next tokens
     int32_t totalPredictions = 0;
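
A note on what this one-entry change does at runtime: each backend's prompt() streams tokens into an accumulated response and checks it against reversePrompts, stopping generation before a new turn header is echoed back to the user; adding "### Context" makes all three models stop at the header the localdocs feature injects. The sketch below is a minimal, self-contained illustration of that stopping rule, not the actual GPT4All code; the helper name endsWithReversePrompt is hypothetical, and it only checks complete suffix matches (the cachedResponse/cachedTokens buffers visible in the diff context suggest the real loop also withholds partially matched output across token boundaries).

#include <iostream>
#include <string>
#include <unordered_set>

// Hypothetical helper: true once the accumulated response ends with any
// reverse prompt, i.e. the model has begun echoing a turn header and
// generation should stop before that header reaches the user.
static bool endsWithReversePrompt(const std::string &response,
                                  const std::unordered_set<std::string> &reversePrompts)
{
    for (const std::string &rp : reversePrompts) {
        if (response.size() >= rp.size()
            && response.compare(response.size() - rp.size(), rp.size(), rp) == 0)
            return true;
    }
    return false;
}

int main()
{
    // The same set this patch installs in all three backends.
    const std::unordered_set<std::string> reversePrompts
        = { "### Instruction", "### Prompt", "### Response",
            "### Human", "### Assistant", "### Context" };

    // Simulate a streamed response that runs into the localdocs header.
    std::string response = "Paris is the capital of France.\n### Context";
    std::cout << std::boolalpha
              << endsWithReversePrompt(response, reversePrompts) << '\n'; // prints: true
    return 0;
}

Checking suffixes of the accumulated text rather than individual tokens matters because a header like "### Context" usually spans several tokens, so no single token ever equals the reverse prompt on its own.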