From 2a7fe95ff42151dbf55f49ad51b16491f4282d09 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Mon, 22 Jul 2024 13:32:17 -0400
Subject: [PATCH] llamamodel: always print special tokens (#2701)

Signed-off-by: Jared Van Bortel
---
 gpt4all-backend/llamamodel.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index dd2be2bf..966367b3 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -542,10 +542,10 @@ std::vector<LLModel::Token> LLamaModel::tokenize(PromptContext &ctx, const std::
 std::string LLamaModel::tokenToString(Token id) const
 {
     std::vector<char> result(8, 0);
-    const int n_tokens = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, false);
+    const int n_tokens = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, true);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, false);
+        int check = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, true);
         GGML_ASSERT(check == -n_tokens);
     } else {
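
The flipped argument is the final `special` parameter of llama.cpp's
llama_token_to_piece(); with `true`, special tokens (e.g. an end-of-turn
marker) are rendered as text rather than suppressed. Below is a minimal
standalone sketch of the same grow-and-retry pattern, assuming the
six-argument llama_token_to_piece() signature seen in the diff; the
tokenToPiece helper name is hypothetical, for illustration only.

#include "llama.h"   // llama.cpp C API, assumed available on the include path

#include <cassert>
#include <string>
#include <vector>

// Hypothetical helper (not part of the patch): detokenize one token id.
// llama_token_to_piece() returns the number of bytes written, or a negative
// value whose magnitude is the required buffer size when the buffer is too
// small; the retry with the exact size must then succeed. special=true makes
// special tokens render as text, which is what the patch changes.
static std::string tokenToPiece(const llama_model *model, llama_token id)
{
    std::vector<char> buf(8, 0);  // small first guess; most pieces fit
    int n = llama_token_to_piece(model, id, buf.data(), buf.size(),
                                 /*lstrip*/ 0, /*special*/ true);
    if (n < 0) {
        buf.resize(-n);           // -n is the exact size needed
        n = llama_token_to_piece(model, id, buf.data(), buf.size(), 0, true);
        assert(n == (int)buf.size());
    }
    return std::string(buf.data(), n);
}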