llamamodel: fix static vector in LLamaModel::endTokens

Cebtenzzre, 2023-10-04 15:12:10 -04:00; committed by Adam Treat
parent b4d82ea289
commit 088afada49


@@ -93,6 +93,7 @@ struct LLamaPrivate {
     llama_context *ctx = nullptr;
     llama_context_params params;
     int64_t n_threads = 0;
+    std::vector<LLModel::Token> end_tokens;
 };
 
 LLamaModel::LLamaModel()
@@ -176,6 +177,8 @@ bool LLamaModel::loadModel(const std::string &modelPath)
         return false;
     }
 
+    d_ptr->end_tokens = {llama_token_eos(d_ptr->ctx)};
+
 #ifdef GGML_USE_KOMPUTE
     if (ggml_vk_has_device()) {
         std::cerr << "llama.cpp: using Vulkan on " << ggml_vk_current_device().name << std::endl;
@@ -259,8 +262,7 @@ int32_t LLamaModel::contextLength() const
 
 const std::vector<LLModel::Token> &LLamaModel::endTokens() const
 {
-    static const std::vector<LLModel::Token> fres = {llama_token_eos(d_ptr->ctx)};
-    return fres;
+    return d_ptr->end_tokens;
 }
 
 #if defined(GGML_USE_KOMPUTE)
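
For context on why the static was a bug: a function-local static in a member method is initialized on the first call and then shared across every instance, so endTokens() would keep returning the EOS token of whichever model was queried first, even after loading a model with a different EOS id. The commit instead stores the vector in LLamaPrivate and refreshes it in loadModel(). Below is a minimal, self-contained sketch of the pitfall and the fix; Token, Model, and the EOS values are hypothetical stand-ins for illustration, not the actual gpt4all code.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in types to illustrate the pitfall this commit fixes.
    using Token = int32_t;

    struct Model {
        Token eos;                      // each model file may define its own EOS id
        std::vector<Token> end_tokens;  // fixed approach: per-instance storage

        // Buggy pattern: the function-local static is initialized exactly once,
        // on the first call across *all* instances, so it permanently captures
        // whichever model happened to be queried first.
        const std::vector<Token> &endTokensStatic() const {
            static const std::vector<Token> fres = {eos};
            return fres;
        }

        // Fixed pattern: return state owned by this instance, set at load time.
        const std::vector<Token> &endTokens() const { return end_tokens; }
    };

    int main() {
        Model a{2, {2}};
        Model b{7, {7}};

        std::cout << a.endTokensStatic()[0] << '\n';  // 2: static initialized here
        std::cout << b.endTokensStatic()[0] << '\n';  // still 2: stale for model b
        std::cout << b.endTokens()[0] << '\n';        // 7: correct per-instance value
    }

With the per-instance copy, each model object reports the EOS token of the model it actually loaded, and the accessor no longer needs to consult the llama context on every call.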