Explicitly clear the kv cache each time we eval tokens to match n_past. (#1808)

update-llama.cpp
Authored by AT 9 months ago, committed by GitHub
parent 2d566710e5
commit 96cee4f9ac

@@ -298,6 +298,8 @@ LLModel::Token LLamaModel::sampleToken(PromptContext &promptCtx) const
 bool LLamaModel::evalTokens(PromptContext &ctx, const std::vector<int32_t> &tokens) const
 {
+    llama_kv_cache_seq_rm(d_ptr->ctx, 0, ctx.n_past, -1);
+
     llama_batch batch = llama_batch_init(tokens.size(), 0, 1);
     batch.n_tokens = tokens.size();
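
For context, below is a minimal sketch of how an evalTokens-style loop typically drives llama.cpp around this change. The `llama_kv_cache_seq_rm`, `llama_batch_init`, `llama_decode`, and `llama_batch_free` calls are real llama.cpp API from this era; the wrapper function, the `lctx`/`n_past` parameters, and the batch-filling loop are illustrative assumptions, not the actual gpt4all implementation beyond the lines shown in the diff.

```cpp
#include <cstdint>
#include <vector>
#include "llama.h"

// Sketch only: evaluate `tokens` starting at position `n_past` on sequence 0.
// The hypothetical signature stands in for LLamaModel::evalTokens.
bool evalTokensSketch(llama_context * lctx, int32_t & n_past, const std::vector<int32_t> & tokens)
{
    // The point of this commit: drop any cached KV entries from n_past onward
    // so the cache state matches the positions we are about to decode.
    llama_kv_cache_seq_rm(lctx, 0, n_past, -1);

    llama_batch batch = llama_batch_init(tokens.size(), 0, 1);
    batch.n_tokens = tokens.size();

    for (int32_t i = 0; i < batch.n_tokens; ++i) {
        batch.token   [i]    = tokens[i];
        batch.pos     [i]    = n_past + i;               // absolute position in the sequence
        batch.n_seq_id[i]    = 1;
        batch.seq_id  [i][0] = 0;                        // single sequence, id 0
        batch.logits  [i]    = (i == batch.n_tokens - 1); // logits only for the last token
    }

    const int res = llama_decode(lctx, batch);
    llama_batch_free(batch);

    if (res == 0)
        n_past += tokens.size();
    return res == 0;
}
```

The explicit `llama_kv_cache_seq_rm` makes the cache authoritative to `n_past` even after the prompt context has been rewound (e.g. when the context window is recalculated), rather than relying on the cache and `n_past` staying in sync implicitly.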
