#define LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#include "llamamodel_impl.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <map>
#include <string>
#include <vector>
#include <iostream>
#if defined(_WIN32) && defined(_MSC_VER)
    #define WIN32_LEAN_AND_MEAN
    #ifndef NOMINMAX
        #define NOMINMAX
    #endif
    #include <windows.h>
    #include <io.h>
    #include <stdio.h>
#else
    #include <unistd.h>
#endif
#include <random>
#include <thread>
#include <unordered_set>

#include <llama.h>
#include <ggml.h>

namespace {
const char *modelType_ = "LLaMA";
}

struct gpt_params {
    int32_t seed   = -1; // RNG seed
    int32_t n_keep = 0;  // number of tokens to keep from initial prompt
#if LLAMA_DATE <= 230511
    int32_t n_parts = -1; // number of model parts (-1 = determine from model dimensions)
#endif

#if LLAMA_DATE >= 230519
    // sampling parameters
    float tfs_z     = 1.0f; // 1.0 = disabled
    float typical_p = 1.0f; // 1.0 = disabled
#endif

    std::string prompt = "";

    bool memory_f16 = true;  // use f16 instead of f32 for memory kv
    bool use_mmap   = true;  // use mmap for faster loads
    bool use_mlock  = false; // use mlock to keep model in memory
};

#if LLAMA_DATE >= 230519
static int llama_sample_top_p_top_k(
        llama_context *ctx,
        const llama_token *last_n_tokens_data,
        int last_n_tokens_size,
        int top_k,
        float top_p,
        float temp,
        float repeat_penalty) {
    auto logits = llama_get_logits(ctx);
    auto n_vocab = llama_n_vocab(ctx);
    // Populate initial list of all candidates
    std::vector<llama_token_data> candidates;
    candidates.reserve(n_vocab);
    for (int token_id = 0; token_id < n_vocab; token_id++) {
        candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
    }
    llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false};
    // Apply the repetition penalty to the most recent tokens
    llama_sample_repetition_penalty(nullptr, &candidates_p, last_n_tokens_data, last_n_tokens_size, repeat_penalty);
    // Temperature sampling
    llama_sample_top_k(ctx, &candidates_p, top_k, 1);
    llama_sample_tail_free(ctx, &candidates_p, 1.0f, 1);
    llama_sample_typical(ctx, &candidates_p, 1.0f, 1);
    llama_sample_top_p(ctx, &candidates_p, top_p, 1);
    llama_sample_temperature(ctx, &candidates_p, temp);
    return llama_sample_token(ctx, &candidates_p);
}
#endif

struct LLamaPrivate {
    const std::string modelPath;
    bool modelLoaded;
    llama_context *ctx = nullptr;
    llama_context_params params;
    int64_t n_threads = 0;
    bool empty = true;
};

LLamaModel::LLamaModel()
    : d_ptr(new LLamaPrivate) {
    d_ptr->modelLoaded = false;
}

bool LLamaModel::loadModel(const std::string &modelPath)
{
    // load the model
    d_ptr->params = llama_context_default_params();

    gpt_params params;
    d_ptr->params.n_ctx     = 2048;
    d_ptr->params.seed      = params.seed;
    d_ptr->params.f16_kv    = params.memory_f16;
    d_ptr->params.use_mmap  = params.use_mmap;
    d_ptr->params.use_mlock = params.use_mlock;
#if LLAMA_DATE <= 230511
    d_ptr->params.n_parts   = params.n_parts;
#endif

    d_ptr->ctx = llama_init_from_file(modelPath.c_str(), d_ptr->params);
    if (!d_ptr->ctx) {
        std::cerr << "LLAMA ERROR: failed to load model from " << modelPath << std::endl;
        return false;
    }

    d_ptr->n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
    d_ptr->modelLoaded = true;
    fflush(stderr);
    return true;
}

void LLamaModel::setThreadCount(int32_t n_threads) {
    d_ptr->n_threads = n_threads;
}

int32_t LLamaModel::threadCount() const {
    return d_ptr->n_threads;
}

LLamaModel::~LLamaModel()
{
    llama_free(d_ptr->ctx);
}

bool LLamaModel::isModelLoaded() const
{
    return d_ptr->modelLoaded;
}

size_t LLamaModel::stateSize() const
{
    return llama_get_state_size(d_ptr->ctx);
}

size_t LLamaModel::saveState(uint8_t *dest) const
{
    return llama_copy_state_data(d_ptr->ctx, dest);
}
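
// Usage sketch (illustrative only): how a caller might pair the state API above
// to snapshot and later restore the llama.cpp context. The `model` object and
// buffer name are hypothetical.
//
//     std::vector<uint8_t> buf(model.stateSize()); // size reported by llama_get_state_size
//     model.saveState(buf.data());                 // serialize the current context state
//     /* ...generate more tokens... */
//     model.restoreState(buf.data());              // roll the context back to the snapshot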

size_t LLamaModel::restoreState(const uint8_t *src)
{
    // const_cast is required, see: https://github.com/ggerganov/llama.cpp/pull/1540
    return llama_set_state_data(d_ptr->ctx, const_cast<uint8_t*>(src));
}

void LLamaModel::prompt(const std::string &prompt,
        std::function<bool(int32_t)> promptCallback,
        std::function<bool(int32_t, const std::string&)> responseCallback,
        std::function<bool(bool)> recalculateCallback,
        PromptContext &promptCtx) {

    if (!isModelLoaded()) {
        std::cerr << "LLAMA ERROR: prompt won't work with an unloaded model!\n";
        return;
    }

    gpt_params params;
    params.prompt = prompt;

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    std::vector<llama_token> embd_inp(params.prompt.size() + 4);
    int n = llama_tokenize(d_ptr->ctx, params.prompt.c_str(), embd_inp.data(), embd_inp.size(), d_ptr->empty);
    assert(n >= 0);
    embd_inp.resize(n);
    d_ptr->empty = false;

    // save the context size
    promptCtx.n_ctx = llama_n_ctx(d_ptr->ctx);

    if ((int) embd_inp.size() > promptCtx.n_ctx - 4) {
        responseCallback(-1, "The prompt size exceeds the context window size and cannot be processed.");
        std::cerr << "LLAMA ERROR: The prompt is " << embd_inp.size() <<
            " tokens and the context window is " << promptCtx.n_ctx << "!\n";
        return;
    }

    promptCtx.n_predict = std::min(promptCtx.n_predict, promptCtx.n_ctx - (int) embd_inp.size());
    promptCtx.n_past = std::min(promptCtx.n_past, promptCtx.n_ctx);

    // number of tokens to keep when resetting context
    params.n_keep = (int) embd_inp.size();

    // process the prompt in batches
    size_t i = 0;
    while (i < embd_inp.size()) {
        size_t batch_end = std::min(i + promptCtx.n_batch, embd_inp.size());
        std::vector<llama_token> batch(embd_inp.begin() + i, embd_inp.begin() + batch_end);

        // Check if the context has run out...
        if (promptCtx.n_past + int32_t(batch.size()) > promptCtx.n_ctx) {
            const int32_t erasePoint = promptCtx.n_ctx * promptCtx.contextErase;
            // Erase the first percentage of context from the tokens...
            std::cerr << "LLAMA: reached the end of the context window so resizing\n";
            promptCtx.tokens.erase(promptCtx.tokens.begin(), promptCtx.tokens.begin() + erasePoint);
            promptCtx.n_past = promptCtx.tokens.size();
            recalculateContext(promptCtx, recalculateCallback);
            assert(promptCtx.n_past + int32_t(batch.size()) <= promptCtx.n_ctx);
        }

        if (!evalTokens(promptCtx, batch)) {
            std::cerr << "LLAMA ERROR: Failed to process prompt\n";
            return;
        }

        size_t tokens = batch_end - i;
        for (size_t t = 0; t < tokens; ++t) {
            if (int32_t(promptCtx.tokens.size()) == promptCtx.n_ctx)
                promptCtx.tokens.erase(promptCtx.tokens.begin());
            promptCtx.tokens.push_back(batch.at(t));
            if (!promptCallback(batch.at(t)))
                return;
        }
        promptCtx.n_past += batch.size();
        i = batch_end;
    }

    std::string cachedResponse;
    std::vector<llama_token> cachedTokens;
    std::unordered_set<std::string> reversePrompts
        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant" };

    // predict next tokens
    for (int i = 0; i < promptCtx.n_predict; i++) {

        // sample next token
        const size_t n_prev_toks = std::min((size_t) promptCtx.repeat_last_n, promptCtx.tokens.size());
        llama_token id = llama_sample_top_p_top_k(d_ptr->ctx,
            promptCtx.tokens.data() + promptCtx.tokens.size() - n_prev_toks,
            n_prev_toks, promptCtx.top_k, promptCtx.top_p, promptCtx.temp,
            promptCtx.repeat_penalty);

        // Check if the context has run out...
        if (promptCtx.n_past + 1 > promptCtx.n_ctx) {
            const int32_t erasePoint = promptCtx.n_ctx * promptCtx.contextErase;
            // Erase the first percentage of context from the tokens...
std::cerr << "LLAMA: reached the end of the context window so resizing\n"; promptCtx.tokens.erase(promptCtx.tokens.begin(), promptCtx.tokens.begin() + erasePoint); promptCtx.n_past = promptCtx.tokens.size(); recalculateContext(promptCtx, recalculateCallback); assert(promptCtx.n_past + 1 <= promptCtx.n_ctx); } if (!evalTokens(promptCtx, { id })) { std::cerr << "LLAMA ERROR: Failed to predict next token\n"; return; } promptCtx.n_past += 1; // display text if (id == llama_token_eos()) return; const std::string str = llama_token_to_str(d_ptr->ctx, id); // Check if the provided str is part of our reverse prompts bool foundPartialReversePrompt = false; const std::string completed = cachedResponse + str; if (reversePrompts.find(completed) != reversePrompts.end()) { return; } // Check if it partially matches our reverse prompts and if so, cache for (const auto &s : reversePrompts) { if (s.compare(0, completed.size(), completed) == 0) { foundPartialReversePrompt = true; cachedResponse = completed; break; } } // Regardless the token gets added to our cache cachedTokens.push_back(id); // Continue if we have found a partial match if (foundPartialReversePrompt) continue; // Empty the cache for (auto t : cachedTokens) { if (int32_t(promptCtx.tokens.size()) == promptCtx.n_ctx) promptCtx.tokens.erase(promptCtx.tokens.begin()); promptCtx.tokens.push_back(t); if (!responseCallback(t, llama_token_to_str(d_ptr->ctx, t))) return; } cachedTokens.clear(); } } bool LLamaModel::evalTokens(PromptContext &ctx, const std::vector &tokens) { return llama_eval(d_ptr->ctx, tokens.data(), tokens.size(), ctx.n_past, d_ptr->n_threads) == 0; } #if defined(_WIN32) #define DLL_EXPORT __declspec(dllexport) #else #define DLL_EXPORT __attribute__ ((visibility ("default"))) #endif extern "C" { DLL_EXPORT bool is_g4a_backend_model_implementation() { return true; } DLL_EXPORT const char *get_model_type() { return modelType_; } DLL_EXPORT const char *get_build_variant() { return GGML_BUILD_VARIANT; } DLL_EXPORT bool magic_match(std::istream& f) { // Check magic uint32_t magic = 0; f.read(reinterpret_cast(&magic), sizeof(magic)); if (magic != 0x67676a74) return false; // Check version uint32_t version = 0; f.read(reinterpret_cast(&version), sizeof(version)); return version LLAMA_VERSIONS; } DLL_EXPORT LLModel *construct() { return new LLamaModel; } }