// Source: mirror of https://github.com/nomic-ai/gpt4all
// (synced 2024-11-06, commit 38c61493d2)
// Signed-off-by: Jared Van Bortel <jared@nomic.ai>
#pragma once
#include <cstdint>
#include <cstddef>
#include <vector>

#include <ggml.h>

struct llm_buffer {
|
|
uint8_t * addr = NULL;
|
|
size_t size = 0;
|
|
|
|
void resize(size_t size) {
|
|
delete[] addr;
|
|
addr = new uint8_t[size];
|
|
this->size = size;
|
|
}
|
|
|
|
~llm_buffer() {
|
|
delete[] addr;
|
|
}
|
|
};
|
|
|
|
struct llm_kv_cache {
|
|
struct ggml_tensor * k;
|
|
struct ggml_tensor * v;
|
|
|
|
struct ggml_context * ctx = NULL;
|
|
|
|
llm_buffer buf;
|
|
|
|
int n; // number of tokens currently in the cache
|
|
|
|
~llm_kv_cache() {
|
|
if (ctx) {
|
|
ggml_free(ctx);
|
|
}
|
|
}
|
|
};
|
|
|
|
inline void ggml_graph_compute_g4a(llm_buffer& buf, ggml_cgraph * graph, int n_threads) {
|
|
struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
|
|
if (plan.work_size > 0) {
|
|
buf.resize(plan.work_size);
|
|
plan.work_data = buf.addr;
|
|
}
|
|
ggml_graph_compute(graph, &plan);
|
|
}
|