gpt4all/gpt4all-backend/llamamodel_impl.h
At commit 48275d0dcc
Dlopen backend 5 (#779)
Major change to the backend that allows for pluggable versions of llama.cpp/ggml. This was squash-merged from dlopen_backend_5, where the history is preserved.
2023-05-31 17:04:01 -04:00
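
The pluggable design described in the commit message is the kind of thing typically realized with dlopen/dlsym: each llama.cpp/ggml build is compiled into its own shared library, and the loader resolves a factory symbol at runtime. Below is a minimal sketch under that assumption; the exported symbol name "construct" and the loadBackend helper are illustrative, not taken from the actual gpt4all loader code.

// Hedged sketch of dlopen-based backend loading. The exported factory
// symbol "construct" and this helper are illustrative assumptions, not
// the names used by the real gpt4all loader.
#include <dlfcn.h>
#include <cstdio>
#include "llmodel.h"

static LLModel *loadBackend(const char *libPath) {
    // Open the implementation library at runtime instead of linking it in.
    void *handle = dlopen(libPath, RTLD_NOW | RTLD_LOCAL);
    if (!handle) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return nullptr;
    }
    // Resolve a plain-C factory function exported by the plugin.
    using Factory = LLModel *(*)();
    auto construct = reinterpret_cast<Factory>(dlsym(handle, "construct"));
    if (!construct) {
        std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
        dlclose(handle);
        return nullptr;
    }
    return construct(); // caller owns the returned model instance
}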


#ifndef LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#error This file is NOT meant to be included outside of llamamodel.cpp. Doing so is DANGEROUS. Be sure to know what you are doing before proceeding to #define LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#endif
#ifndef LLAMAMODEL_H
#define LLAMAMODEL_H

#include <cstdint>    // uint8_t, int32_t
#include <string>
#include <functional>
#include <vector>
#include "llmodel.h"

struct LLamaPrivate;

// llama.cpp-backed implementation of the abstract LLModel interface.
class LLamaModel : public LLModel {
public:
    LLamaModel();
    ~LLamaModel();

    bool loadModel(const std::string &modelPath) override;
    bool isModelLoaded() const override;
    size_t stateSize() const override;
    size_t saveState(uint8_t *dest) const override;
    size_t restoreState(const uint8_t *src) override;
    void prompt(const std::string &prompt,
        std::function<bool(int32_t)> promptCallback,
        std::function<bool(int32_t, const std::string&)> responseCallback,
        std::function<bool(bool)> recalculateCallback,
        PromptContext &ctx) override;
    void setThreadCount(int32_t n_threads) override;
    int32_t threadCount() const override;

protected:
    void recalculateContext(PromptContext &promptCtx,
        std::function<bool(bool)> recalculate) override;

private:
    LLamaPrivate *d_ptr; // pimpl: opaque state defined in llamamodel.cpp
};

#endif // LLAMAMODEL_H
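
For reference, here is a hedged sketch of how a caller might drive the prompt() interface declared above. The callback contract (returning false to cancel generation) is inferred from the bool return types and is an assumption, not documented behavior; PromptContext refers to the LLModel::PromptContext declared in llmodel.h, and the runPrompt helper is hypothetical.

// Assumed usage of the interface above. The "return false to cancel"
// semantics of the callbacks are an inference from the bool return
// types, not documented behavior; runPrompt is a hypothetical helper.
#include <iostream>
#include <string>
#include "llmodel.h"

void runPrompt(LLModel &model, const std::string &text) {
    LLModel::PromptContext ctx; // sampling parameters and context state
    model.prompt(
        text,
        [](int32_t) { return true; },                // per prompt token
        [](int32_t, const std::string &piece) {      // per response token
            std::cout << piece << std::flush;
            return true;                             // false would presumably stop generation
        },
        [](bool) { return true; },                   // context recalculation progress
        ctx);
}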