Mirror of https://github.com/nomic-ai/gpt4all, synced 2024-11-02 09:40:42 +00:00
9c6c09cbd2: Major change to the backend that allows for pluggable versions of llama.cpp/ggml. This was squash-merged from dlopen_backend_5, where the history is preserved.
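The branch name dlopen_backend_5 suggests the pluggable backends are loaded at runtime via dlopen. The following is a minimal sketch of that style of plugin loading, not the repository's actual implementation: the library path ./libllamamodel.so and the factory symbol create_model are assumptions made for illustration.

#include <dlfcn.h>
#include <cstdio>

class LLModel;  // opaque interface from llmodel.h

int main() {
    // Load a backend implementation at runtime; the path is hypothetical.
    void *handle = dlopen("./libllamamodel.so", RTLD_NOW | RTLD_LOCAL);
    if (!handle) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }
    // Resolve a C-linkage factory function; the symbol name is assumed.
    using Factory = LLModel *(*)();
    auto create = reinterpret_cast<Factory>(dlsym(handle, "create_model"));
    if (!create) {
        std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
        dlclose(handle);
        return 1;
    }
    // The concrete backend is chosen at runtime rather than link time.
    LLModel *model = create();
    // ... use model through the LLModel interface ...
    dlclose(handle);
}

On most Linux toolchains this is linked with -ldl.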
40 lines · 1.2 KiB · C++
#ifndef MPT_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#error This file is NOT meant to be included outside of mpt.cpp. Doing so is DANGEROUS. Be sure to know what you are doing before proceeding to #define MPT_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#endif

#ifndef MPT_H
#define MPT_H

#include <string>
#include <functional>
#include <vector>
#include "llmodel.h"

struct MPTPrivate;
class MPT : public LLModel {
public:
    MPT();
    ~MPT();

    bool loadModel(const std::string &modelPath) override;
    bool isModelLoaded() const override;
    size_t stateSize() const override;
    size_t saveState(uint8_t *dest) const override;
    size_t restoreState(const uint8_t *src) override;
    void prompt(const std::string &prompt,
        std::function<bool(int32_t)> promptCallback,
        std::function<bool(int32_t, const std::string&)> responseCallback,
        std::function<bool(bool)> recalculateCallback,
        PromptContext &ctx) override;
    void setThreadCount(int32_t n_threads) override;
    int32_t threadCount() const override;

protected:
    void recalculateContext(PromptContext &promptCtx,
        std::function<bool(bool)> recalculate) override;

private:
    MPTPrivate *d_ptr;
};

#endif // MPT_H
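For context, here is a minimal usage sketch of the interface declared above; it is illustrative only. The model filename is hypothetical, and it assumes PromptContext is the nested LLModel::PromptContext type declared in llmodel.h and can be default-constructed.

#include <cstdio>
#include "mpt.h"

int main() {
    MPT model;
    // The model path is a placeholder, not a file shipped with the repo.
    if (!model.loadModel("ggml-mpt-7b-chat.bin"))
        return 1;

    LLModel::PromptContext ctx;  // assumed nested type from llmodel.h
    model.prompt("Hello, world!",
        // Prompt callback: return true to keep feeding prompt tokens.
        [](int32_t /*tokenId*/) { return true; },
        // Response callback: stream generated text; false stops generation.
        [](int32_t /*tokenId*/, const std::string &piece) {
            std::fputs(piece.c_str(), stdout);
            return true;
        },
        // Recalculate callback: allow context recalculation to proceed.
        [](bool /*isRecalculating*/) { return true; },
        ctx);
}

Note the pimpl pattern: the class exposes only the MPTPrivate forward declaration and a d_ptr, keeping ggml-specific state out of the public header.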