gpt4all/gpt4all-backend/gptj_impl.h


#ifndef GPTJ_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#error This file is NOT meant to be included outside of gptj.cpp. Doing so is DANGEROUS. Be sure to know what you are doing before proceeding to #define GPTJ_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#endif
#ifndef GPTJ_H
#define GPTJ_H
#include <string>
#include <functional>
#include <vector>
#include "llmodel.h"
struct GPTJPrivate;
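// GPT-J implementation of the LLModel interface. Backend/model internals are
// kept out of this header behind the GPTJPrivate pimpl.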
class GPTJ : public LLModel {
public:
    GPTJ();
    ~GPTJ();

    bool supportsEmbedding() const override { return false; }
    bool supportsCompletion() const override { return true; }
    bool loadModel(const std::string &modelPath) override;
    bool isModelLoaded() const override;
    size_t requiredMem(const std::string &modelPath) override;
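    // Serialize/restore the model's internal state to/from a caller-provided
    // buffer; stateSize() reports how many bytes saveState() requires.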
    size_t stateSize() const override;
    size_t saveState(uint8_t *dest) const override;
    size_t restoreState(const uint8_t *src) override;

    void setThreadCount(int32_t n_threads) override;
    int32_t threadCount() const override;

private:
    GPTJPrivate *d_ptr;

protected:
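    // Low-level hooks (tokenization, sampling, token evaluation) driven by the
    // shared LLModel prompt loop; see gptj.cpp for the implementation.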
    std::vector<Token> tokenize(PromptContext &, const std::string&) const override;
    Token sampleToken(PromptContext &ctx) const override;
    std::string tokenToString(Token) const override;
    bool evalTokens(PromptContext &ctx, const std::vector<int32_t> &tokens) const override;
    int32_t contextLength() const override;
    const std::vector<Token>& endTokens() const override;
};
#endif // GPTJ_H