// (history artifact: 2023-05-31 21:04:01 +00:00)
# ifndef LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
# error This file is NOT meant to be included outside of llamamodel.cpp. Doing so is DANGEROUS. Be sure to know what you are doing before proceeding to #define LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
# endif
// (history artifact: 2023-04-15 19:57:32 +00:00)
# ifndef LLAMAMODEL_H
# define LLAMAMODEL_H
# include <string>
# include <functional>
# include <vector>
# include "llmodel.h"
// (history artifact: 2023-05-31 21:04:01 +00:00)
struct LLamaPrivate ;
2023-04-15 19:57:32 +00:00
class LLamaModel : public LLModel {
public :
LLamaModel ( ) ;
~ LLamaModel ( ) ;
2023-07-09 15:32:51 +00:00
bool supportsEmbedding ( ) const override { return false ; }
bool supportsCompletion ( ) const override { return true ; }
2023-04-15 19:57:32 +00:00
bool loadModel ( const std : : string & modelPath ) override ;
bool isModelLoaded ( ) const override ;
2023-06-26 19:17:34 +00:00
size_t requiredMem ( const std : : string & modelPath ) override ;
2023-05-04 19:31:41 +00:00
size_t stateSize ( ) const override ;
size_t saveState ( uint8_t * dest ) const override ;
size_t restoreState ( const uint8_t * src ) override ;
2023-04-15 19:57:32 +00:00
void setThreadCount ( int32_t n_threads ) override ;
2023-05-21 20:45:29 +00:00
int32_t threadCount ( ) const override ;
2023-04-15 19:57:32 +00:00
private :
LLamaPrivate * d_ptr ;
2023-06-04 12:59:24 +00:00
protected :
2023-06-04 23:31:00 +00:00
std : : vector < Token > tokenize ( PromptContext & , const std : : string & ) const override ;
2023-06-13 11:14:02 +00:00
std : : string tokenToString ( Token ) const override ;
2023-06-04 12:59:24 +00:00
Token sampleToken ( PromptContext & ctx ) const override ;
bool evalTokens ( PromptContext & ctx , const std : : vector < int32_t > & tokens ) const override ;
int32_t contextLength ( ) const override ;
const std : : vector < Token > & endTokens ( ) const override ;
2023-04-15 19:57:32 +00:00
} ;
// (history artifact: 2023-05-31 21:04:01 +00:00)
# endif // LLAMAMODEL_H