mirror of https://github.com/nomic-ai/gpt4all (synced 2024-11-18 03:25:46 +00:00)
add Falcon 7B model
Tested with https://huggingface.co/TheBloke/falcon-7b-instruct-GGML/blob/main/falcon7b-instruct.ggmlv3.q4_0.bin
parent b8464073b8
commit 198b5e4832
gpt4all-backend/CMakeLists.txt
@@ -117,6 +117,10 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
         gptj.cpp utils.h utils.cpp llmodel_shared.cpp)
     prepare_target(gptj ggml-230511)
 
+    add_library(falcon-${BUILD_VARIANT} SHARED
+        falcon.cpp utils.h utils.cpp llmodel_shared.cpp)
+    prepare_target(falcon llama-mainline)
+
     add_library(mpt-${BUILD_VARIANT} SHARED
         mpt.cpp utils.h utils.cpp llmodel_shared.cpp)
     prepare_target(mpt ggml-230511)
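Each of these per-variant libraries is loaded as a plugin, and model files are routed to a backend by probing exported hooks such as magic_match (visible in the llamamodel.cpp hunk below). A minimal sketch of the shape such a hook takes, assuming the suppressed falcon.cpp exports a counterpart; the magic constant is a placeholder, not a value taken from this commit.

// Hypothetical falcon-side magic_match, mirroring the llama one shown below.
// extern "C" stands in for the project's DLL_EXPORT macro, and 0xDEADBEEF is
// a stand-in, NOT the real falcon GGML container magic.
#include <cstdint>
#include <istream>

extern "C" bool magic_match(std::istream &f) {
    uint32_t magic = 0;
    f.read(reinterpret_cast<char*>(&magic), sizeof(magic));
    return magic == 0xDEADBEEF; // placeholder: compare against the falcon file magic
}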
gpt4all-backend/falcon.cpp (new file, 1030 lines)
File diff suppressed because it is too large
gpt4all-backend/falcon_impl.h (new file, 40 lines)
@@ -0,0 +1,40 @@
+#ifndef FALCON_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
+#error This file is NOT meant to be included outside of falcon.cpp. Doing so is DANGEROUS. Be sure to know what you are doing before proceeding to #define FALCON_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
+#endif
+#ifndef FALCON_H
+#define FALCON_H
+
+#include <string>
+#include <functional>
+#include <vector>
+#include <memory>
+#include "llmodel.h"
+
+struct FalconPrivate;
+class Falcon : public LLModel {
+public:
+    Falcon();
+    ~Falcon();
+
+    bool loadModel(const std::string &modelPath) override;
+    bool isModelLoaded() const override;
+    size_t requiredMem(const std::string &modelPath) override;
+    size_t stateSize() const override;
+    size_t saveState(uint8_t *dest) const override;
+    size_t restoreState(const uint8_t *src) override;
+    void setThreadCount(int32_t n_threads) override;
+    int32_t threadCount() const override;
+
+private:
+    std::unique_ptr<FalconPrivate> d_ptr;
+
+protected:
+    std::vector<Token> tokenize(PromptContext &, const std::string&) const override;
+    Token sampleToken(PromptContext &ctx) const override;
+    std::string tokenToString(Token) const override;
+    bool evalTokens(PromptContext &ctx, const std::vector<int32_t> &tokens) const override;
+    int32_t contextLength() const override;
+    const std::vector<Token>& endTokens() const override;
+};
+
+#endif // FALCON_H
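The implementation diff above is suppressed, but falcon_impl.h fixes its overall shape: a PIMPL class whose ggml state hides behind the forward-declared FalconPrivate. Below is a partial, hypothetical skeleton of the plumbing falcon.cpp must contain; the FalconPrivate fields are invented for illustration, and the remaining overrides are omitted.

// Hypothetical PIMPL skeleton for the suppressed falcon.cpp. The class
// interface comes from falcon_impl.h above; the private fields are guesses.
#define FALCON_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#include "falcon_impl.h"
#include <cstdint>

struct FalconPrivate {
    // Illustrative fields only; the real ones hold ggml contexts, weights,
    // and tokenizer state.
    bool modelLoaded = false;
    int32_t n_threads = 4;
};

Falcon::Falcon() : d_ptr(new FalconPrivate) {}

// Defined here, where FalconPrivate is a complete type, so that
// std::unique_ptr<FalconPrivate> can instantiate its deleter.
Falcon::~Falcon() = default;

void Falcon::setThreadCount(int32_t n_threads) { d_ptr->n_threads = n_threads; }
int32_t Falcon::threadCount() const { return d_ptr->n_threads; }
bool Falcon::isModelLoaded() const { return d_ptr->modelLoaded; }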
gpt4all-backend/llamamodel.cpp
@@ -282,14 +282,15 @@ DLL_EXPORT bool magic_match(std::istream& f) {
     if (!(version LLAMA_VERSIONS)) {
         return false;
     }
+    llama_file_hparams hparams;
+    f.read(reinterpret_cast<char*>(&hparams), sizeof(hparams));
+    if (!(hparams.n_vocab >= 32000 && hparams.n_vocab <= 32100)) {
+        return false; // not a llama.
+    }
 #ifdef GGML_USE_METAL
     // Check quant supported on metal
-    // skip fields
-    off_t offset = sizeof(uint32_t) * 6; // n_vocab, n_embd, n_mult, n_head, n_layer, n_rot
-    f.seekg(offset, std::ios_base::cur);
-    uint32_t ftype;
-    f.read(reinterpret_cast<char*>(&ftype), sizeof(ftype)); // ftype
-    switch((enum llama_ftype) ftype) {
+    switch(hparams.ftype) {
         // currently supported on Metal https://github.com/ggerganov/llama.cpp/blob/ae9663f1887513e152839e91f61c513075a19422/ggml-metal.m#L51-L55
         case LLAMA_FTYPE_MOSTLY_F16:
         case LLAMA_FTYPE_MOSTLY_Q2_K:
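The one-shot f.read above only works if llama_file_hparams lays its fields out in the file's on-disk order; the deleted seek computed its offset from the same fields. A reconstruction of that layout, inferred from the removed offset comment rather than copied from the source, so verify against the real definition before reuse:

// Reconstructed header layout magic_match reads in one shot. Field order
// follows the removed comment (n_vocab, n_embd, n_mult, n_head, n_layer,
// n_rot) plus the trailing ftype the old code read separately.
#include <cstdint>

struct llama_file_hparams {
    uint32_t n_vocab;
    uint32_t n_embd;
    uint32_t n_mult;
    uint32_t n_head;
    uint32_t n_layer;
    uint32_t n_rot;
    uint32_t ftype;
};

With this layout, a single read replaces the seek-then-read dance, and hparams.n_vocab doubles as the llama-vs-falcon discriminator: LLaMA tokenizers sit in [32000, 32100], so anything else falls through to the next backend.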
gpt4all-chat/CMakeLists.txt
@@ -178,6 +178,8 @@ install(TARGETS llamamodel-mainline-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 if(APPLE)
 install(TARGETS llamamodel-mainline-metal DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 endif()
+install(TARGETS falcon-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
+install(TARGETS falcon-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 install(TARGETS mpt-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 install(TARGETS mpt-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 install(TARGETS replit-mainline-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
gpt4all-chat/chatllm.cpp
@@ -224,6 +224,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             case 'G': m_llModelType = LLModelType::GPTJ_; break;
             case 'M': m_llModelType = LLModelType::MPT_; break;
             case 'R': m_llModelType = LLModelType::REPLIT_; break;
+            case 'F': m_llModelType = LLModelType::FALCON_; break;
             default:
                 {
                     delete std::exchange(m_llModelInfo.model, nullptr);
gpt4all-chat/chatllm.h
@@ -14,7 +14,8 @@ enum LLModelType {
     GPTJ_,
     LLAMA_,
     CHATGPT_,
-    REPLIT_
+    REPLIT_,
+    FALCON_
 };
 
 struct LLModelInfo {
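The last two hunks register the backend with the chat layer: chatllm.cpp keys each LLModelType off a single dispatch character, and chatllm.h grows the enum to match. A self-contained sketch of that mapping follows; typeForKey is a made-up helper, and the reading that the key is the first letter of the backend's model-type name ('G' for GPT-J, 'F' for Falcon) is inferred from the cases, not stated by the commit.

#include <optional>

// Mirror of the enum in chatllm.h. MPT_ is referenced by the chatllm.cpp
// switch but falls outside the hunk's context lines, so its position here
// is a guess.
enum LLModelType { GPTJ_, LLAMA_, MPT_, CHATGPT_, REPLIT_, FALCON_ };

// Hypothetical helper showing the dispatch this commit extends.
std::optional<LLModelType> typeForKey(char key) {
    switch (key) {
        case 'G': return GPTJ_;   // GPT-J
        case 'M': return MPT_;    // MPT
        case 'R': return REPLIT_; // Replit
        case 'F': return FALCON_; // Falcon, added by this commit
        default:  return std::nullopt; // unknown: caller deletes the model, as in the default branch above
    }
}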