diff --git a/gpt4all-backend/dlhandle.h b/gpt4all-backend/dlhandle.h
index 1c23c101..a3b56e75 100644
--- a/gpt4all-backend/dlhandle.h
+++ b/gpt4all-backend/dlhandle.h
@@ -44,11 +44,11 @@ public:
     }
 
     template<typename T>
-    T* get(const std::string& fname) {
+    T* get(const std::string& fname) const {
         auto fres = reinterpret_cast<T*>(dlsym(chandle, fname.c_str()));
         return (dlerror()==NULL)?fres:nullptr;
     }
-    auto get_fnc(const std::string& fname) {
+    auto get_fnc(const std::string& fname) const {
         return get<void*(...)>(fname);
     }
 };
@@ -90,10 +90,10 @@ public:
     }
 
     template<typename T>
-    T* get(const std::string& fname) {
+    T* get(const std::string& fname) const {
         return reinterpret_cast<T*>(GetProcAddress(chandle, fname.c_str()));
     }
-    auto get_fnc(const std::string& fname) {
+    auto get_fnc(const std::string& fname) const {
         return get<void*(...)>(fname);
     }
 };
diff --git a/gpt4all-backend/gptj.cpp b/gpt4all-backend/gptj.cpp
index 302c7ee4..5f0fbbe5 100644
--- a/gpt4all-backend/gptj.cpp
+++ b/gpt4all-backend/gptj.cpp
@@ -834,8 +834,6 @@ struct GPTJPrivate {
 GPTJ::GPTJ()
     : d_ptr(new GPTJPrivate) {
-    modelType = modelType_;
-
     d_ptr->model = new gptj_model;
     d_ptr->modelLoaded = false;
 }
diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index 9830d08a..c819aba4 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -95,8 +95,6 @@ struct LLamaPrivate {
 LLamaModel::LLamaModel()
     : d_ptr(new LLamaPrivate) {
-    modelType = modelType_;
-
     d_ptr->modelLoaded = false;
 }
diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp
index 07c56a34..f46b4c95 100644
--- a/gpt4all-backend/llmodel.cpp
+++ b/gpt4all-backend/llmodel.cpp
@@ -5,6 +5,7 @@
 #include <vector>
 #include <fstream>
 #include <filesystem>
+#include <cassert>
@@ -24,12 +25,27 @@ bool requires_avxonly() {
 }
 
 
-static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVariant) {
-    // Collect all model implementation libraries
-    // NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the
-    // individual models without the cleanup of the static list interfering
-    static auto* libs = new std::vector<Dlhandle>([] () {
-        std::vector<Dlhandle> fres;
+LLModel::Implementation::Implementation(Dlhandle &&dlhandle_) : dlhandle(std::move(dlhandle_)) {
+    auto get_model_type = dlhandle.get<const char *()>("get_model_type");
+    assert(get_model_type);
+    modelType = get_model_type();
+    auto get_build_variant = dlhandle.get<const char *()>("get_build_variant");
+    assert(get_build_variant);
+    buildVariant = get_build_variant();
+    magicMatch = dlhandle.get<bool(std::ifstream&)>("magic_match");
+    assert(magicMatch);
+    construct_ = dlhandle.get<LLModel *()>("construct");
+    assert(construct_);
+}
+
+bool LLModel::Implementation::isImplementation(const Dlhandle &dl) {
+    return dl.get<bool(uint32_t)>("is_g4a_backend_model_implementation");
+}
+
+
+const std::vector<LLModel::Implementation> &LLModel::getImplementationList() {
+    static auto* libs = new std::vector<LLModel::Implementation>([] () {
+        std::vector<LLModel::Implementation> fres;
 
         auto search_in_directory = [&](const std::filesystem::path& path) {
             // Iterate over all libraries
@@ -41,9 +57,10 @@ static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVa
                 // Add to list if model implementation
                 try {
                     Dlhandle dl(p.string());
-                    if (dl.get<bool(uint32_t)>("is_g4a_backend_model_implementation")) {
-                        fres.emplace_back(std::move(dl));
+                    if (!Implementation::isImplementation(dl)) {
+                        continue;
                     }
+                    fres.emplace_back(Implementation(std::move(dl)));
                 } catch (...) {}
             }
         };
@@ -54,21 +71,24 @@ static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVa
 #endif
         return fres;
     }());
+    // Return static result
+    return *libs;
+}
+
+const LLModel::Implementation* LLModel::getImplementation(std::ifstream& f, const std::string& buildVariant) {
     // Iterate over all libraries
-    for (auto& dl : *libs) {
+    for (const auto& i : getImplementationList()) {
         f.seekg(0);
         // Check that magic matches
-        auto magic_match = dl.get<bool(std::ifstream&)>("magic_match");
-        if (!magic_match || !magic_match(f)) {
+        if (!i.magicMatch(f)) {
             continue;
         }
         // Check that build variant is correct
-        auto get_build_variant = dl.get<const char *()>("get_build_variant");
-        if (buildVariant != (get_build_variant?get_build_variant():"default")) {
+        if (buildVariant != i.buildVariant) {
             continue;
         }
         // Looks like we're good to go, return this dlhandle
-        return &dl;
+        return &i;
     }
     // Nothing found, so return nothing
     return nullptr;
@@ -87,14 +107,9 @@ LLModel *LLModel::construct(const std::string &modelPath, std::string buildVaria
     std::ifstream f(modelPath, std::ios::binary);
     if (!f) return nullptr;
     // Get correct implementation
-    auto impl = get_implementation(f, buildVariant);
+    auto impl = getImplementation(f, buildVariant);
     if (!impl) return nullptr;
     f.close();
-    // Get inference constructor
-    auto constructor = impl->get<LLModel *()>("construct");
-    if (!constructor) return nullptr;
-    // Construct llmodel implementation
-    auto fres = constructor();
-    // Return final instance
-    return fres;
+    // Construct and return llmodel implementation
+    return impl->construct();
 }
diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h
index 4bcf2716..6b6828bf 100644
--- a/gpt4all-backend/llmodel.h
+++ b/gpt4all-backend/llmodel.h
@@ -1,23 +1,35 @@
 #ifndef LLMODEL_H
 #define LLMODEL_H
+#include "dlhandle.h"
+
 #include <string>
 #include <functional>
 #include <vector>
+#include <string_view>
+#include <fstream>
 #include <cstdint>
 
 class LLModel {
 public:
-    explicit LLModel() {}
-    virtual ~LLModel() {}
+    class Implementation {
+        LLModel *(*construct_)();
 
-    static LLModel *construct(const std::string &modelPath, std::string buildVariant = "default");
+    public:
+        Implementation(Dlhandle&&);
 
-    virtual bool loadModel(const std::string &modelPath) = 0;
-    virtual bool isModelLoaded() const = 0;
-    virtual size_t stateSize() const { return 0; }
-    virtual size_t saveState(uint8_t */*dest*/) const { return 0; }
-    virtual size_t restoreState(const uint8_t */*src*/) { return 0; }
+        static bool isImplementation(const Dlhandle&);
+
+        std::string_view modelType, buildVariant;
+        bool (*magicMatch)(std::ifstream& f);
+        Dlhandle dlhandle;
+
+        LLModel *construct() const {
+            auto fres = construct_();
+            fres->implementation = this;
+            return fres;
+        }
+    };
 
     struct PromptContext {
         std::vector<float> logits;      // logits of current context
         std::vector<int32_t> tokens;    // current tokens in the context window
@@ -31,8 +43,17 @@ public:
         float repeat_penalty = 1.10f;
         int32_t repeat_last_n = 64; // last n tokens to penalize
         float contextErase = 0.75f; // percent of context to erase if we exceed the context
-                                    // window
+                                     // window
     };
+
+    explicit LLModel() {}
+    virtual ~LLModel() {}
+
+    virtual bool loadModel(const std::string &modelPath) = 0;
+    virtual bool isModelLoaded() const = 0;
+    virtual size_t stateSize() const { return 0; }
+    virtual size_t saveState(uint8_t */*dest*/) const { return 0; }
+    virtual size_t restoreState(const uint8_t */*src*/) { return 0; }
     virtual void prompt(const std::string &prompt,
                         std::function<bool(int32_t)> promptCallback,
                         std::function<bool(int32_t, const std::string&)> responseCallback,
@@ -41,15 +62,18 @@ public:
     virtual void setThreadCount(int32_t /*n_threads*/) {}
     virtual int32_t threadCount() const { return 1; }
 
-    const char *getModelType() const {
-        return modelType;
+    const Implementation& getImplementation() const {
+        return *implementation;
     }
 
+    static const std::vector<Implementation>& getImplementationList();
+    static const Implementation *getImplementation(std::ifstream& f, const std::string& buildVariant);
+    static LLModel *construct(const std::string &modelPath, std::string buildVariant = "default");
+
 protected:
+    const Implementation *implementation;
+
     virtual void recalculateContext(PromptContext &promptCtx,
                                     std::function<bool(bool)> recalculate) = 0;
-
-    const char *modelType;
 };
-
 #endif // LLMODEL_H
diff --git a/gpt4all-backend/mpt.cpp b/gpt4all-backend/mpt.cpp
index e526f5ac..b1bbf6e9 100644
--- a/gpt4all-backend/mpt.cpp
+++ b/gpt4all-backend/mpt.cpp
@@ -758,8 +758,6 @@ struct MPTPrivate {
 MPT::MPT()
     : d_ptr(new MPTPrivate) {
-    modelType = modelType_;
-
     d_ptr->model = new mpt_model;
     d_ptr->modelLoaded = false;
 }
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 1638a4f6..a419f995 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -216,7 +216,7 @@ bool ChatLLM::loadModel(const QString &modelName)
         m_modelInfo.model = LLModel::construct(filePath.toStdString());
         if (m_modelInfo.model) {
             m_modelInfo.model->loadModel(filePath.toStdString());
-            switch (m_modelInfo.model->getModelType()[0]) {
+            switch (m_modelInfo.model->getImplementation().modelType[0]) {
             case 'L': m_modelType = LLModelType::LLAMA_; break;
             case 'G': m_modelType = LLModelType::GPTJ_; break;
             case 'M': m_modelType = LLModelType::MPT_; break;
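
Usage note (not part of the patch): the sketch below shows how a caller might exercise the refactored API introduced above. It assumes the implementation libraries are discoverable when getImplementationList() first runs; the LLModel, Implementation, getImplementationList, getImplementation, and construct names come from this diff, while the model path and printed strings are placeholders chosen only for illustration.

    // Hypothetical caller of the refactored backend API (sketch only).
    #include "llmodel.h"

    #include <iostream>
    #include <string>

    int main() {
        // Every implementation library found on disk is wrapped in an
        // LLModel::Implementation carrying its modelType and buildVariant.
        for (const auto &impl : LLModel::getImplementationList()) {
            std::cout << "implementation: " << impl.modelType
                      << " (" << impl.buildVariant << ")\n";
        }

        // construct() picks the implementation whose magic_match accepts the
        // file header and whose build variant matches ("default" here).
        const std::string modelPath = "./model.bin"; // placeholder path
        LLModel *model = LLModel::construct(modelPath);
        if (!model) {
            std::cerr << "no matching implementation for " << modelPath << "\n";
            return 1;
        }

        // The instance keeps a back-pointer to its Implementation, which is
        // what chatllm.cpp now reads instead of the removed getModelType().
        std::cout << "model type: " << model->getImplementation().modelType << "\n";

        model->loadModel(modelPath);
        delete model;
        return 0;
    }

With this patch the type and build-variant strings are resolved once per library when the Implementation is constructed and cached in its fields, so selection in getImplementation() and construct() no longer re-resolves the "magic_match", "get_build_variant", and "construct" symbols on every call.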