Better dlopen-based implementation management (Version 2)

This commit is contained in:
niansa 2023-05-31 21:37:25 +02:00 committed by Adam Treat
parent 991a0e4bd8
commit b68d359b4f
7 changed files with 80 additions and 47 deletions

View File

@ -44,11 +44,11 @@ public:
} }
template<typename T> template<typename T>
T* get(const std::string& fname) { T* get(const std::string& fname) const {
auto fres = reinterpret_cast<T*>(dlsym(chandle, fname.c_str())); auto fres = reinterpret_cast<T*>(dlsym(chandle, fname.c_str()));
return (dlerror()==NULL)?fres:nullptr; return (dlerror()==NULL)?fres:nullptr;
} }
auto get_fnc(const std::string& fname) { auto get_fnc(const std::string& fname) const {
return get<void*(...)>(fname); return get<void*(...)>(fname);
} }
}; };
@ -90,10 +90,10 @@ public:
} }
template<typename T> template<typename T>
T* get(const std::string& fname) { T* get(const std::string& fname) const {
return reinterpret_cast<T*>(GetProcAddress(chandle, fname.c_str())); return reinterpret_cast<T*>(GetProcAddress(chandle, fname.c_str()));
} }
auto get_fnc(const std::string& fname) { auto get_fnc(const std::string& fname) const {
return get<void*(...)>(fname); return get<void*(...)>(fname);
} }
}; };

View File

@ -834,8 +834,6 @@ struct GPTJPrivate {
GPTJ::GPTJ() GPTJ::GPTJ()
: d_ptr(new GPTJPrivate) { : d_ptr(new GPTJPrivate) {
modelType = modelType_;
d_ptr->model = new gptj_model; d_ptr->model = new gptj_model;
d_ptr->modelLoaded = false; d_ptr->modelLoaded = false;
} }

View File

@ -95,8 +95,6 @@ struct LLamaPrivate {
LLamaModel::LLamaModel() LLamaModel::LLamaModel()
: d_ptr(new LLamaPrivate) { : d_ptr(new LLamaPrivate) {
modelType = modelType_;
d_ptr->modelLoaded = false; d_ptr->modelLoaded = false;
} }

View File

@ -5,6 +5,7 @@
#include <vector> #include <vector>
#include <fstream> #include <fstream>
#include <filesystem> #include <filesystem>
#include <cassert>
@ -24,12 +25,27 @@ bool requires_avxonly() {
} }
static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVariant) { LLModel::Implementation::Implementation(Dlhandle &&dlhandle_) : dlhandle(std::move(dlhandle_)) {
// Collect all model implementation libraries auto get_model_type = dlhandle.get<const char *()>("get_model_type");
// NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the assert(get_model_type);
// individual models without the cleanup of the static list interfering modelType = get_model_type();
static auto* libs = new std::vector<Dlhandle>([] () { auto get_build_variant = dlhandle.get<const char *()>("get_build_variant");
std::vector<Dlhandle> fres; assert(get_build_variant);
buildVariant = get_build_variant();
magicMatch = dlhandle.get<bool(std::ifstream&)>("magic_match");
assert(magicMatch);
construct_ = dlhandle.get<LLModel *()>("construct");
assert(construct_);
}
bool LLModel::Implementation::isImplementation(const Dlhandle &dl) {
return dl.get<bool(uint32_t)>("is_g4a_backend_model_implementation");
}
const std::vector<LLModel::Implementation> &LLModel::getImplementationList() {
static auto* libs = new std::vector<LLModel::Implementation>([] () {
std::vector<LLModel::Implementation> fres;
auto search_in_directory = [&](const std::filesystem::path& path) { auto search_in_directory = [&](const std::filesystem::path& path) {
// Iterate over all libraries // Iterate over all libraries
@ -41,9 +57,10 @@ static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVa
// Add to list if model implementation // Add to list if model implementation
try { try {
Dlhandle dl(p.string()); Dlhandle dl(p.string());
if (dl.get<bool(uint32_t)>("is_g4a_backend_model_implementation")) { if (!Implementation::isImplementation(dl)) {
fres.emplace_back(std::move(dl)); continue;
} }
fres.emplace_back(Implementation(std::move(dl)));
} catch (...) {} } catch (...) {}
} }
}; };
@ -54,21 +71,24 @@ static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVa
#endif #endif
return fres; return fres;
}()); }());
// Return static result
return *libs;
}
const LLModel::Implementation* LLModel::getImplementation(std::ifstream& f, const std::string& buildVariant) {
// Iterate over all libraries // Iterate over all libraries
for (auto& dl : *libs) { for (const auto& i : getImplementationList()) {
f.seekg(0); f.seekg(0);
// Check that magic matches // Check that magic matches
auto magic_match = dl.get<bool(std::ifstream&)>("magic_match"); if (!i.magicMatch(f)) {
if (!magic_match || !magic_match(f)) {
continue; continue;
} }
// Check that build variant is correct // Check that build variant is correct
auto get_build_variant = dl.get<const char *()>("get_build_variant"); if (buildVariant != i.buildVariant) {
if (buildVariant != (get_build_variant?get_build_variant():"default")) {
continue; continue;
} }
// Looks like we're good to go, return this dlhandle // Looks like we're good to go, return this dlhandle
return &dl; return &i;
} }
// Nothing found, so return nothing // Nothing found, so return nothing
return nullptr; return nullptr;
@ -87,14 +107,9 @@ LLModel *LLModel::construct(const std::string &modelPath, std::string buildVaria
std::ifstream f(modelPath, std::ios::binary); std::ifstream f(modelPath, std::ios::binary);
if (!f) return nullptr; if (!f) return nullptr;
// Get correct implementation // Get correct implementation
auto impl = get_implementation(f, buildVariant); auto impl = getImplementation(f, buildVariant);
if (!impl) return nullptr; if (!impl) return nullptr;
f.close(); f.close();
// Get inference constructor // Construct and return llmodel implementation
auto constructor = impl->get<LLModel *()>("construct"); return impl->construct();
if (!constructor) return nullptr;
// Construct llmodel implementation
auto fres = constructor();
// Return final instance
return fres;
} }

View File

@ -1,23 +1,35 @@
#ifndef LLMODEL_H #ifndef LLMODEL_H
#define LLMODEL_H #define LLMODEL_H
#include "dlhandle.h"
#include <string> #include <string>
#include <functional> #include <functional>
#include <vector> #include <vector>
#include <string_view>
#include <fstream>
#include <cstdint> #include <cstdint>
class LLModel { class LLModel {
public: public:
explicit LLModel() {} class Implementation {
virtual ~LLModel() {} LLModel *(*construct_)();
static LLModel *construct(const std::string &modelPath, std::string buildVariant = "default"); public:
Implementation(Dlhandle&&);
virtual bool loadModel(const std::string &modelPath) = 0; static bool isImplementation(const Dlhandle&);
virtual bool isModelLoaded() const = 0;
virtual size_t stateSize() const { return 0; } std::string_view modelType, buildVariant;
virtual size_t saveState(uint8_t */*dest*/) const { return 0; } bool (*magicMatch)(std::ifstream& f);
virtual size_t restoreState(const uint8_t */*src*/) { return 0; } Dlhandle dlhandle;
LLModel *construct() const {
auto fres = construct_();
fres->implementation = this;
return fres;
}
};
struct PromptContext { struct PromptContext {
std::vector<float> logits; // logits of current context std::vector<float> logits; // logits of current context
std::vector<int32_t> tokens; // current tokens in the context window std::vector<int32_t> tokens; // current tokens in the context window
@ -33,6 +45,15 @@ public:
float contextErase = 0.75f; // percent of context to erase if we exceed the context float contextErase = 0.75f; // percent of context to erase if we exceed the context
// window // window
}; };
explicit LLModel() {}
virtual ~LLModel() {}
virtual bool loadModel(const std::string &modelPath) = 0;
virtual bool isModelLoaded() const = 0;
virtual size_t stateSize() const { return 0; }
virtual size_t saveState(uint8_t */*dest*/) const { return 0; }
virtual size_t restoreState(const uint8_t */*src*/) { return 0; }
virtual void prompt(const std::string &prompt, virtual void prompt(const std::string &prompt,
std::function<bool(int32_t)> promptCallback, std::function<bool(int32_t)> promptCallback,
std::function<bool(int32_t, const std::string&)> responseCallback, std::function<bool(int32_t, const std::string&)> responseCallback,
@ -41,15 +62,18 @@ public:
virtual void setThreadCount(int32_t /*n_threads*/) {} virtual void setThreadCount(int32_t /*n_threads*/) {}
virtual int32_t threadCount() const { return 1; } virtual int32_t threadCount() const { return 1; }
const char *getModelType() const { const Implementation& getImplementation() const {
return modelType; return *implementation;
} }
static const std::vector<Implementation>& getImplementationList();
static const Implementation *getImplementation(std::ifstream& f, const std::string& buildVariant);
static LLModel *construct(const std::string &modelPath, std::string buildVariant = "default");
protected: protected:
const Implementation *implementation;
virtual void recalculateContext(PromptContext &promptCtx, virtual void recalculateContext(PromptContext &promptCtx,
std::function<bool(bool)> recalculate) = 0; std::function<bool(bool)> recalculate) = 0;
const char *modelType;
}; };
#endif // LLMODEL_H #endif // LLMODEL_H

View File

@ -758,8 +758,6 @@ struct MPTPrivate {
MPT::MPT() MPT::MPT()
: d_ptr(new MPTPrivate) { : d_ptr(new MPTPrivate) {
modelType = modelType_;
d_ptr->model = new mpt_model; d_ptr->model = new mpt_model;
d_ptr->modelLoaded = false; d_ptr->modelLoaded = false;
} }

View File

@ -216,7 +216,7 @@ bool ChatLLM::loadModel(const QString &modelName)
m_modelInfo.model = LLModel::construct(filePath.toStdString()); m_modelInfo.model = LLModel::construct(filePath.toStdString());
if (m_modelInfo.model) { if (m_modelInfo.model) {
m_modelInfo.model->loadModel(filePath.toStdString()); m_modelInfo.model->loadModel(filePath.toStdString());
switch (m_modelInfo.model->getModelType()[0]) { switch (m_modelInfo.model->getImplementation().modelType[0]) {
case 'L': m_modelType = LLModelType::LLAMA_; break; case 'L': m_modelType = LLModelType::LLAMA_; break;
case 'G': m_modelType = LLModelType::GPTJ_; break; case 'G': m_modelType = LLModelType::GPTJ_; break;
case 'M': m_modelType = LLModelType::MPT_; break; case 'M': m_modelType = LLModelType::MPT_; break;