mirror of
https://github.com/nomic-ai/gpt4all
synced 2024-11-11 19:11:37 +00:00
Advanced avxonly autodetection (#744)
* Advanced avxonly requirement detection
This commit is contained in:
parent
cae757aacd
commit
92407438c8
@ -6,6 +6,24 @@
|
||||
#include <fstream>
|
||||
#include <filesystem>
|
||||
|
||||
|
||||
|
||||
// Report whether this host must fall back to the AVX-only model build,
// i.e. whether the CPU lacks AVX2 support.
// Returns false on non-x86_64 targets, where no AVX variant exists.
static bool requires_avxonly() {
#ifdef __x86_64__
#ifndef _MSC_VER
    // GCC/Clang expose runtime CPU feature probing directly.
    return !__builtin_cpu_supports("avx2");
#else
    // MSVC: query CPUID leaf 7, sub-leaf 0. AVX2 is reported in EBX bit 5
    // (regs[1] after __cpuidex fills EAX..EDX in order).
    int regs[4];
    __cpuidex(regs, 7, 0);
    const bool has_avx2 = (regs[1] & (1 << 5)) != 0;
    return !has_avx2;
#endif
#else
    // Unknown architecture — assume no AVX-only restriction applies.
    return false;
#endif
}
|
||||
|
||||
|
||||
static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVariant) {
|
||||
// Collect all model implementation libraries
|
||||
// NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the
|
||||
@ -56,14 +74,6 @@ static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVa
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// True when the CPU supports neither AVX2 nor FMA, forcing the
// AVX-only model build. Always false off x86_64.
static bool requires_avxonly() {
#ifdef __x86_64__
    const bool has_avx2 = __builtin_cpu_supports("avx2");
    const bool has_fma  = __builtin_cpu_supports("fma");
    // De Morgan of (!avx2 && !fma): AVX-only is needed only when both are absent.
    return !(has_avx2 || has_fma);
#else
    // Non-x86_64 (e.g. ARM) — no AVX builds to choose between.
    return false;
#endif
}
|
||||
|
||||
LLModel *LLModel::construct(const std::string &modelPath, std::string buildVariant) {
|
||||
//TODO: Auto-detect
|
||||
if (buildVariant == "auto") {
|
||||
|
Loading…
Reference in New Issue
Block a user