From 5f95aa9fc6969fdd15c2627ef612998f0c4f44a3 Mon Sep 17 00:00:00 2001
From: AT
Date: Sun, 4 Jun 2023 15:28:58 -0400
Subject: [PATCH] We no longer have an avx_only repository and better error
 handling for minimum hardware requirements. (#833)

---
 gpt4all-backend/llmodel.cpp    | 18 ++++++++++++++++++
 gpt4all-chat/CMakeLists.txt    | 15 +--------------
 gpt4all-chat/cmake/config.h.in |  1 -
 gpt4all-chat/llm.cpp           | 21 +++++++++++++--------
 gpt4all-chat/main.qml          |  2 +-
 gpt4all-chat/network.cpp       |  5 -----
 6 files changed, 33 insertions(+), 29 deletions(-)

diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp
index b2af4d2a..c60a4d3b 100644
--- a/gpt4all-backend/llmodel.cpp
+++ b/gpt4all-backend/llmodel.cpp
@@ -11,6 +11,20 @@
 
 std::string LLModel::m_implementations_search_path = ".";
 
+static bool has_at_least_minimal_hardware() {
+#ifdef __x86_64__
+    #ifndef _MSC_VER
+        return __builtin_cpu_supports("avx");
+    #else
+        int cpuInfo[4];
+        __cpuid(cpuInfo, 1);
+        return cpuInfo[2] & (1 << 28);
+    #endif
+#else
+    return true; // Don't know how to handle non-x86_64
+#endif
+}
+
 static bool requires_avxonly() {
 #ifdef __x86_64__
     #ifndef _MSC_VER
@@ -98,6 +112,10 @@ const LLModel::Implementation* LLModel::implementation(std::ifstream& f, const s
 }
 
 LLModel *LLModel::construct(const std::string &modelPath, std::string buildVariant) {
+
+    if (!has_at_least_minimal_hardware())
+        return nullptr;
+
     //TODO: Auto-detect CUDA/OpenCL
     if (buildVariant == "auto") {
         if (requires_avxonly()) {
diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index 51b9d517..4f2f8705 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -30,7 +30,6 @@ set(CMAKE_AUTORCC ON)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
 option(GPT4ALL_LOCALHOST OFF "Build installer for localhost repo")
-option(GPT4ALL_AVX_ONLY OFF "Build for avx only")
 option(GPT4ALL_OFFLINE_INSTALLER "Build an offline installer" OFF)
 
 # Generate a header file with the version number
@@ -215,23 +214,11 @@ elseif(GPT4ALL_OFFLINE_INSTALLER)
   cpack_ifw_add_repository("GPT4AllRepository" URL "file://${CMAKE_BINARY_DIR}/packages")
 else()
   if(${CMAKE_SYSTEM_NAME} MATCHES Linux)
-    if (GPT4ALL_AVX_ONLY)
-      cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/avx_only/linux/repository")
-    else()
       cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/linux/repository")
-    endif()
   elseif(${CMAKE_SYSTEM_NAME} MATCHES Windows)
-    #To sign the target on windows have to create a batch script add use it as a custom target and then use CPACK_IFW_EXTRA_TARGETS to set this extra target
-    if (GPT4ALL_AVX_ONLY)
-      cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/avx_only/windows/repository")
-    else()
+    #To sign the target on windows have to create a batch script add use it as a custom target and then use CPACK_IFW_EXTRA_TARGETS to set this extra target
       cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/windows/repository")
-    endif()
   elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
-    if (GPT4ALL_AVX_ONLY)
-      cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/avx_only/mac/repository")
-    else()
       cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/mac/repository")
-    endif()
   endif()
 endif()
diff --git a/gpt4all-chat/cmake/config.h.in b/gpt4all-chat/cmake/config.h.in
index e578a82d..c6b77b5b 100644
--- a/gpt4all-chat/cmake/config.h.in
+++ b/gpt4all-chat/cmake/config.h.in
@@ -2,6 +2,5 @@
 #define CONFIG_H
 
 #define APP_VERSION "@APP_VERSION@"
-#define GPT4ALL_AVX_ONLY "@GPT4ALL_AVX_ONLY@"
 
 #endif // CONFIG_H
diff --git a/gpt4all-chat/llm.cpp b/gpt4all-chat/llm.cpp
index 41478adc..075eecf9 100644
--- a/gpt4all-chat/llm.cpp
+++ b/gpt4all-chat/llm.cpp
@@ -30,15 +30,20 @@ LLM::LLM()
     connect(this, &LLM::serverEnabledChanged, m_chatListModel,
         &ChatListModel::handleServerEnabledChanged);
 
-#if defined(__x86_64__) || defined(__i386__)
-    if (QString(GPT4ALL_AVX_ONLY) == "OFF") {
-        const bool avx(__builtin_cpu_supports("avx"));
-        const bool avx2(__builtin_cpu_supports("avx2"));
-        const bool fma(__builtin_cpu_supports("fma"));
-        m_compatHardware = avx && avx2 && fma;
-        emit compatHardwareChanged();
-    }
+#if defined(__x86_64__)
+    #ifndef _MSC_VER
+        const bool minimal(__builtin_cpu_supports("avx"));
+    #else
+        int cpuInfo[4];
+        __cpuid(cpuInfo, 1);
+        const bool minimal(cpuInfo[2] & (1 << 28));
+    #endif
+#else
+    const bool minimal = true; // Don't know how to handle non-x86_64
 #endif
+
+    m_compatHardware = minimal;
+    emit compatHardwareChanged();
 }
 
 bool LLM::checkForUpdates() const
diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml
index f8493528..da4d30bb 100644
--- a/gpt4all-chat/main.qml
+++ b/gpt4all-chat/main.qml
@@ -95,7 +95,7 @@ Window {
         shouldShowBusy: false
         closePolicy: Popup.NoAutoClose
         modal: true
-        text: qsTr("Incompatible hardware detected. Please try the avx-only installer on https://gpt4all.io")
+        text: qsTr("Incompatible hardware detected. Your hardware does not meet the minimal requirements to run GPT4All. In particular, it does not seem to support AVX intrinsics. See here for more: https://en.wikipedia.org/wiki/Advanced_Vector_Extensions")
     }
 
     StartupDialog {
diff --git a/gpt4all-chat/network.cpp b/gpt4all-chat/network.cpp
index 3c003973..c72ff0c6 100644
--- a/gpt4all-chat/network.cpp
+++ b/gpt4all-chat/network.cpp
@@ -412,11 +412,6 @@ void Network::sendMixpanelEvent(const QString &ev, const QVector<KeyValue> &valu
     const QSize display = QGuiApplication::primaryScreen()->size();
     properties.insert("display", QString("%1x%2").arg(display.width()).arg(display.height()));
     properties.insert("ram", getSystemTotalRAM());
-#if defined(__x86_64__) || defined(__i386__)
-    properties.insert("avx", bool(__builtin_cpu_supports("avx")));
-    properties.insert("avx2", bool(__builtin_cpu_supports("avx2")));
-    properties.insert("fma", bool(__builtin_cpu_supports("fma")));
-#endif
#if defined(Q_OS_MAC)
     properties.insert("cpu", QString::fromStdString(getCPUModel()));
 #endif
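
The core of this change is the CPUID-based AVX gate. Below is a minimal standalone sketch of that check; it is illustrative, not repository code. The main() harness is invented for demonstration, and unlike the patch (which guards on __x86_64__ alone) this sketch also tests _M_X64, because MSVC's cl.exe defines _M_X64 rather than __x86_64__, so the patch's __cpuid branch is only compiled under toolchains such as clang-cl that define both _MSC_VER and __x86_64__.

// Standalone sketch of the minimal-hardware check this patch adds.
// CPUID leaf 1 returns feature flags in ECX; bit 28 reports AVX. On
// GCC/Clang the __builtin_cpu_supports() builtin wraps the same query.
#ifdef _MSC_VER
#include <intrin.h>   // __cpuid
#endif
#include <cstdio>

static bool has_at_least_minimal_hardware() {
#if defined(__x86_64__) || defined(_M_X64)
    #ifndef _MSC_VER
        return __builtin_cpu_supports("avx"); // GCC/Clang builtin
    #else
        int cpuInfo[4];
        __cpuid(cpuInfo, 1);           // leaf 1: processor info and feature bits
        return cpuInfo[2] & (1 << 28); // ECX bit 28 = AVX
    #endif
#else
    return true; // non-x86_64 (e.g. Apple Silicon): assume compatible, as the patch does
#endif
}

int main() {
    // Illustrative harness: exits nonzero exactly where LLModel::construct()
    // now returns nullptr on CPUs without AVX.
    if (!has_at_least_minimal_hardware()) {
        std::fprintf(stderr, "CPU lacks AVX; model loading would fail.\n");
        return 1;
    }
    std::puts("AVX present; minimal hardware requirement met.");
    return 0;
}

Built with e.g. g++ avx_check.cpp -o avx_check (file name hypothetical), this reports whether the host clears the AVX bar that the backend now enforces before constructing any model implementation, and that llm.cpp uses to drive the incompatible-hardware dialog in main.qml.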