diff --git a/.gitmodules b/.gitmodules
index eb06ee48..e00584ea 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,3 @@
 [submodule "llama.cpp"]
-	path = gpt4all-chat/llmodel/llama.cpp
+	path = gpt4all-backend/llama.cpp
 	url = https://github.com/manyoso/llama.cpp.git
diff --git a/gpt4all-chat/llmodel/CMakeLists.txt b/gpt4all-backend/CMakeLists.txt
similarity index 76%
rename from gpt4all-chat/llmodel/CMakeLists.txt
rename to gpt4all-backend/CMakeLists.txt
index 704faccc..a7f1c6f0 100644
--- a/gpt4all-chat/llmodel/CMakeLists.txt
+++ b/gpt4all-backend/CMakeLists.txt
@@ -15,7 +15,11 @@ endif()
 
 # Include the binary directory for the generated header file
 include_directories("${CMAKE_CURRENT_BINARY_DIR}")
 
-project(llmodel VERSION ${APP_VERSION} LANGUAGES CXX C)
+set(LLMODEL_VERSION_MAJOR 0)
+set(LLMODEL_VERSION_MINOR 1)
+set(LLMODEL_VERSION_PATCH 0)
+set(LLMODEL_VERSION "${LLMODEL_VERSION_MAJOR}.${LLMODEL_VERSION_MINOR}.${LLMODEL_VERSION_PATCH}")
+project(llmodel VERSION ${LLMODEL_VERSION} LANGUAGES CXX C)
 
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
@@ -43,5 +47,9 @@ add_library(llmodel
 target_link_libraries(llmodel
     PRIVATE llama)
 
+set_target_properties(llmodel PROPERTIES
+    VERSION ${PROJECT_VERSION}
+    SOVERSION ${PROJECT_VERSION_MAJOR})
+
 set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
 set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install)
diff --git a/gpt4all-chat/llmodel/gptj.cpp b/gpt4all-backend/gptj.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/gptj.cpp
rename to gpt4all-backend/gptj.cpp
diff --git a/gpt4all-chat/llmodel/gptj.h b/gpt4all-backend/gptj.h
similarity index 100%
rename from gpt4all-chat/llmodel/gptj.h
rename to gpt4all-backend/gptj.h
diff --git a/gpt4all-chat/llmodel/llama.cpp b/gpt4all-backend/llama.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/llama.cpp
rename to gpt4all-backend/llama.cpp
diff --git a/gpt4all-chat/llmodel/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/llamamodel.cpp
rename to gpt4all-backend/llamamodel.cpp
diff --git a/gpt4all-chat/llmodel/llamamodel.h b/gpt4all-backend/llamamodel.h
similarity index 100%
rename from gpt4all-chat/llmodel/llamamodel.h
rename to gpt4all-backend/llamamodel.h
diff --git a/gpt4all-chat/llmodel/llmodel.h b/gpt4all-backend/llmodel.h
similarity index 100%
rename from gpt4all-chat/llmodel/llmodel.h
rename to gpt4all-backend/llmodel.h
diff --git a/gpt4all-chat/llmodel/llmodel_c.cpp b/gpt4all-backend/llmodel_c.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/llmodel_c.cpp
rename to gpt4all-backend/llmodel_c.cpp
diff --git a/gpt4all-chat/llmodel/llmodel_c.h b/gpt4all-backend/llmodel_c.h
similarity index 100%
rename from gpt4all-chat/llmodel/llmodel_c.h
rename to gpt4all-backend/llmodel_c.h
diff --git a/gpt4all-chat/llmodel/mpt.cpp b/gpt4all-backend/mpt.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/mpt.cpp
rename to gpt4all-backend/mpt.cpp
diff --git a/gpt4all-chat/llmodel/mpt.h b/gpt4all-backend/mpt.h
similarity index 100%
rename from gpt4all-chat/llmodel/mpt.h
rename to gpt4all-backend/mpt.h
diff --git a/gpt4all-chat/llmodel/scripts/convert_mpt_hf_to_ggml.py b/gpt4all-backend/scripts/convert_mpt_hf_to_ggml.py
similarity index 100%
rename from gpt4all-chat/llmodel/scripts/convert_mpt_hf_to_ggml.py
rename to gpt4all-backend/scripts/convert_mpt_hf_to_ggml.py
diff --git a/gpt4all-chat/llmodel/utils.cpp b/gpt4all-backend/utils.cpp
similarity index 100%
rename from gpt4all-chat/llmodel/utils.cpp
rename to gpt4all-backend/utils.cpp
diff --git a/gpt4all-chat/llmodel/utils.h b/gpt4all-backend/utils.h
similarity index 100%
rename from gpt4all-chat/llmodel/utils.h
rename to gpt4all-backend/utils.h
diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index fbe018b9..0ec305f2 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -52,7 +52,7 @@ get_filename_component(Qt6_ROOT_DIR "${Qt6_ROOT_DIR}/.." ABSOLUTE)
 message(STATUS "qmake binary: ${QMAKE_EXECUTABLE}")
 message(STATUS "Qt 6 root directory: ${Qt6_ROOT_DIR}")
 
-add_subdirectory(llmodel)
+add_subdirectory(../gpt4all-backend llmodel)
 
 set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 2ffbc3c7..cea13fb5 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -2,9 +2,9 @@
 #include "chat.h"
 #include "download.h"
 #include "network.h"
-#include "llmodel/gptj.h"
-#include "llmodel/llamamodel.h"
-#include "llmodel/mpt.h"
+#include "../gpt4all-backend/gptj.h"
+#include "../gpt4all-backend/llamamodel.h"
+#include "../gpt4all-backend/mpt.h"
 
 #include
 #include
diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h
index bb488b16..d134e414 100644
--- a/gpt4all-chat/chatllm.h
+++ b/gpt4all-chat/chatllm.h
@@ -4,7 +4,7 @@
 #include
 #include
 
-#include "llmodel/llmodel.h"
+#include "../gpt4all-backend/llmodel.h"
 
 class Chat;
 class ChatLLM : public QObject