From e7f2ff189fcc1c5cab25796e61d7045e18ff331f Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Thu, 22 Feb 2024 15:09:06 -0500
Subject: [PATCH] fix some compilation warnings on macOS

Signed-off-by: Jared Van Bortel
---
 gpt4all-backend/llama.cpp-mainline |  2 +-
 gpt4all-backend/llamamodel.cpp     | 17 +++++++++++------
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/gpt4all-backend/llama.cpp-mainline b/gpt4all-backend/llama.cpp-mainline
index 7162b641..cfb5f030 160000
--- a/gpt4all-backend/llama.cpp-mainline
+++ b/gpt4all-backend/llama.cpp-mainline
@@ -1 +1 @@
-Subproject commit 7162b64190f0bf6118336098d66b0d08566e2ce5
+Subproject commit cfb5f030658966ebdba9bf170655d97450dda50d
diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index e8d2ccbf..7a2d894a 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -260,7 +260,14 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl)
     d_ptr->model_params.progress_callback = &LLModel::staticProgressCallback;
     d_ptr->model_params.progress_callback_user_data = this;
 
-#ifdef GGML_USE_METAL
+#ifdef GGML_USE_KOMPUTE
+    if (d_ptr->device != -1) {
+        d_ptr->model_params.main_gpu = d_ptr->device;
+        d_ptr->model_params.n_gpu_layers = ngl;
+    }
+#elif defined(GGML_USE_METAL)
+    (void)ngl;
+
     if (llama_verbose()) {
         std::cerr << "llama.cpp: using Metal" << std::endl;
     }
@@ -268,11 +275,8 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl)
     // always fully offload on Metal
     // TODO(cebtenzzre): use this parameter to allow using more than 53% of system RAM to load a model
     d_ptr->model_params.n_gpu_layers = 100;
-#elif defined(GGML_USE_KOMPUTE)
-    if (d_ptr->device != -1) {
-        d_ptr->model_params.main_gpu = d_ptr->device;
-        d_ptr->model_params.n_gpu_layers = ngl;
-    }
+#else
+    (void)ngl;
 #endif
 
     d_ptr->model = llama_load_model_from_file_gpt4all(modelPath.c_str(), &d_ptr->model_params);
@@ -469,6 +473,7 @@ std::vector<LLModel::GPUDevice> LLamaModel::availableGPUDevices(size_t memoryRequired)
         return devices;
     }
 #else
+    (void)memoryRequired;
     std::cerr << __func__ << ": built without Kompute\n";
 #endif
     return {};
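
Note: the fix above silences -Wunused-parameter by casting a parameter to
void on build configurations that never read it. Below is a minimal
standalone sketch of that idiom, not part of the patch itself; USE_GPU and
configure() are hypothetical stand-ins for the patch's GGML_USE_* macros
and loadModel().

#include <iostream>

// When a parameter is consumed only under some preprocessor branches,
// cast it to void on the remaining branches so every configuration
// compiles cleanly under -Wall -Wextra.
static void configure(int ngl) {
#ifdef USE_GPU
    std::cout << "offloading " << ngl << " layers to the GPU\n";
#else
    (void)ngl; // intentionally unused in CPU-only builds
    std::cout << "CPU-only build, ngl ignored\n";
#endif
}

int main() {
    configure(100);
    return 0;
}

Compiling with and without -DUSE_GPU exercises both branches; neither
configuration emits an unused-parameter warning.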