From 9e13f813d5ca33f22fec3a1043270b3cd4e3c058 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Sun, 21 May 2023 10:13:35 -0400
Subject: [PATCH] Only default mlock on macOS where swap seems to be a problem.

---
 gpt4all-backend/llamamodel.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index 50505acc..6aa92744 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -53,7 +53,11 @@ bool LLamaModel::loadModel(const std::string &modelPath)
     d_ptr->params.seed = params.seed;
     d_ptr->params.f16_kv = params.memory_f16;
     d_ptr->params.use_mmap = params.use_mmap;
+#if defined (__APPLE__)
     d_ptr->params.use_mlock = true;
+#else
+    d_ptr->params.use_mlock = params.use_mlock;
+#endif
 
     d_ptr->ctx = llama_init_from_file(modelPath.c_str(), d_ptr->params);
     if (!d_ptr->ctx) {