From 21df8a771eea4c4a000ff9a383c08ed4ddc9b757 Mon Sep 17 00:00:00 2001 From: Peter Gagarinov Date: Sat, 3 Jun 2023 02:15:38 +0200 Subject: [PATCH] Only default mlock on macOS where swap seems to be a problem Repeating the change that once was done in https://github.com/nomic-ai/gpt4all/pull/663 but then was overridden by https://github.com/nomic-ai/gpt4all/commit/9c6c09cbd21a91773e724bd6ddff6084747af000 Signed-off-by: Peter Gagarinov --- gpt4all-backend/llamamodel.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp index fd2e8fc9..6c94b1d8 100644 --- a/gpt4all-backend/llamamodel.cpp +++ b/gpt4all-backend/llamamodel.cpp @@ -108,7 +108,11 @@ bool LLamaModel::loadModel(const std::string &modelPath) d_ptr->params.seed = params.seed; d_ptr->params.f16_kv = params.memory_f16; d_ptr->params.use_mmap = params.use_mmap; +#if defined (__APPLE__) + d_ptr->params.use_mlock = true; +#else d_ptr->params.use_mlock = params.use_mlock; +#endif #if LLAMA_DATE <= 230511 d_ptr->params.n_parts = params.n_parts; #endif