diff --git a/gpt4all-backend/llama.cpp-mainline b/gpt4all-backend/llama.cpp-mainline
index 822a9c89..7d4ced85 160000
--- a/gpt4all-backend/llama.cpp-mainline
+++ b/gpt4all-backend/llama.cpp-mainline
@@ -1 +1 @@
-Subproject commit 822a9c894eb3770c65f0b4a724aae34605c90029
+Subproject commit 7d4ced850548642b9a1740fa25ecdef249fbf47f
diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index 0dd9de5d..167d10ee 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -519,8 +519,8 @@ DLL_EXPORT bool magic_match(const char *fname) {
     bool valid = true;
 
     static const std::vector<const char *> known_arches {
-        "baichuan", "bloom", "codeshell", "falcon", "gpt2", "llama", "mpt", "orion", "persimmon", "phi2", "plamo",
-        "qwen", "qwen2", "refact", "stablelm", "starcoder"
+        "baichuan", "bloom", "codeshell", "falcon", "gemma", "gpt2", "llama", "mpt", "orion", "persimmon", "phi2",
+        "plamo", "qwen", "qwen2", "refact", "stablelm", "starcoder"
     };
 
     if (std::find(known_arches.begin(), known_arches.end(), arch) == known_arches.end()) {
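
For context, the `known_arches` whitelist gates which GGUF architectures this backend will attempt to load: the submodule bump pulls in a llama.cpp revision that (presumably) adds `gemma` support, and the whitelist is extended to match. Below is a minimal, self-contained sketch of that membership test; `arch_is_known` and its argument are illustrative names only, since the patched `magic_match` reads the architecture string from the GGUF header itself.

```cpp
#include <algorithm>
#include <string>
#include <vector>

// Sketch of the whitelist check performed in magic_match(): the architecture
// string read from a GGUF header (e.g. "gemma") must appear in the list of
// arches the bundled llama.cpp is known to support.
bool arch_is_known(const std::string &arch) {
    static const std::vector<const char *> known_arches {
        "baichuan", "bloom", "codeshell", "falcon", "gemma", "gpt2", "llama", "mpt", "orion", "persimmon", "phi2",
        "plamo", "qwen", "qwen2", "refact", "stablelm", "starcoder"
    };
    // std::find compares each const char * entry with the std::string via the
    // mixed operator==, so this is a plain string-equality membership test.
    return std::find(known_arches.begin(), known_arches.end(), arch) != known_arches.end();
}
```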