llamamodel: add gemma model support

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
pull/1993/head
Authored by Jared Van Bortel; committed by AT
parent 896fc6fbb7
commit 7810b757c9

@@ -1 +1 @@
-Subproject commit 822a9c894eb3770c65f0b4a724aae34605c90029
+Subproject commit 7d4ced850548642b9a1740fa25ecdef249fbf47f

@@ -519,8 +519,8 @@ DLL_EXPORT bool magic_match(const char *fname) {
     bool valid = true;
     static const std::vector<const char *> known_arches {
-        "baichuan", "bloom", "codeshell", "falcon", "gpt2", "llama", "mpt", "orion", "persimmon", "phi2", "plamo",
-        "qwen", "qwen2", "refact", "stablelm", "starcoder"
+        "baichuan", "bloom", "codeshell", "falcon", "gemma", "gpt2", "llama", "mpt", "orion", "persimmon", "phi2",
+        "plamo", "qwen", "qwen2", "refact", "stablelm", "starcoder"
     };
     if (std::find(known_arches.begin(), known_arches.end(), arch) == known_arches.end()) {

Loading…
Cancel
Save