Mirror of https://github.com/nomic-ai/gpt4all (synced 2024-11-06 09:20:33 +00:00)
mpt: allow q4_2 quantized models to load
commit 821b28a4fa
parent 49fc7b315a
@@ -236,6 +236,7 @@ bool mpt_model_load(const std::string &fname, std::istream &fin, mpt_model & mod
         case 1: wtype = GGML_TYPE_F16; break;
         case 2: wtype = GGML_TYPE_Q4_0; break;
         case 3: wtype = GGML_TYPE_Q4_1; break;
+        case 5: wtype = GGML_TYPE_Q4_2; break;
         default:
             {
                 fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
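For context, the switch above maps the integer `f16` field read from the model-file header onto a ggml weight type; this commit adds value 5, which q4_2-quantized files carry, so those models no longer fail to load. Below is a minimal standalone sketch of that mapping. The enum stand-in, the helper name `wtype_from_f16`, and the `case 0` (f32) branch are illustrative assumptions, not the upstream API; only the cases shown in the diff are confirmed by this commit.

    #include <cstdio>

    // Illustrative stand-in for ggml's type enum; the real values live in ggml.h.
    enum ggml_type_sketch {
        GGML_TYPE_F32,
        GGML_TYPE_F16,
        GGML_TYPE_Q4_0,
        GGML_TYPE_Q4_1,
        GGML_TYPE_Q4_2,
        GGML_TYPE_COUNT, // sentinel used here to mean "unrecognized"
    };

    // Hypothetical helper mirroring the switch in mpt_model_load: translate
    // the header's "f16" field into a weight type, returning the sentinel on
    // unknown values so the caller can reject the file.
    static ggml_type_sketch wtype_from_f16(int f16) {
        switch (f16) {
            case 0: return GGML_TYPE_F32; // assumed from the usual ggml loader pattern, not in this diff
            case 1: return GGML_TYPE_F16;
            case 2: return GGML_TYPE_Q4_0;
            case 3: return GGML_TYPE_Q4_1;
            case 5: return GGML_TYPE_Q4_2; // newly accepted by this commit
            default: return GGML_TYPE_COUNT;
        }
    }

    int main() {
        int f16 = 5; // value a q4_2-quantized model file would carry
        ggml_type_sketch wtype = wtype_from_f16(f16);
        if (wtype == GGML_TYPE_COUNT) {
            std::fprintf(stderr, "invalid model file (bad f16 value %d)\n", f16);
            return 1;
        }
        std::printf("f16 value %d maps to weight type %d\n", f16, (int)wtype);
        return 0;
    }

Returning a sentinel keeps the error handling in one place: the caller can emit the same "bad f16 value" diagnostic the real loader prints in its default branch and abort the load.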