mirror of https://github.com/nomic-ai/gpt4all
convert scripts: add feed-forward length for better compatibility
This GGUF key is used by all llama.cpp models with upstream support.
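For context, a minimal sketch of how this key ends up in a GGUF file, assuming the gguf-py package that llama.cpp ships (pip install gguf); the file name and dimensions below are illustrative, not taken from this commit:

# A minimal sketch, assuming llama.cpp's gguf-py package is installed.
from gguf import GGUFWriter

gguf_writer = GGUFWriter("model.gguf", arch="gptj")
# Stored under the key "{arch}.feed_forward_length", here "gptj.feed_forward_length".
# GPT-style MLPs conventionally expand the embedding width 4x, so an
# illustrative n_embd of 4096 gives a feed-forward length of 16384.
gguf_writer.add_feed_forward_length(4 * 4096)
gguf_writer.write_header_to_file()
gguf_writer.write_kv_data_to_file()
gguf_writer.close()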
This commit is contained in:
parent cc7675d432
commit f9deb87d20
@@ -70,6 +70,7 @@ gguf_writer.add_name("GPT-J")
 gguf_writer.add_context_length(config.n_positions)
 gguf_writer.add_embedding_length(config.n_embd)
 gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(4 * config.n_embd)
 gguf_writer.add_head_count(config.n_head)
 gguf_writer.add_rope_dimension_count(config.rotary_dim)
 gguf_writer.add_layer_norm_eps(config.layer_norm_epsilon)
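The hard-coded 4 * config.n_embd matches the Hugging Face GPTJConfig default, where n_inner is None and falls back to 4 * n_embd. A sketch of deriving the value from the config instead of assuming the ratio (the checkpoint name is illustrative):

from transformers import GPTJConfig

config = GPTJConfig.from_pretrained("EleutherAI/gpt-j-6b")
# GPTJConfig.n_inner defaults to None, which means 4 * n_embd.
n_ff = config.n_inner if config.n_inner is not None else 4 * config.n_embd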
@@ -65,6 +65,7 @@ gguf_writer.add_name("MPT")
 gguf_writer.add_context_length(config.max_seq_len)
 gguf_writer.add_embedding_length(config.d_model)
 gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(4 * config.d_model)
 gguf_writer.add_head_count(config.n_heads)
 gguf_writer.add_max_alibi_bias(config.attn_config.alibi_bias_max)
 gguf_writer.add_layer_norm_eps(config.layer_norm_epsilon)
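For MPT the ratio is also exposed directly: the model's custom config class has an expansion_ratio field (default 4), so the value could be derived rather than hard-coded. A hedged sketch, assuming a checkpoint with MPT's remote config code (the checkpoint name is illustrative):

from transformers import AutoConfig

# trust_remote_code is needed because MPT uses a custom config class.
config = AutoConfig.from_pretrained("mosaicml/mpt-7b", trust_remote_code=True)
n_ff = getattr(config, "expansion_ratio", 4) * config.d_model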
@@ -50,6 +50,7 @@ gguf_writer.add_name("Replit")
 gguf_writer.add_context_length(config.max_seq_len)
 gguf_writer.add_embedding_length(config.d_model)
 gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(4 * config.d_model)
 gguf_writer.add_head_count(config.n_heads)
 gguf_writer.add_max_alibi_bias(config.attn_config.alibi_bias_max)
 gguf_writer.add_layer_norm_eps(config.layer_norm_epsilon)
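To confirm the key actually landed in a converted file, a reading-side sketch, assuming a gguf-py version recent enough to ship GGUFReader; the path and architecture string are assumptions, not taken from these scripts:

from gguf import GGUFReader

# Assumption: "model.gguf" was produced by one of these convert scripts and
# ARCH is the architecture string the script passed to GGUFWriter.
ARCH = "mpt"
reader = GGUFReader("model.gguf")
field = reader.get_field(f"{ARCH}.feed_forward_length")
if field is not None:
    # Scalar values sit in field.parts at the index recorded in field.data.
    print(int(field.parts[field.data[0]][0]))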