# gpt4all/gpt4all-training/configs/train/finetune_mpt.yaml

# model/tokenizer
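# Hugging Face Hub IDs for the base model and its tokenizer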
model_name: "mosaicml/mpt-7b"
tokenizer_name: "mosaicml/mpt-7b"
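# trade compute for memory by recomputing activations in the backward pass; disabled here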
gradient_checkpointing: false
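# name the finetuned model is saved under (written in Hub repo-id style)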
save_name: "nomic-ai/mpt-finetuned-round2"
# dataset
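# load the dataset eagerly rather than streaming; preprocess with 64 worker processes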
streaming: false
num_proc: 64
dataset_path: "nomic-ai/gpt4all-j-prompt-generations"
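# dataset revision to pin (a git tag/branch on the Hub)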
revision: "v1.3-groovy"
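# maximum sequence length in tokens; batch size is assumed to be per device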
max_length: 1024
batch_size: 8
# train dynamics
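# peak learning rate; the schedule presumably decays from lr toward min_lr after warmup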
lr: 2.0e-5
min_lr: 0
weight_decay: 0.0
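# cadence settings below (eval, save, gradient logging) are assumed to count optimizer steps;
# each evaluation runs 105 batches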
eval_every: 500
eval_steps: 105
save_every: 1000
log_grads_every: 500
output_dir: "ckpts/mpt"
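# optional path to resume training from; null starts from the base model (assumed semantics)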
checkpoint: null
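# full-parameter finetune when false; true would train LoRA adapters instead (assumed)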
lora: false
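# warm the learning rate up over the first 500 steps, then train for 2 epochs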
warmup_steps: 500
num_epochs: 2
# logging
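# Weights & Biases logging is off by default; entity and project apply when enabled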
wandb: false
wandb_entity: "gpt4all"
wandb_project_name: "gpt4all"
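# reproducibility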
seed: 42