gpt4all/gpt4all-training/configs/train/finetune_lora.yaml

# model/tokenizer
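# Hugging Face hub id or local path; the tokenizer normally matches the base model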
model_name: # CHANGE
tokenizer_name: # CHANGE
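# recompute activations in the backward pass to cut memory use at some speed cost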
gradient_checkpointing: false
save_name: # CHANGE

# dataset
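# streaming reads examples lazily via Hugging Face datasets instead of loading everything up front;
# num_proc is the worker count for (non-streaming) dataset preprocessing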
streaming: false
num_proc: 64
dataset_path: # CHANGE
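# max_length: sequences are truncated to this many tokens; batch_size is per device
# (assumption: the usual accelerate multi-GPU setup, so the global batch scales with GPU count)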
max_length: 1024
batch_size: 4

# train dynamics
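# lr is the peak learning rate after warmup; min_lr: 0 lets the schedule decay to zero
# (assumption: a cosine-style decay); weight_decay: 0.0 disables weight decay in the optimizer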
lr: 5.0e-5
min_lr: 0
weight_decay: 0.0
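# run evaluation every eval_every training steps, over eval_steps batches each time;
# write a checkpoint every save_every steps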
eval_every: 2000
eval_steps: 100
save_every: 2000
output_dir: # CHANGE
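# checkpoint: path to resume from (null starts fresh); lora: true trains low-rank
# adapters (e.g. via the peft library) instead of the full model; warmup_steps ramps the LR linearly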
checkpoint: null
lora: true
warmup_steps: 100
num_epochs: 2

# logging
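# wandb: true enables Weights & Biases logging; the entity and project must exist in your account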
wandb: true
wandb_entity: # CHANGE
wandb_project_name: # CHANGE
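# fixed RNG seed for reproducible data shuffling and initialization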
seed: 42
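
# Typical launch (an assumption based on the repo's training setup; adjust to your environment):
#   accelerate launch train.py --config configs/train/finetune_lora.yaml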