Mirror of https://github.com/nomic-ai/gpt4all (synced 2024-11-02 09:40:42 +00:00)
Commit 6c4f449b7a:
* feat: falcon config
* feat: mpt config
* chore: gitignore
* refactor: step calculation
* fix: attention mask + shuffle on epoch end
* fix: return tensors
* fix: wait for everyone
* chore: config
* chore: ds config
* fix: remove ccols
* fix: logging and saving
* chore: add einops
49 lines · 899 B · JSON
{
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "fp16": {
        "enabled": "auto",
        "min_loss_scale": 1,
        "loss_scale_window": 1000,
        "hysteresis": 2,
        "initial_scale_power": 32
    },
    "bf16": {
        "enabled": "auto"
    },
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": 1,
        "offload_param": {
            "device": "none"
        },
        "offload_optimizer": {
            "device": "none"
        },
        "allgather_partitions": true,
        "allgather_bucket_size": 5e8,
        "contiguous_gradients": true
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": [
                0.9,
                0.999
            ],
            "eps": 1e-08
        }
    },
    "scheduler": {
        "type": "WarmupDecayLR",
        "params": {
            "warmup_min_lr": 0,
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto",
            "warmup_type": "linear",
            "total_num_steps": "auto"
        }
    }
}
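A note on the "auto" placeholders: this file follows the DeepSpeed config schema, but "auto" values are not directly consumable by deepspeed.initialize; they are resolved by Hugging Face's Trainer integration, which substitutes the batch sizes, learning rate, and step counts from its own TrainingArguments. Below is a minimal usage sketch, assuming the file is saved as ds_config.json; every hyperparameter value in it is illustrative, not taken from the repo's training scripts.

# Minimal usage sketch (assumptions: the config above is saved as
# "ds_config.json"; all hyperparameter values here are illustrative).
# The HF Trainer resolves the "auto" fields from these arguments:
# per-device batch size and accumulation -> train_micro_batch_size_per_gpu,
# gradient_accumulation_steps, train_batch_size; learning_rate -> lr and
# warmup_max_lr; warmup_steps / max_steps -> warmup_num_steps /
# total_num_steps; the bf16 flag -> bf16.enabled.
from transformers import TrainingArguments, Trainer

training_args = TrainingArguments(
    output_dir="out",                # assumed output path
    per_device_train_batch_size=8,   # assumed
    gradient_accumulation_steps=4,   # assumed
    learning_rate=2e-5,              # assumed
    warmup_steps=500,                # assumed
    max_steps=10_000,                # assumed
    bf16=True,                       # enables the bf16 block above
    deepspeed="ds_config.json",      # path to the config shown here
)
# trainer = Trainer(model=model, args=training_args, train_dataset=dataset)
# trainer.train()  # run under the `deepspeed` or `torchrun` launcher

Run under a distributed launcher, the Trainer builds the DeepSpeed engine from the merged config; using this file standalone would require replacing each "auto" with a concrete value first.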