# gpt4all/configs/inference/gptj.yaml

# model/tokenizer
model_name: "nomic-ai/gpt4all-warmup-lr-epoch_1"  # fine-tuned checkpoint on the Hugging Face Hub
tokenizer_name: "EleutherAI/gpt-j-6B"             # base GPT-J tokenizer
# dataset
streaming: false                                  # stream the dataset instead of downloading it in full
num_proc: 64                                      # worker processes for dataset preprocessing
dataset_path: "nomic-ai/turbo-500k-multi"         # dataset repo on the Hugging Face Hub
max_length: 1024                                  # maximum sequence length in tokens
batch_size: 32                                    # examples per batch
# logging
seed: 42                                          # random seed for reproducibility
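
For context, a minimal sketch of how a config like this could be consumed at inference time. This is not the repo's own inference script: it assumes PyYAML and Hugging Face transformers are installed, that model_name and tokenizer_name map directly to from_pretrained calls, and the prompt and generation settings are illustrative only.

# Sketch: load the YAML config and run a single generation with the
# model/tokenizer it names. Illustrative, not the repo's actual entry point.
import yaml
from transformers import AutoModelForCausalLM, AutoTokenizer

with open("configs/inference/gptj.yaml") as f:
    config = yaml.safe_load(f)

tokenizer = AutoTokenizer.from_pretrained(config["tokenizer_name"])
model = AutoModelForCausalLM.from_pretrained(config["model_name"])

prompt = "Write a short poem about open-source AI."
inputs = tokenizer(
    prompt,
    return_tensors="pt",
    truncation=True,
    max_length=config["max_length"],  # cap input at the configured sequence length
)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))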