# model/tokenizer
model_name: # REPLACE HERE with the base llama model
tokenizer_name: # REPLACE HERE with the llama tokenizer
lora: true
lora_path: "nomic-ai/gpt4all-lora"

max_new_tokens: 512
temperature: 0
prompt: null
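
# Note on filling this in (a sketch, not part of the original config): the two
# REPLACE HERE fields are left for the user; the paths below are placeholder
# examples only, assuming a LoRA-compatible LLaMA checkpoint and its matching
# tokenizer converted to Hugging Face format:
#
#   model_name: "path/to/llama-7b-hf"
#   tokenizer_name: "path/to/llama-7b-hf"
#
# With lora: true, the adapter weights at lora_path ("nomic-ai/gpt4all-lora" on
# the Hugging Face Hub) are applied on top of that base model. temperature: 0
# typically yields deterministic (greedy) decoding, and prompt: null presumably
# means the prompt is supplied at run time rather than hard-coded here.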