Merge branch 'train' of github.com:nomic-ai/gpt4all into train
Commit 29cb9d700a
.gitignore (2 changes)
@@ -1,6 +1,6 @@
 *.jsonl
 *tar.gz
-ckpts/
+ckpts**
 wandb
 # Byte-compiled / optimized / DLL files
 __pycache__/
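Note: switching "ckpts/" to "ckpts**" broadens the rule from the ckpts directory alone to any file or directory whose name begins with ckpts (for example ckpts-llama or ckpts_old); in .gitignore, consecutive asterisks in this position behave like a single *, matching any trailing string within a name.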
clean.py (6 changes)
@@ -6,8 +6,10 @@ import jsonlines
 import pandas as pd


-prompt_generation_dir = "prompts-reponses"
+prompt_generation_dir = "raw_data_sanity_cleaned_without_p3/"
 for file in glob.glob(os.path.join(prompt_generation_dir, "*.jsonl")):
+    if "clean.jsonl" in file:
+        continue
     data = []
     print(file)
     with open(file) as f:
@@ -67,5 +69,5 @@ for file in glob.glob(os.path.join(prompt_generation_dir, "*.jsonl")):
     print(f"Removed {prev_len - curr_len} rows")

     clean_name = file.split(".jsonl")[0] + "_clean.jsonl"
-    print(f"writing to {clean_name}")
+    print(f"writing to 9,205 rows to {clean_name}")
     df.to_json(clean_name, orient="records", lines=True)
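The new guard matters because clean.py writes its *_clean.jsonl outputs into the same directory it globs, so a second run would otherwise re-clean its own output. A minimal sketch of the resulting loop shape; the pandas dedup line is a hypothetical stand-in for clean.py's real filtering, which this diff does not show:

    import glob
    import os

    import pandas as pd

    prompt_generation_dir = "raw_data_sanity_cleaned_without_p3/"

    for file in glob.glob(os.path.join(prompt_generation_dir, "*.jsonl")):
        # skip outputs produced by a previous run of this script
        if "clean.jsonl" in file:
            continue

        df = pd.read_json(file, lines=True)
        df = df.drop_duplicates(subset=["prompt"])  # hypothetical cleaning step

        clean_name = file.split(".jsonl")[0] + "_clean.jsonl"
        df.to_json(clean_name, orient="records", lines=True)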
Full fine-tune config (YAML; filename not captured)

@@ -2,27 +2,29 @@
 model_name: "zpn/llama-7b"
 tokenizer_name: "zpn/llama-7b"
 gradient_checkpointing: true
+save_name: "nomic-ai/vicuna-full-multi-turn"

 # dataset
 streaming: false
 num_proc: 64
-dataset_path: "data.jsonl"
-max_length: 512
+dataset_path: "data_multiturn"
+max_length: 1024
 batch_size: 32

 # train dynamics
 lr: 5.0e-5
-eval_every: 2000
+eval_every: 800
 eval_steps: 100
-save_every: 2000
-output_dir: "ckpts/llama-7b"
+save_every: 800
+output_dir: "ckpts/llama-7b-full-multi"
 checkpoint: null
 lora: false
 warmup_steps: 100
+num_epochs: 2

 # logging
-wandb: false
-wandb_entity: zanussbaum
-wandb_project: llama
+wandb: true
+wandb_entity: vicuna
+wandb_project_name: vicuna
 seed: 42

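Several of these keys only matter if train.py reads them; num_epochs in particular drives the epoch loop added below in train.py. A minimal sketch of consuming this config, assuming it is parsed with PyYAML into a flat dict; the file path is hypothetical since the capture does not show the config's name:

    import yaml

    with open("finetune.yaml") as f:  # hypothetical path
        config = yaml.safe_load(f)

    print(config["save_name"])   # "nomic-ai/vicuna-full-multi-turn"
    print(config["num_epochs"])  # 2
    print(config["max_length"])  # 1024, up from 512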
LoRA fine-tune config (YAML; filename not captured)

@@ -2,12 +2,12 @@
 model_name: "zpn/llama-7b"
 tokenizer_name: "zpn/llama-7b"
 gradient_checkpointing: false
-save_name: "zpn/vicuna-lora"
+save_name: "nomic-ai/vicuna-lora-multi-turn"

 # dataset
 streaming: false
 num_proc: 64
-dataset_path: "data"
+dataset_path: "data_multiturn"
 max_length: 1024
 batch_size: 4

@@ -16,10 +16,11 @@ lr: 5.0e-5
 eval_every: 2000
 eval_steps: 100
 save_every: 2000
-output_dir: "ckpts/llama-7b"
+output_dir: "ckpts/llama-7b-lora-multi"
 checkpoint: null
 lora: true
 warmup_steps: 100
+num_epochs: 2

 # logging
 wandb: true
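With lora: true, the trainer presumably routes through PEFT adapters rather than full-parameter updates; that wiring is not part of this diff, so the following is only a sketch of the usual peft pattern, and the LoRA hyperparameters shown are illustrative, not taken from this commit:

    from peft import LoraConfig, get_peft_model
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("zpn/llama-7b")

    lora_config = LoraConfig(
        r=8,               # illustrative values, not from this commit
        lora_alpha=32,
        lora_dropout=0.05,
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()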
data.py (16 changes)
@@ -1,6 +1,6 @@
 import glob
 import torch
-from datasets import load_dataset
+from datasets import load_dataset, concatenate_datasets
 import os
 from torch.utils.data import DataLoader
 from transformers import DefaultDataCollator
@@ -20,7 +20,7 @@ def tokenize_inputs(config, tokenizer, examples):

         # plus one since we remove bos from response
         # but we subtract one since we want to add eos token
-        remaining_tokens = max_length - input_len - len(newline_tokens)
+        remaining_tokens = max_length - input_len - len(newline_tokens) + 1
         # remove bos
         target_tokens = tokenizer(response, truncation=True, max_length=remaining_tokens, return_tensors="pt")["input_ids"].squeeze()[1:]

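The + 1 widens the response's token budget by one slot. Reading it with the surrounding comments: stripping the response's BOS frees one position, and after the next hunk the EOS is no longer unconditionally reserved. For example, with max_length = 1024, a 500-token prompt, and a 1-token newline separator, the response may now occupy 1024 - 500 - 1 + 1 = 524 tokens instead of 523.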
@@ -31,7 +31,9 @@ def tokenize_inputs(config, tokenizer, examples):

         # add target tokens, remove bos
         input_ids[i, newline_plus_inputs: newline_plus_inputs + len(target_tokens)] = target_tokens
-        # add eos token, enforce stopping
-        input_ids[i, newline_plus_inputs + len(target_tokens)] = tokenizer.eos_token_id
+        # add eos token, enforce stopping if we don't truncate
+        # we don't want long code to stop generating if truncated during training
+        if newline_plus_inputs + len(target_tokens) < max_length:
+            input_ids[i, newline_plus_inputs + len(target_tokens)] = tokenizer.eos_token_id

         labels = input_ids[i].clone()
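The new guard changes what a truncated example teaches the model: when the response fills the whole window, no EOS is written, so training on long truncated outputs (the comment calls out code) does not teach the model to stop mid-answer. A self-contained toy illustration of the same logic with made-up token ids:

    import torch

    # toy values, not from the repo
    max_length = 8
    pad_id, eos_id = 0, 2
    start = 3                                # prompt + newline length
    target_tokens = torch.tensor([5, 6, 7])  # pretend response, BOS already stripped

    input_ids = torch.full((max_length,), pad_id)
    input_ids[start:start + len(target_tokens)] = target_tokens

    # append EOS only when the response actually fit inside the window
    if start + len(target_tokens) < max_length:
        input_ids[start + len(target_tokens)] = eos_id

    print(input_ids)  # tensor([0, 0, 0, 5, 6, 7, 2, 0])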
@@ -51,7 +53,6 @@ def tokenize_inputs(config, tokenizer, examples):
     return out


-
 def load_data(config, tokenizer):
     dataset_path = config["dataset_path"]

@@ -62,16 +63,21 @@ def load_data(config, tokenizer):
         else:
             files = [dataset_path]

+        print(f"Reading files {files}")

         dataset = load_dataset("json", data_files=files, split="train")

     else:
         dataset = load_dataset(dataset_path)

+    uuids = load_dataset("json", data_files="watermark.jsonl", split="train")
     dataset = dataset.train_test_split(test_size=.05, seed=config["seed"])

     train_dataset, val_dataset = dataset["train"], dataset["test"]

+    train_dataset = concatenate_datasets([train_dataset, uuids])
+    train_dataset = train_dataset.shuffle(seed=config["seed"])

     if config["streaming"] is False:
         kwargs = {"num_proc": config["num_proc"]}
     else:
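These additions mix a watermark set into training: rows from watermark.jsonl (UUID strings, presumably so one can later probe whether a model was trained on this data) are appended to the train split and shuffled in. concatenate_datasets requires matching schemas, so watermark.jsonl must carry the same columns as the training rows. The same pattern on hypothetical files:

    from datasets import load_dataset, concatenate_datasets

    # hypothetical files sharing the same columns, e.g. "prompt" and "response"
    data = load_dataset("json", data_files="data_multiturn.jsonl", split="train")
    uuids = load_dataset("json", data_files="watermark.jsonl", split="train")

    splits = data.train_test_split(test_size=0.05, seed=42)
    train_dataset = concatenate_datasets([splits["train"], uuids])
    train_dataset = train_dataset.shuffle(seed=42)  # spread watermark rows throughout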
train.py (9 changes)
@@ -115,6 +115,7 @@ def train(accelerator, config):
         "gradient_accumulation_steps"
     ]

+    for epoch in range(config["num_epochs"]):
         for step, batch in enumerate(tqdm(train_dataloader)):
             model.train()
             outputs = model(**batch)
@@ -158,6 +159,13 @@ def train(accelerator, config):

                train_loss.reset()

+        accelerator.print(f"Epoch {epoch} finished")
+        accelerator.print(f"Pushing to HF hub")
+        accelerator.wait_for_everyone()
+        unwrapped_model = accelerator.unwrap_model(model)
+        if accelerator.is_main_process:
+            unwrapped_model.push_to_hub(config["save_name"] + "_first_epoch", private=True)
+

     accelerator.wait_for_everyone()
     unwrapped_model = accelerator.unwrap_model(model)
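The per-epoch block mirrors the final save at the end of train(): synchronize all ranks, unwrap the accelerate-prepared model, and push from the main process only, here under a "_first_epoch" suffix so the intermediate checkpoint gets its own Hub repo. Condensed into a helper (a sketch; the surrounding trainer and its Accelerator are assumed):

    def push_checkpoint(accelerator, model, repo_name):
        # every rank must finish the epoch before weights are read
        accelerator.wait_for_everyone()
        unwrapped = accelerator.unwrap_model(model)
        # only the main process talks to the Hub
        if accelerator.is_main_process:
            unwrapped.push_to_hub(repo_name, private=True)

    # per the diff: push_checkpoint(accelerator, model, config["save_name"] + "_first_epoch")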
@@ -168,6 +176,7 @@ def train(accelerator, config):
         state_dict=accelerator.get_state_dict(model),
     )

+    if accelerator.is_main_process:
         unwrapped_model.push_to_hub(config["save_name"], private=True)

     accelerator.end_training()