Mirror of https://github.com/nomic-ai/gpt4all, synced 2024-11-08 07:10:32 +00:00
fix: data processing
commit e4e88dff33
parent c2fc164779
data.py (23 lines changed)
@@ -22,24 +22,35 @@ def tokenize_inputs(config, tokenizer, examples):
         if response.count("</s>") > 0:
             response = response.replace("</s>", tokenizer.eos_token)
 
-        prompt_len = len(tokenizer(prompt, truncation=True, return_tensors="pt")["input_ids"][0])
+        prompt_len = len(tokenizer(prompt, return_tensors="pt")["input_ids"][0])
 
         # hack if our prompt is super long
-        # we need to include some labels
-        if prompt_len >= max_length - 1:
-            prompt = prompt[:len(prompt) // 2]
-            prompt_len = len(tokenizer(prompt, truncation=True, return_tensors="pt")["input_ids"][0])
+        # we need to include some labels so we arbitrarily trunacate at max_length // 2
+        # if the length is too long
+        if prompt_len >= max_length // 2:
+            # if prompt is too long, truncate
+            # but make sure to truncate to at max 1024 tokens
+            new_len = min(max_length // 2, len(prompt) // 2)
+            prompt = prompt[:new_len]
+            # get new prompt length
+            prompt_len = tokenizer(prompt, return_tensors="pt", max_length=max_length // 2, truncation=True).input_ids.ne(tokenizer.pad_token_id).sum().item()
+
+        assert prompt_len <= max_length // 2, f"prompt length {prompt_len} exceeds max length {max_length}"
 
         input_tokens = tokenizer(prompt + "\n" + response + tokenizer.eos_token,
                                  truncation=True, max_length=max_length, return_tensors="pt")["input_ids"].squeeze()
 
-
         labels = input_tokens.clone()
         labels[:prompt_len + len(newline_tokens)] = -100
         if len(labels) < max_length:
             # pad to max_length with -100
             labels = torch.cat([labels, torch.full((max_length - len(labels),), -100)])
 
+        if (labels == -100).sum() == len(labels) - 1:
+            print(prompt)
+            print(response)
+            raise
+
         input_tokens = tokenizer.pad({"input_ids": input_tokens}, padding="max_length", max_length=max_length)["input_ids"]
         out["labels"].append(labels)
         out["input_ids"].append(input_tokens)
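
For context, here is a minimal, hedged sketch of the degenerate case the new max_length // 2 cap is aimed at: a prompt long enough to fill the whole context window, so that masking the prompt span leaves the labels fully (or all but one token) set to -100 and the example contributes nothing to the loss. The GPT-2 tokenizer, max_length value, and the prompt/response strings below are illustrative assumptions, not taken from this commit.

# hedged sketch, not the repo's code verbatim
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")   # illustrative tokenizer choice
tokenizer.pad_token = tokenizer.eos_token
max_length = 1024
newline_tokens = tokenizer("\n", return_tensors="pt")["input_ids"][0]

prompt = "word " * 2000                              # roughly 2000 tokens, more than max_length
response = "short answer"

# build inputs the same way the hunk does, then mask the prompt span
input_tokens = tokenizer(prompt + "\n" + response + tokenizer.eos_token,
                         truncation=True, max_length=max_length,
                         return_tensors="pt")["input_ids"].squeeze()
prompt_len = len(tokenizer(prompt, return_tensors="pt")["input_ids"][0])
labels = input_tokens.clone()
labels[:prompt_len + len(newline_tokens)] = -100
print((labels == -100).sum().item() == len(labels))  # True: every label is masked

# the commit's remedy: cap the prompt before building the inputs, then recount
# the surviving prompt tokens while ignoring pad ids
if prompt_len >= max_length // 2:
    prompt = prompt[: min(max_length // 2, len(prompt) // 2)]
    prompt_len = (tokenizer(prompt, return_tensors="pt",
                            max_length=max_length // 2, truncation=True)
                  .input_ids.ne(tokenizer.pad_token_id).sum().item())
assert prompt_len <= max_length // 2, f"prompt too long: {prompt_len}"

Counting the capped prompt with input_ids.ne(tokenizer.pad_token_id).sum() yields the same number as len(...) in this sketch because no padding is requested in that call; presumably it is written that way so the count would stay correct if padding were ever applied there.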