fix: cut off prompt correctly

eval
Zach Nussbaum 2 years ago
parent b6e3ba07c4
commit f51c5c8109

@@ -11,7 +11,7 @@ def generate(tokenizer, prompt, model, config):
     outputs = model.generate(input_ids=input_ids, max_new_tokens=config["max_new_tokens"], temperature=config["temperature"])
-    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
     return decoded[len(prompt):]
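
For context, below is a minimal runnable sketch of the patched generate() helper, assuming a Hugging Face transformers causal LM. The tokenization call, the example model name, and the config values are reconstructions around the two lines shown in the hunk, not the repository's exact code.

# Minimal sketch of the patched generate() helper. Everything outside the
# hunk (the tokenization line, the example model name, the config values)
# is an assumption, not the repository's exact code.
from transformers import AutoModelForCausalLM, AutoTokenizer

def generate(tokenizer, prompt, model, config):
    # Encode the prompt (assumed call; the hunk does not show this line).
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    outputs = model.generate(
        input_ids=input_ids,
        max_new_tokens=config["max_new_tokens"],
        temperature=config["temperature"],
    )
    # The commit's fix: strip the decoded text before slicing, so the
    # len(prompt) offset lines up with where the prompt actually starts
    # and only the newly generated continuation is returned.
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    return decoded[len(prompt):]

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # hypothetical model choice
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    config = {"max_new_tokens": 32, "temperature": 0.7}
    print(generate(tokenizer, "The quick brown fox", model, config))

Stripping before slicing matters because tokenizer.decode can emit leading whitespace; without the strip, the len(prompt) offset shifts and part of the prompt leaks into the returned completion.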
