#!/usr/bin/env python3
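"""Generate a completion from a causal LM (optionally wrapped with a LoRA
adapter) described by a config file.

Example invocation (script and config file names are illustrative, not
names confirmed by this repo):

    python generate.py --config configs/generate.yaml --prompt "Hello"
"""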
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModelForCausalLM
from read import read_config
from argparse import ArgumentParser
import torch
import time


def generate(tokenizer, prompt, model, config):
    # Tokenize the prompt and move it onto the model's device.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

    # temperature only takes effect when sampling is enabled, so enable it
    # explicitly; otherwise transformers silently falls back to greedy decoding.
    outputs = model.generate(input_ids=input_ids, max_new_tokens=config["max_new_tokens"], temperature=config["temperature"], do_sample=True)

    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

    # Drop the echoed prompt so only the completion is returned.
    return decoded[len(prompt):]
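
# Note: slicing off len(prompt) characters assumes tokenizer.decode()
# reproduces the prompt text exactly. A token-level alternative (a sketch,
# not this project's code) would slice the output tensor instead:
#
#   new_tokens = outputs[0][input_ids.shape[1]:]
#   completion = tokenizer.decode(new_tokens, skip_special_tokens=True)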


def setup_model(config):
    # Load the base model in fp16, letting accelerate place it across the
    # available devices.
    model = AutoModelForCausalLM.from_pretrained(config["model_name"], device_map="auto", torch_dtype=torch.float16)
    tokenizer = AutoTokenizer.from_pretrained(config["tokenizer_name"])

    added_tokens = tokenizer.add_special_tokens({"bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>"})

    # If any special tokens were genuinely new, the embedding matrix has to
    # grow to cover the enlarged vocabulary.
    if added_tokens > 0:
        model.resize_token_embeddings(len(tokenizer))

    if config["lora"]:
        # Wrap the base model with the trained LoRA adapter weights.
        model = PeftModelForCausalLM.from_pretrained(model, config["lora_path"], device_map="auto", torch_dtype=torch.float16)
        model.to(dtype=torch.float16)

    print(f"Mem needed: {model.get_memory_footprint() / 1024 / 1024 / 1024:.2f} GB")

    return model, tokenizer
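
# For reference, the config consumed above needs at least the keys below.
# The values are illustrative assumptions, not defaults shipped with this
# project (read_config is the repo's own loader):
#
#   model_name: some-org/some-causal-lm
#   tokenizer_name: some-org/some-causal-lm
#   lora: true
#   lora_path: path/to/lora/adapter
#   max_new_tokens: 256
#   temperature: 0.7
#   prompt: null   # may instead be given via --prompt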


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--config", type=str, required=True)
    parser.add_argument("--prompt", type=str)

    args = parser.parse_args()

    config = read_config(args.config)

    # A prompt must come from either the config file or the CLI flag.
    if config["prompt"] is None and args.prompt is None:
        raise ValueError("Prompt is required either in config or as argument")

    # The CLI flag, when given, takes precedence over the config value.
    prompt = config["prompt"] if args.prompt is None else args.prompt

    print("Setting up model")
    model, tokenizer = setup_model(config)

    print("Generating")
    start = time.time()
    generation = generate(tokenizer, prompt, model, config)
    print(f"Done in {time.time() - start:.2f}s")
    print(generation)