# CodexTrouter / ProTalk_ModelBuilder.py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model = "microsoft/phi-2"

# Use half precision on GPU; fall back to float32 on CPU, where many
# float16 ops are unsupported.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

tokenizer = AutoTokenizer.from_pretrained(base_model)
model = AutoModelForCausalLM.from_pretrained(
    base_model, torch_dtype=dtype, low_cpu_mem_usage=True
).to(device)
# Smoke test: generate a short reply to a chat-style prompt.
prompt = "User: Hello! Who are you?\nAI:"
inputs = tokenizer(prompt, return_tensors="pt").to(device)
outputs = model.generate(
    **inputs,
    max_new_tokens=60,
    pad_token_id=tokenizer.eos_token_id,  # phi-2 defines no pad token
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))