SteamSHP
conceptofmind opened this issue · comments
Enrico Shippole commented
def SteamSHP(input_query: str) -> list[str]:
    """Run one query through the SteamSHP preference model and return the decoded output.

    Tokenizes *input_query*, generates a single new token with
    ``stanfordnlp/SteamSHP-flan-t5-large`` (presumably an "A"/"B" preference
    label -- confirm against the model card), and returns the decoded batch.

    Args:
        input_query: Prompt text formatted per the SteamSHP input template.

    Returns:
        A list with one decoded string (special tokens stripped).

    Note:
        The tokenizer and model are (re)loaded on every call; callers that
        score many queries should hoist the loading out of the loop.
    """
    import torch  # local import so this snippet stays self-contained

    # Fall back to CPU instead of crashing when no GPU is present
    # (previously hard-coded to "cuda").
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer = AutoTokenizer.from_pretrained("stanfordnlp/SteamSHP-flan-t5-large")
    model = T5ForConditionalGeneration.from_pretrained(
        "stanfordnlp/SteamSHP-flan-t5-large"
    ).to(device)
    x = tokenizer([input_query], return_tensors="pt").input_ids.to(device)
    # Inference only: disable gradient tracking to save memory.
    with torch.no_grad():
        y = model.generate(x, max_new_tokens=1)
    output = tokenizer.batch_decode(y, skip_special_tokens=True)
    return output
Enrico Shippole commented
In my opinion, better left for a later iteration.