import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and the fine-tuned T5 model from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("yeye776/OndeviceAI-T5-v1T")
model = AutoModelForSeq2SeqLM.from_pretrained("yeye776/OndeviceAI-T5-v1")

def generate_answer(input_text):
    # Tokenize the prompt, truncating inputs longer than 700 tokens.
    input_ids = tokenizer(input_text, max_length=700, truncation=True, return_tensors="pt").input_ids

    # Generate a response of up to 1024 tokens (do_sample=True lets top_k=10 take effect).
    output_ids = model.generate(input_ids, do_sample=True, top_k=10, max_length=1024)

    # Decode the generated token ids back into plain text.
    output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return output_text

iface = gr.Interface(
    fn=generate_answer,
    inputs="text",
    outputs="text",
    title="OnDevice & AI Home IoT",
    description="Hugging Face Transformers + Gradio Demo",
)

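# Tip: iface.launch(share=True) would additionally expose the demo through a
# temporary public Gradio link; the plain launch() below serves it locally
# (or lets a Hugging Face Space host it).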
iface.launch()