import gradio as gr
# Load model directly
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("yeye776/OndeviceAI-T5-v1T")
model = AutoModelForSeq2SeqLM.from_pretrained("yeye776/OndeviceAI-T5-v1")
# Gradio ์ธํ„ฐํŽ˜์ด์Šค ๊ตฌ์„ฑ
def generate_answer(input_text):
# ์ž…๋ ฅ ํ…์ŠคํŠธ๋ฅผ ๋ชจ๋ธ ํ† ํฌ๋‚˜์ด์ €๋กœ ํ† ํฐํ™”
input_ids = tokenizer(input_text, max_length=700, return_tensors="pt").input_ids
# ๋ชจ๋ธ ์ถ”๋ก 
output_ids = model.generate(input_ids, top_k=10, max_length=1024)
# output_ids = model.generate(input_ids, num_beams=10, top_k=10, max_length=1024)
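    # Note: top_k only affects sampling; with the default do_sample=False decoding is greedy,
    # so pass do_sample=True if top-k sampling is actually intended.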
# ๋ชจ๋ธ ์ถœ๋ ฅ์„ ํ…์ŠคํŠธ๋กœ ๋””์ฝ”๋”ฉ
output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
return output_text
# Gradio ์ธํ„ฐํŽ˜์ด์Šค ์ •์˜
iface = gr.Interface(
fn=generate_answer,
inputs="text",
outputs="text",
title="OnDevice & AI Home IoT",
description="Hugging Face Transformers + Gradio Demo"
)
# Launch the Gradio app
iface.launch()
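# Alternative: build the demo straight from the hosted model on the Hub instead of a custom function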
# gr.load("models/yeye776/t5-OndeviceAI-HomeIoT").launch()
# iface = gr.Interface(fn=pipe, inputs="text", outputs="text")
# iface.launch()