import gradio as gr

# Load model directly
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("yeye776/OndeviceAI-T5-v1")
model = AutoModelForSeq2SeqLM.from_pretrained("yeye776/OndeviceAI-T5-v1")

# Generation function used by the Gradio interface
def generate_answer(input_text):
    # Tokenize the input text with the model tokenizer (truncate to the 700-token limit)
    input_ids = tokenizer(input_text, max_length=700, truncation=True, return_tensors="pt").input_ids

    # Run model inference (note: top_k only takes effect with sampling/beam settings;
    # plain greedy decoding ignores it)
    output_ids = model.generate(input_ids, top_k=10, max_length=1024)
    # output_ids = model.generate(input_ids, num_beams=10, top_k=10, max_length=1024)

    # Decode the model output back into text
    output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    return output_text
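
# Quick local smoke test (hypothetical example prompt, not from the original file);
# uncomment to try the model outside the Gradio UI:
# print(generate_answer("거실 불 켜줘"))  # "Turn on the living room light"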

# Define the Gradio interface
iface = gr.Interface(
    fn=generate_answer,
    inputs="text", 
    outputs="text",
    title="OnDevice & AI Home IoT",
    description="OndeviceAI-T5-v1"
)

# Launch the Gradio app
iface.launch()

# gr.load("models/yeye776/t5-OndeviceAI-HomeIoT").launch()
# iface = gr.Interface(fn=pipe, inputs="text", outputs="text")
# iface.launch()
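
# --- Hypothetical sketch (not part of the original file) ---
# The commented-out variant above assumes a `pipe` callable that is never defined here.
# One way to provide it, assuming the transformers text2text-generation pipeline, is:
#
# from transformers import pipeline
# pipe = pipeline("text2text-generation", model="yeye776/OndeviceAI-T5-v1")
#
# def pipe_answer(text):
#     # The pipeline returns a list of dicts; extract the generated string
#     return pipe(text, max_length=1024)[0]["generated_text"]
#
# gr.Interface(fn=pipe_answer, inputs="text", outputs="text").launch()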