import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned T5 model for the on-device home-IoT assistant.
MODEL_NAME = "yeye776/t5-OndeviceAI-HomeIoT"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)


def generate_answer(input_text: str) -> str:
    """Generate a model response for the given input text.

    Args:
        input_text: Raw user text; tokenized and truncated to 700 tokens.

    Returns:
        The decoded model output with special tokens removed.
    """
    # Tokenize the input. `truncation=True` is required for `max_length`
    # to actually take effect; without it the limit is silently ignored.
    input_ids = tokenizer(
        input_text,
        max_length=700,
        truncation=True,
        return_tensors="pt",
    ).input_ids

    # Greedy decoding. NOTE: sampling parameters such as `top_k` are
    # ignored unless `do_sample=True`, so they are omitted here.
    output_ids = model.generate(input_ids, max_length=1024)

    # Decode the generated token ids back to text.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)


# Gradio interface definition.
iface = gr.Interface(
    fn=generate_answer,
    inputs="text",
    outputs="text",
    title="OnDevice & AI Home IoT",
    description="Hugging Face Transformers + Gradio Demo",
)

# Launch only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()