|
import gradio as gr |
|
from typing import List |
|
|
|
def prepare_input(question: str):
    """Build model-ready input ids for *question*.

    Prepends the module-level ``prompt`` string to the question, tokenizes
    the result, and returns the resulting ``input_ids`` tensor (shape
    ``(1, seq_len)`` with ``return_tensors="pt"``).

    NOTE(review): relies on module-level ``prompt`` and ``tokenizer`` being
    defined elsewhere in this file — confirm they are initialized before use.
    """
    inputs = f"{prompt} {question}"
    # truncation=True is required for max_length to actually cap the sequence;
    # without it, HF tokenizers only warn and return the untruncated input.
    input_ids = tokenizer(
        inputs, max_length=700, truncation=True, return_tensors="pt"
    ).input_ids
    return input_ids
|
|
|
def inference(question: str) -> str:
    """Answer *question* with the global seq2seq model.

    Tokenizes the question (via :func:`prepare_input`), moves the ids onto
    the model's device, generates with beam search, and returns the decoded
    text with special tokens stripped.

    NOTE(review): depends on module-level ``model`` and ``tokenizer`` —
    assumed to be initialized elsewhere in this file.
    """
    token_ids = prepare_input(question=question).to(model.device)
    generated = model.generate(
        inputs=token_ids,
        num_beams=10,
        top_k=10,
        max_length=1024,
    )
    return tokenizer.decode(token_ids=generated[0], skip_special_tokens=True)
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Smoke-test the local pipeline with a sample Korean home-IoT command
    # ("turn off living room light 1"), then launch the hosted Gradio demo.
    # Guarded so importing this module does not run inference or start a server.
    print(inference("거실 조명1 꺼주세요"))

    gr.load("models/yeye776/t5-OndeviceAI-HomeIoT").launch()
|
|
|
|