import torch
import gradio as gr

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Run generation on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the tokenizer from the base mT5 checkpoint and the fine-tuned model
# weights, then move the model to the target device.
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("./models/checkpoint-15000/").to(device)


def text_processing(text):
    # Tokenize the input once and reuse the encoding for both the input ids
    # and the attention mask (the original tokenized twice for no benefit).
    encoded = tokenizer(
        [text], return_tensors="pt", max_length=512, truncation=True, padding="max_length"
    ).to(device)

    # Generate from the fine-tuned model and decode the output tokens back to text.
    output = model.generate(
        input_ids=encoded.input_ids, attention_mask=encoded.attention_mask, max_new_tokens=512
    )
    decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)
    return decoded_output[0]


iface = gr.Interface(fn=text_processing, inputs="text", outputs="text", title="test", description="test space")

iface.launch(inline=False)