import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


# Load the fine-tuned mT5 tokenizer and model from the local checkpoint directory
tokenizer = AutoTokenizer.from_pretrained("./mt5")
model = AutoModelForSeq2SeqLM.from_pretrained("./mt5")
model.resize_token_embeddings(len(tokenizer))
model.eval()

def translate(hakubun):
    # Tokenize the hakubun (unpunctuated kanbun); inputs are truncated to 20 tokens
    input_ids = tokenizer.encode(hakubun, return_tensors="pt", max_length=20, truncation=True)
    output = model.generate(input_ids)
    # Decode the generated kakikudashi (Japanese reading), dropping special tokens
    kakikudashi = tokenizer.decode(output[0], skip_special_tokens=True)
    return kakikudashi
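
# Quick sanity check (a sketch, assuming the fine-tuned checkpoint is present in ./mt5):
#   >>> translate("春眠不覚暁")
# should return the model's kakikudashi (Japanese reading) for this line of Tang poetry.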

title = "Kanbun-LM"
description = "Gradio Demo for Kanbun-LM. Enter a hakubun (unpunctuated kanbun) and get its kakikudashi (Japanese reading). Texts other than Tang poetry may not be translated correctly.<br>" \
              "書き下し文生成のデモです。白文を入力し、翻訳された書き下し文を得ることができます。唐詩以外の漢文は正しく翻訳されない可能性があります。"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2305.12759' target='_blank'>arXiv</a></p> " \
          "<p style='text-align: center'><a href='https://github.com/nlp-waseda/Kanbun-LM' target='_blank'>Github Repo</a></p>"
examples = [['春眠不覚暁'],
            ['処処聞啼鳥'],
            ['洛陽親友如相問'],
            ['一片氷心在玉壺']]
demo = gr.Interface(fn=translate, inputs="text", outputs="text",
                    title=title, description=description, article=article, examples=examples, allow_flagging="never")
demo.launch()
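
# The running demo can also be queried from another process with the gradio_client
# package (a sketch; assumes the server is reachable at the default local URL and
# that the Interface exposes the default "/predict" endpoint):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   print(client.predict("春眠不覚暁", api_name="/predict"))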