import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load a T5 model fine-tuned for answer-aware question generation.
# (AutoModelWithLMHead is deprecated; AutoModelForSeq2SeqLM is the
# correct auto class for an encoder-decoder model like T5.)
tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
def get_question(answer, context, max_length=64):
    # The model expects the answer span and its context in an
    # "answer: ... context: ..." prompt.
    input_text = "answer: %s context: %s </s>" % (answer, context)
    features = tokenizer([input_text], return_tensors="pt")
    output = model.generate(input_ids=features["input_ids"],
                            attention_mask=features["attention_mask"],
                            max_length=max_length)
    # Skip special tokens such as <pad> and </s> in the decoded question.
    return tokenizer.decode(output[0], skip_special_tokens=True)
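
# Quick sanity check (illustrative only; the exact wording of the
# generated question depends on the model and may vary):
# print(get_question("Tom Kilburn",
#                    "The world's first piece of software was written by a "
#                    "computer scientist named Tom Kilburn in 1948."))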
# Each example supplies the two Interface inputs: (answer, context).
examples = [["1948", "The world's first piece of software was written by a computer scientist named Tom Kilburn in 1948."],
            ["Tom Kilburn", "The world's first piece of software was written by a computer scientist named Tom Kilburn in 1948."]]
demo = gr.Interface(fn=get_question, inputs=["text", "text"], outputs="text", title="Question Generator", examples=examples)
demo.launch()