from transformers import (
    pipeline,
    AutoModelForQuestionAnswering,
    AutoTokenizer,
)
import gradio as gr

# Set up the model name
model_name = "hzsushiqiren/bert-finetuned-squad"

# Load the fine-tuned question-answering model
model = AutoModelForQuestionAnswering.from_pretrained(model_name)

# Load the matching tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Set up the question-answering pipeline
nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)

# Prediction function called by the Gradio interface
def func(context, question):
    result = nlp(question=question, context=context)
    return result["answer"]

# Create the interface: one textbox for the context, one for the question
app = gr.Interface(
    fn=func,
    inputs=["textbox", "text"],
    outputs="textbox",
    title="Swinburne Online FAQs Answering bot",
    theme="dark-grass",
    description="Input context and question, then get answers!",
)

# Launch the app
app.launch(inline=False, share=True)
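
# A minimal sketch of what a single prediction looks like (hypothetical
# context/question text; the exact answer span depends on the model):
#
#   context = "Swinburne Online support is available seven days a week."
#   question = "When is support available?"
#   func(context, question)  # -> an extracted span such as "seven days a week"
#
# With share=True, launch() also prints a temporary public URL for the app.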