import gradio as gr
from transformers import pipeline

# Path to the fine-tuned model saved earlier
model_path = '/content/model_output'

# Load the saved model and tokenizer into a text-generation pipeline
text_gen = pipeline("text-generation", model=model_path, tokenizer=model_path)

def answer_question(question):
    # max_length caps the total sequence length (prompt plus generated tokens)
    result = text_gen(question, max_length=100, num_return_sequences=1)
    return result[0]['generated_text']

# Simple Gradio UI: a text box for the question, generated text as the answer
iface = gr.Interface(fn=answer_question, inputs="text", outputs="text")
iface.launch()