import streamlit as st
from transformers import T5ForConditionalGeneration, T5Tokenizer


# Load the pre-trained model and tokenizer once and cache them across reruns
@st.cache_resource
def load_model():
    model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
    model = T5ForConditionalGeneration.from_pretrained(model_name)
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    return model, tokenizer


# Generate a question from a passage of text
def generate_question(text, model, tokenizer):
    # Prefix the passage with a task instruction. Note: this checkpoint's model
    # card uses answer-aware prompts ("answer: ... context: ..."); the simpler
    # prefix below trades some quality for a single-field UI.
    input_text = f"generate question: {text}"
    # Tokenize, truncating to the model's 512-token input limit
    input_ids = tokenizer.encode(
        input_text, return_tensors="pt", truncation=True, max_length=512
    )
    # Generate the question; cap the output length explicitly so questions
    # aren't cut off by the model's small default max_length
    outputs = model.generate(input_ids, max_new_tokens=64)
    # Decode the output tokens back into a string
    question = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return question


# Streamlit interface
def main():
    st.title("Question Generation with T5")

    # Load the cached model and tokenizer
    model, tokenizer = load_model()

    # Input text from the user
    passage = st.text_area("Enter a passage to generate a question:", "")

    # Button to trigger question generation
    if st.button("Generate Question"):
        if passage:
            question = generate_question(passage, model, tokenizer)
            st.write(f"Generated Question: {question}")
        else:
            st.write("Please enter a passage of text to generate a question.")


if __name__ == "__main__":
    main()
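
# To try this locally (assuming the script is saved as app.py and that
# streamlit, transformers, torch, and sentencepiece are installed), run:
#
#   streamlit run app.py
#
# The first run downloads the checkpoint from the Hugging Face Hub, so it
# takes noticeably longer than subsequent runs.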