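# Streamlit app: generate a question from a passage using a T5 model
# fine-tuned for question generation.
# Run locally with: streamlit run <this_file>.py
# (requires streamlit, transformers, torch, and sentencepiece)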
import streamlit as st
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Load pre-trained model and tokenizer
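# (st.cache_resource keeps the loaded weights in memory across reruns,
#  so the model is downloaded and initialized only once per session)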
@st.cache_resource
def load_model():
    model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
    model = T5ForConditionalGeneration.from_pretrained(model_name)
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    return model, tokenizer

# Generate a question from a passage of text
def generate_question(text, model, tokenizer):
    # Prefix the input with the task prompt. Note: this checkpoint's model
    # card pairs an answer with a context ("answer: {answer} context: {context}");
    # a bare "generate question:" prefix treats the whole passage as context.
    input_text = f"generate question: {text}"
    input_ids = tokenizer.encode(
        input_text, return_tensors="pt", truncation=True, max_length=512
    )

    # Generate the question; without an explicit max_length, generate()
    # stops at the default of 20 tokens, which can cut questions short
    outputs = model.generate(input_ids, max_length=64)

    # Decode the generated token IDs back into a string, dropping
    # special tokens such as <pad> and </s>
    question = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return question

# Streamlit interface
def main():
    st.title("Question Generation with T5")

    # Load the model and tokenizer
    model, tokenizer = load_model()

    # Input text from the user
    passage = st.text_area("Enter a passage to generate a question:", "")

    # Button to trigger question generation
    if st.button("Generate Question"):
        if passage:
            # Generate question
            question = generate_question(passage, model, tokenizer)
            st.write(f"Generated Question: {question}")
        else:
            st.write("Please enter a passage of text to generate a question.")

if __name__ == "__main__":
    main()