GlastonR commited on
Commit
e1fb702
·
verified ·
1 Parent(s): 01e281f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -0
app.py CHANGED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
3
+
4
# Load and cache the pre-trained model/tokenizer pair.
@st.cache_resource
def load_model():
    """Download the T5 question-generation checkpoint and return it.

    Returns a ``(model, tokenizer)`` tuple. ``st.cache_resource`` keeps
    the pair alive across Streamlit reruns, so the weights are fetched
    and loaded only once per server process.
    """
    checkpoint = "mrm8488/t5-base-finetuned-question-generation-ap"
    tokenizer = T5Tokenizer.from_pretrained(checkpoint)
    model = T5ForConditionalGeneration.from_pretrained(checkpoint)
    return model, tokenizer
11
+
12
# Generate a question from a passage of text.
def generate_question(text, model, tokenizer, max_length=None):
    """Generate a single question about *text* with a seq2seq model.

    Parameters
    ----------
    text : str
        Passage to generate a question from.
    model
        Seq2seq model exposing ``generate`` (e.g. T5).
    tokenizer
        Tokenizer exposing ``encode`` and ``decode``.
    max_length : int | None, optional
        Maximum length (in tokens) of the generated question. ``None``
        (the default) keeps the model's own default — for T5 this is a
        short limit (20 tokens), so pass a larger value to avoid
        truncated questions. Default preserves the original behavior.

    Returns
    -------
    str
        The decoded question with special tokens stripped.
    """
    # Task prefix tells the T5 checkpoint which task to perform.
    # NOTE(review): this checkpoint's model card suggests an
    # "answer: ... context: ..." prompt format — confirm the plain
    # "generate question:" prefix produces sensible output.
    input_text = f"generate question: {text}"
    input_ids = tokenizer.encode(input_text, return_tensors="pt")

    # Forward max_length only when given so the default path is
    # byte-for-byte identical to the original call.
    if max_length is None:
        outputs = model.generate(input_ids)
    else:
        outputs = model.generate(input_ids, max_length=max_length)

    # Decode the first (and only) generated sequence.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
24
+
25
# Streamlit entry point.
def main():
    """Render the question-generation UI and handle one interaction."""
    st.title("Question Generation with T5")

    # Cached across reruns by load_model's st.cache_resource decorator.
    model, tokenizer = load_model()

    passage = st.text_area("Enter a passage to generate a question:", "")

    # Nothing to do until the user clicks the button.
    if not st.button("Generate Question"):
        return

    # Guard clause: empty passage gets a prompt instead of a question.
    if not passage:
        st.write("Please enter a passage of text to generate a question.")
        return

    question = generate_question(passage, model, tokenizer)
    st.write(f"Generated Question: {question}")
43
+
44
# Launch the app only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    main()