tdnathmlenthusiast committed
Commit 5edfd4d
Parent: b72c924

Update app.py

Files changed (1): app.py (+20 −14)
app.py CHANGED
```diff
@@ -1,23 +1,29 @@
-# Import necessary libraries
-import blurr
-import gradio as gr
+import transformers
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+from gradio import Interface as gr
 from pathlib import Path
 from fastai.text.all import *
-from blurr.text.data.all import *
-from blurr.text.modeling.all import *
-
-# Manually download and prepare SQuAD dataset
 from datasets import load_dataset
+
+# Download and prepare SQuAD dataset (not used directly here)
 squad = load_dataset("squad")
 
-# Load the learner without using SQuAD
-inf_learn = load_learner(fname=Path("laptop_summarizer_1.pkl"), trust_remote_code=True)
+# Load the pre-trained summarization model (adjust model name as needed)
+model_name = "facebook/bart-base"  # Choose a suitable summarization model
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
-# Define a function to generate summaries using your model
+# Define a function to generate summaries using the model
 def generate_summary(input_text):
-    prediction = inf_learn.blurr_generate(input_text)
-    generated_text = prediction[0]['generated_texts']
-    return generated_text
+    # Tokenize the input text
+    inputs = tokenizer(input_text, return_tensors="pt")
+
+    # Generate summary using the pre-trained model
+    output = model.generate(**inputs)
+
+    # Decode the generated tokens back to text
+    summary_text = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
+    return summary_text
 
 # Create an interface for the model
 interface = gr.Interface(
@@ -30,4 +36,4 @@ interface = gr.Interface(
 )
 
 # Start the Gradio app
-interface.launch(inline=True, trust_remote_code=True)
+interface.launch(inline=True)
```
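As the new comment notes, the SQuAD download is not actually consumed by the summarizer. For orientation, `load_dataset("squad")` returns a `DatasetDict` with `train` and `validation` splits using the standard SQuAD schema (`id`, `title`, `context`, `question`, `answers`); a quick inspection sketch:

```python
from datasets import load_dataset

# Downloads and caches SQuAD; returns a DatasetDict with "train" and "validation"
squad = load_dataset("squad")
print(squad)

# Each record follows the SQuAD schema: id, title, context, question, answers
example = squad["train"][0]
print(example["question"])
print(example["context"][:200])
```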
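The new `generate_summary` calls `model.generate(**inputs)` with library defaults, which for BART caps output length via the model config and can fail on inputs longer than the model's 1024-token position limit. A minimal standalone sketch of the same pipeline with truncation enabled; `num_beams` and `max_length` are illustrative choices, not values from the commit:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# bart-base is a general pretrained checkpoint; a summarization fine-tune
# such as "facebook/bart-large-cnn" would likely produce better summaries
model_name = "facebook/bart-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

def generate_summary(input_text: str) -> str:
    # Truncate so long inputs do not exceed BART's 1024-token position limit
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
    # num_beams and max_length are illustrative, not from the commit
    output = model.generate(**inputs, num_beams=4, max_length=128)
    # Decode the generated tokens back to text
    return tokenizer.batch_decode(output, skip_special_tokens=True)[0]
```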
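Two caveats for running the new version: the diff elides the `gr.Interface(...)` arguments, and `from gradio import Interface as gr` binds `gr` to the `Interface` class itself, so the later `gr.Interface(...)` call would raise `AttributeError`. A sketch of plausible wiring, assuming the conventional `import gradio as gr`; the widget choices and title below are assumptions, since the diff does not show these arguments:

```python
import gradio as gr  # conventional alias; the commit aliases Interface itself

# fn points at the generate_summary defined above; inputs/outputs/title
# are hypothetical, as the actual arguments are elided in the diff
interface = gr.Interface(
    fn=generate_summary,
    inputs=gr.Textbox(lines=10, label="Text to summarize"),
    outputs=gr.Textbox(label="Summary"),
    title="Laptop review summarizer",
)

# Start the Gradio app
interface.launch(inline=True)
```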