abdulmatinomotoso committed on
Commit
e32c3f8
1 Parent(s): bb92919

Create app.py

Files changed (1)
  1. app.py +126 -0
app.py ADDED
@@ -0,0 +1,126 @@
+ # importing the necessary libraries
+ import re
+ import math
+ import nltk
+ import spacy
+ import gradio as gr
+ from nltk.tokenize import sent_tokenize
+ from transformers import BartTokenizer, BartForConditionalGeneration
+
+ nltk.download('punkt')
+
+
+ def clean_text(text):
+     # remove non-ASCII characters (e.g. CJK text the model cannot handle)
+     text = text.encode("ascii", errors="ignore").decode("ascii")
+     # replace newlines and tabs with spaces
+     text = re.sub(r"[\n\t]", " ", text)
+     # collapse multiple spaces into a single one and trim the ends
+     text = re.sub(" +", " ", text).strip()
+     return text
+
+
+ # initializing the summarization model and the spaCy sentence splitter
+ model = BartForConditionalGeneration.from_pretrained("sshleifer/distilbart-cnn-12-6")
+ tokenizer = BartTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
+ nlp = spacy.load("en_core_web_sm")
+
+
+ # defining a function to get the bullet-point summary of the article
+ def final_summary(text):
+     bullet_points = 10
+
+     # re-summarize the running text until it fits in fewer than 10 bullet points
+     while bullet_points >= 10:
+         # tokenize the text into sentences
+         chunks = [str(sentence) for sentence in nlp(text).sents]
+
+         output = []
+         sentences_remaining = len(chunks)
+         i = 0
+
+         # loop through the sentences in roughly equal batches and summarize each batch
+         while sentences_remaining > 0:
+             chunks_remaining = math.ceil(sentences_remaining / 10.0)
+             next_chunk_size = math.ceil(sentences_remaining / chunks_remaining)
+             sentence = " ".join(chunks[i:i + next_chunk_size])
+
+             i += next_chunk_size
+             sentences_remaining -= next_chunk_size
+
+             inputs = tokenizer(sentence, return_tensors="pt", padding="longest")
+             original_input_length = len(inputs["input_ids"][0])
+
+             # batches shorter than 100 tokens are passed through unsummarized
+             if original_input_length < 100:
+                 split_sentences = nlp(sentence)
+                 for split_sentence in split_sentences.sents:
+                     output.append(str(split_sentence).rstrip("."))
+
+             # batches longer than the model's 1024-token limit are split in half first
+             elif original_input_length > 1024:
+                 sent = sent_tokenize(sentence)
+                 length_sent = len(sent)
+                 j = 0
+                 sent_remaining = math.ceil(length_sent / 2)
+
+                 # summarize each half of the oversized batch separately
+                 while length_sent > 0:
+                     halved_sentence = " ".join(sent[j:j + sent_remaining])
+                     halved_inputs = tokenizer(halved_sentence, return_tensors="pt")
+                     halved_summary_ids = model.generate(halved_inputs["input_ids"])
+                     j += sent_remaining
+                     length_sent -= sent_remaining
+
+                     # keep the summary only if it is shorter than its input
+                     if len(halved_summary_ids[0]) < len(halved_inputs["input_ids"][0]):
+                         halved_summary = tokenizer.batch_decode(halved_summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+                         output.append(halved_summary)
+
+             else:
+                 summary_ids = model.generate(inputs["input_ids"])
+
+                 # keep the summary only if it is shorter than its input
+                 if len(summary_ids[0]) < original_input_length:
+                     summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+                     output.append(summary)
+
+         # split the generated summaries into individual sentences
+         final_output = []
+         for paragraph in output:
+             for line in paragraph.split(" . "):
+                 final_output.append(line.replace(" .", "").strip())
+         text = ".".join(final_output)
+         bullet_points = len(final_output)
+
+     # the final sentences are not coherent as running text, so present them as bullets
+     for i in range(len(final_output)):
+         final_output[i] = "* " + final_output[i] + "."
+
+     summary_bullet = "\n".join(final_output)
+     return summary_bullet
+
+
+ # creating an interface for the article summarizer using Gradio
+ demo = gr.Interface(
+     final_summary,
+     inputs=[gr.inputs.Textbox(label="Drop your article here", optional=False)],
+     outputs=[gr.outputs.Textbox(label="Summary")],
+     title="ARTICLE SUMMARIZER",
+     theme="darkhuggingface",
+ )
+
+ # launching the app
+ if __name__ == "__main__":
+     demo.launch(debug=True)
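
For a quick local check before launching the UI, `final_summary` can be called directly; running the input through `clean_text` first matches what that helper was evidently written for, even though this commit never wires it in. A minimal sketch, using a made-up article snippet as input:

# minimal local test of the pipeline; the article text below is a made-up example
sample_article = """
The city council voted on Tuesday to expand the bike lane network.
Officials said construction will begin next spring and last two years.
Local businesses expressed mixed feelings about the plan.
Some worry about reduced parking, while others expect more foot traffic.
The project is budgeted at four million dollars.
"""

print(final_summary(clean_text(sample_article)))

A short input like this stays under the 100-token threshold, so it exercises the pass-through branch and the bullet formatting without calling model.generate; a full-length article is needed to exercise the summarization branches.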