storresbusquets committed on
Commit
bb850d5
·
1 Parent(s): e27e56f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -2
app.py CHANGED
@@ -16,6 +16,9 @@ class GradioInference():
16
  self.keyword_model = T5ForConditionalGeneration.from_pretrained("Voicelab/vlt5-base-keywords")
17
  self.keyword_tokenizer = T5Tokenizer.from_pretrained("Voicelab/vlt5-base-keywords")
18
 
 
 
 
19
  def __call__(self, link, lang, size):
20
  if self.yt is None:
21
  self.yt = YouTube(link)
@@ -39,13 +42,16 @@ class GradioInference():
39
  output = self.keyword_model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4)
40
  predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True)
41
  keywords = [x.strip() for x in predicted.split(',') if x.strip()]
 
 
42
 
43
- return results["text"], transcription_summary[0]["summary_text"], keywords
44
 
45
  def populate_metadata(self, link):
46
  self.yt = YouTube(link)
47
  return self.yt.thumbnail_url, self.yt.title
48
 
 
49
  def transcribe_audio(audio_file):
50
  model = whisper.load_model("base")
51
  result = model.transcribe(audio_file)
@@ -84,9 +90,10 @@ with block as demo:
84
  with gr.Row().style(equal_height=True):
85
  summary = gr.Textbox(label="Summary", placeholder="Summary Output", lines=5)
86
  keywords = gr.Textbox(label="Keywords", placeholder="Keywords Output", lines=5)
 
87
  with gr.Row().style(equal_height=True):
88
  btn = gr.Button("Get video insights") # Updated button label
89
- btn.click(gio, inputs=[link, lang, size], outputs=[text, summary, keywords])
90
  link.change(gio.populate_metadata, inputs=[link], outputs=[img, title])
91
 
92
  with gr.Tab("From Audio file"):
 
16
  self.keyword_model = T5ForConditionalGeneration.from_pretrained("Voicelab/vlt5-base-keywords")
17
  self.keyword_tokenizer = T5Tokenizer.from_pretrained("Voicelab/vlt5-base-keywords")
18
 
19
+ # Sentiment Classifier
20
+ self.classifier = pipeline("text-classification")
21
+
22
  def __call__(self, link, lang, size):
23
  if self.yt is None:
24
  self.yt = YouTube(link)
 
42
  output = self.keyword_model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4)
43
  predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True)
44
  keywords = [x.strip() for x in predicted.split(',') if x.strip()]
45
+
46
+ label = self.classifier(results["text"])[0]["label"]
47
 
48
+ return results["text"], transcription_summary[0]["summary_text"], keywords, label
49
 
50
  def populate_metadata(self, link):
51
  self.yt = YouTube(link)
52
  return self.yt.thumbnail_url, self.yt.title
53
 
54
+
55
  def transcribe_audio(audio_file):
56
  model = whisper.load_model("base")
57
  result = model.transcribe(audio_file)
 
90
  with gr.Row().style(equal_height=True):
91
  summary = gr.Textbox(label="Summary", placeholder="Summary Output", lines=5)
92
  keywords = gr.Textbox(label="Keywords", placeholder="Keywords Output", lines=5)
93
+ label = gr.Label(label="Sentiment Analysis")
94
  with gr.Row().style(equal_height=True):
95
  btn = gr.Button("Get video insights") # Updated button label
96
+ btn.click(gio, inputs=[link, lang, size], outputs=[text, summary, keywords, label])
97
  link.change(gio.populate_metadata, inputs=[link], outputs=[img, title])
98
 
99
  with gr.Tab("From Audio file"):