import gradio as gr
import whisper
from pytube import YouTube
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration
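
# GradioInference wraps the full pipeline used by the app: Whisper for
# speech-to-text, DistilBART for summarization, VoiceLabT5 for keyword
# extraction, and a default Hugging Face text-classification pipeline
# for sentiment.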
class GradioInference():
    def __init__(self):
        self.sizes = list(whisper._MODELS.keys())
        self.langs = ["none"] + sorted(list(whisper.tokenizer.LANGUAGES.values()))
        self.current_size = "base"
        self.loaded_model = whisper.load_model(self.current_size)
        self.yt = None
        self.summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")

        # Initialize VoiceLabT5 model and tokenizer
        self.keyword_model = T5ForConditionalGeneration.from_pretrained("Voicelab/vlt5-base-keywords")
        self.keyword_tokenizer = T5Tokenizer.from_pretrained("Voicelab/vlt5-base-keywords")

        # Sentiment classifier
        self.classifier = pipeline("text-classification")

    def __call__(self, link, lang, size):
        if self.yt is None:
            self.yt = YouTube(link)
        # Download the audio-only stream of the video to a temporary file
        path = self.yt.streams.filter(only_audio=True)[0].download(filename="tmp.mp4")

        if lang == "none":
            lang = None

        # Reload Whisper only when the requested model size changes
        if size != self.current_size:
            self.loaded_model = whisper.load_model(size)
            self.current_size = size
        results = self.loaded_model.transcribe(path, language=lang)

        # Perform summarization on the transcription
        transcription_summary = self.summarizer(results["text"], max_length=130, min_length=30, do_sample=False)

        # Extract keywords using VoiceLabT5
        task_prefix = "Keywords: "
        input_sequence = task_prefix + results["text"]
        input_ids = self.keyword_tokenizer(input_sequence, return_tensors="pt", truncation=False).input_ids
        output = self.keyword_model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4)
        predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True)
        keywords = [x.strip() for x in predicted.split(',') if x.strip()]

        # Classify the overall sentiment of the transcription
        label = self.classifier(results["text"])[0]["label"]

        return results["text"], transcription_summary[0]["summary_text"], keywords, label

    def populate_metadata(self, link):
        self.yt = YouTube(link)
        return self.yt.thumbnail_url, self.yt.title
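
    # Mirrors __call__, but transcribes a user-uploaded audio file instead of
    # downloading the audio track of a YouTube video.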
    def from_audio_input(self, lang, size, audio_file):
        if lang == "none":
            lang = None

        # Reload Whisper only when the requested model size changes
        if size != self.current_size:
            self.loaded_model = whisper.load_model(size)
            self.current_size = size
        results = self.loaded_model.transcribe(audio_file, language=lang)

        # Perform summarization on the transcription
        transcription_summary = self.summarizer(results["text"], max_length=130, min_length=30, do_sample=False)

        # Extract keywords using VoiceLabT5
        task_prefix = "Keywords: "
        input_sequence = task_prefix + results["text"]
        input_ids = self.keyword_tokenizer(input_sequence, return_tensors="pt", truncation=False).input_ids
        output = self.keyword_model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4)
        predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True)
        keywords = [x.strip() for x in predicted.split(',') if x.strip()]

        # Classify the overall sentiment of the transcription
        label = self.classifier(results["text"])[0]["label"]

        return results["text"], transcription_summary[0]["summary_text"], keywords, label
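

# Instantiate the shared inference wrapper and build the Gradio interface.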
gio = GradioInference()
title = "Youtube Insights"
description = "Your AI-powered video analytics tool"
block = gr.Blocks()
with block as demo:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 500px; margin: 0 auto;">
          <div>
            <h1>YouTube <span style="color: red;">Insights</span> 📹</h1>
          </div>
          <p style="margin-bottom: 10px; font-size: 94%">
            Your AI-powered video analytics tool
          </p>
        </div>
        """
    )
    with gr.Group():
        with gr.Tab("From YouTube"):
            with gr.Box():
                with gr.Row().style(equal_height=True):
                    size = gr.Dropdown(label="Model Size", choices=gio.sizes, value='base')
                    lang = gr.Dropdown(label="Language (Optional)", choices=gio.langs, value="none")
                link = gr.Textbox(label="YouTube Link", placeholder="Enter YouTube link...")
                title = gr.Label(label="Video Title")
                with gr.Row().style(equal_height=True):
                    img = gr.Image(label="Thumbnail")
                    text = gr.Textbox(label="Transcription", placeholder="Transcription Output...", lines=10).style(show_copy_button=True, container=True)
                with gr.Row().style(equal_height=True):
                    summary = gr.Textbox(label="Summary", placeholder="Summary Output...", lines=5).style(show_copy_button=True, container=True)
                    keywords = gr.Textbox(label="Keywords", placeholder="Keywords Output...", lines=5).style(show_copy_button=True, container=True)
                    label = gr.Label(label="Sentiment Analysis")
                with gr.Row().style(equal_height=True):
                    clear = gr.ClearButton([link, title, img, text, summary, keywords, label], scale=1)
                    btn = gr.Button("Get video insights", variant='primary', scale=1)
                btn.click(gio, inputs=[link, lang, size], outputs=[text, summary, keywords, label])
                link.change(gio.populate_metadata, inputs=[link], outputs=[img, title])
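
        # Second tab: run the same pipeline on a user-uploaded audio file.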
with gr.Tab("From Audio file"):
with gr.Box():
with gr.Row().style(equal_height=True):
size = gr.Dropdown(label="Model Size", choices=gio.sizes, value='base')
lang = gr.Dropdown(label="Language (Optional)", choices=gio.langs, value="none")
audio_file = gr.Audio(type="filepath")
with gr.Row().style(equal_height=True):
text = gr.Textbox(label="Transcription", placeholder="Transcription Output...", lines=10).style(show_copy_button=True, container=False)
# with gr.Row().style(equal_height=True):
summary = gr.Textbox(label="Summary", placeholder="Summary Output", lines=5)
keywords = gr.Textbox(label="Keywords", placeholder="Keywords Output", lines=5)
label = gr.Label(label="Sentiment Analysis")
with gr.Row().style(equal_height=True):
clear = gr.ClearButton([text], scale=1)
btn = gr.Button("Get video insights", variant='primary', scale=1) # Updated button label
btn.click(gio.from_audio_input, inputs=[lang, size, audio_file], outputs=[text, summary, keywords, label])
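
# Informational "About the app" section rendered below the main interface.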
with block:
gr.Markdown("About the app:")
with gr.Accordion("What is YouTube Insights?", open=False):
gr.Markdown("YouTube Insights is a tool developed with academic purposes only, that creates summaries, keywords and sentiments analysis based on YouTube videos or user audio files.")
with gr.Accordion("How does it work?", open=False):
gr.Markdown("Works by using OpenAI's Whisper, DistilBART for summarization and VoiceLabT5 for Keyword Extraction.")
gr.HTML("""
<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<p style="margin-bottom: 10px; font-size: 96%">
2023 Master in Big Data & Data Science - Universidad Complutense de Madrid
</p>
</div>
""")

demo.launch()