import gradio as gr
import whisper
from pytube import YouTube
import yake
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration


class GradioInference():
    def __init__(self):
        self.sizes = list(whisper._MODELS.keys())
        self.langs = ["none"] + sorted(list(whisper.tokenizer.LANGUAGES.values()))
        self.current_size = "base"
        self.loaded_model = whisper.load_model(self.current_size)
        self.yt = None

        # Initialize the facebook/bart-large-cnn summarizer
        self.summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

        # Initialize the Voicelab/vlt5-base-keywords model for keyword extraction
        self.keyword_model = T5ForConditionalGeneration.from_pretrained("Voicelab/vlt5-base-keywords")
        self.tokenizer = T5Tokenizer.from_pretrained("Voicelab/vlt5-base-keywords")

    def __call__(self, link, lang, size):
        if self.yt is None:
            self.yt = YouTube(link)
        path = self.yt.streams.filter(only_audio=True)[0].download(filename="tmp.mp4")

        if lang == "none":
            lang = None

        if size != self.current_size:
            self.loaded_model = whisper.load_model(size)
            self.current_size = size

        # Transcribe the downloaded audio with Whisper
        results = self.loaded_model.transcribe(path, language=lang)

        # Perform summarization on the transcription
        transcription_summary = self.summarizer(
            results["text"], max_length=130, min_length=30, do_sample=False
        )

        # Extract keywords from the transcription with the vlt5 keyword model
        task_prefix = "Keywords: "
        input_sequence = task_prefix + results["text"]
        input_ids = self.tokenizer(
            input_sequence,
            return_tensors="pt",
            truncation=False,
        ).input_ids
        output = self.keyword_model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4)
        predicted = self.tokenizer.decode(output[0], skip_special_tokens=True)
        keywords = [x.strip() for x in predicted.split(",") if x.strip()]

        return results["text"], transcription_summary[0]["summary_text"], keywords

    def populate_metadata(self, link):
        self.yt = YouTube(link)
        return self.yt.thumbnail_url, self.yt.title


gio = GradioInference()
title = "Youtube Insights"
description = "Your AI-powered Video Analytics"

block = gr.Blocks()

with block:
    gr.HTML(
        """
        <div style="text-align: center;">
          <h1>Youtube Insights</h1>
          <p>Your AI-powered Video Analytics</p>
        </div>
        """
    )
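
    # The original layout beyond this point is not present in the source; what
    # follows is a minimal sketch (an assumption, not the original design) of one
    # way to wire the pieces defined above -- gio.sizes, gio.langs,
    # gio.populate_metadata, and gio.__call__ -- into the Blocks UI. Component
    # names such as `sz`, `lang`, `link`, and `btn` are placeholders.
    with gr.Group():
        with gr.Row():
            sz = gr.Dropdown(label="Model size", choices=gio.sizes, value="base")
            lang = gr.Dropdown(label="Language (optional)", choices=gio.langs, value="none")
        link = gr.Textbox(label="YouTube link", placeholder="Enter a YouTube link")
        with gr.Row():
            img = gr.Image(label="Thumbnail")
            video_title = gr.Textbox(label="Video title")
        text = gr.Textbox(label="Transcription", lines=8)
        summary = gr.Textbox(label="Summary", lines=5)
        keywords = gr.Textbox(label="Keywords", lines=2)
        btn = gr.Button("Analyze video")

        # Fetch thumbnail and title as soon as a link is entered, and run the
        # transcribe/summarize/keyword pipeline on button click.
        link.change(gio.populate_metadata, inputs=[link], outputs=[img, video_title])
        btn.click(gio, inputs=[link, lang, sz], outputs=[text, summary, keywords])

block.launch()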