import os

import gradio as gr
from faster_whisper import WhisperModel
from pytube import YouTube

# Initialize the Whisper model (CPU, int8 quantization to keep memory usage low)
model = WhisperModel("base", device="cpu", compute_type="int8")


def transcribe_audio(audio_path):
    # Transcribe a local audio file and join all segment texts into one string
    segments, _ = model.transcribe(audio_path, beam_size=5)
    return " ".join(segment.text for segment in segments)


def process_youtube(youtube_url):
    # Download the audio-only stream of a YouTube video and transcribe it
    try:
        yt = YouTube(youtube_url)
        audio_stream = yt.streams.filter(only_audio=True).first()
        if not os.path.exists("temp"):
            os.makedirs("temp")
        output_path = audio_stream.download(output_path="temp")
        return transcribe_audio(output_path)
    except Exception as e:
        return f"Error processing YouTube URL: {str(e)}"


def transcribe(audio_file, youtube_url):
    # Prefer the uploaded file; fall back to the YouTube URL if no file is given
    if audio_file:
        return transcribe_audio(audio_file)
    elif youtube_url:
        return process_youtube(youtube_url)
    else:
        return "Please provide either an audio file or a YouTube URL."


# Define the Gradio interface
iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(type="filepath", label="Upload Audio File"),
        gr.Textbox(label="Or Enter YouTube URL"),
    ],
    outputs="text",
    title="Whisper Transcription App",
    description="Upload an audio file or provide a YouTube URL to transcribe. Note: This is running on CPU, so processing might be slower.",
)

# Launch the app
iface.launch()