import os
os.system("pip install git+https://github.com/openai/whisper.git")
import gradio as gr
import whisper


def speech_to_text(tmp_filename, model_size):
    model = whisper.load_model(model_size)

    # load audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(tmp_filename)
    audio = whisper.pad_or_trim(audio)

    # make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # detect the spoken language
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # decode the audio
    options = whisper.DecodingOptions()
    result = whisper.decode(model, mel, options)

    # print the recognized text
    print(result.text)
    return result.text
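# Because pad_or_trim clips the input, the decode path above only transcribes
# the first 30 seconds of a recording. A minimal sketch of an alternative the
# Space does not use: Whisper's high-level transcribe() method, which windows
# over full-length audio internally. The name transcribe_full is illustrative,
# not part of the original app.
def transcribe_full(tmp_filename, model_size):
    model = whisper.load_model(model_size)
    # transcribe() handles audio longer than 30 seconds and also
    # detects the language itself
    result = model.transcribe(tmp_filename)
    print(f"Detected language: {result['language']}")
    return result["text"]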
gr.Interface(
    theme="dark",
    title="Whisper by OpenAI",
    thumbnail="https://cdn.openai.com/whisper/asr-summary-of-model-architecture-desktop.svg",
    css="""
        .gr-prose p {text-align: center;}
        .gr-button {background: black; color: white}
    """,
    description="Whisper is an automatic speech recognition (ASR) system trained on 680,000 hours of multilingual and multitask supervised data collected from the web.",
    fn=speech_to_text,
    inputs=[
        gr.Audio(label="Record your voice with your microphone", source="microphone", type="filepath"),
        gr.Dropdown(label="Select model size", value="base", choices=["tiny", "base", "small", "medium", "large"]),
    ],
    outputs="text",
).launch()