import gradio as gr
import whisper
from langcodes import Language


def speech_to_text(tmp_filename, uploaded, model_size):
    # Prefer the uploaded file; fall back to the microphone recording.
    model = whisper.load_model(model_size)
    source = uploaded if uploaded is not None else tmp_filename
    result = model.transcribe(source)
    language = Language.make(language=result["language"]).display_name()
    return f'Detected language: {language}\n\nYou said: {result["text"]}'


gr.Interface(
    title="Whisper by OpenAI",
    thumbnail="https://cdn.openai.com/whisper/asr-summary-of-model-architecture-desktop.svg",
    css="""
        .gr-prose p {text-align: center;}
        .gr-button {background: black; color: white}
    """,
    description=(
        "Whisper is an automatic speech recognition (ASR) system trained on "
        "680,000 hours of multilingual and multitask supervised data collected "
        "from the web."
    ),
    fn=speech_to_text,
    inputs=[
        gr.Audio(source="microphone", type="filepath", label="Record your voice with your microphone"),
        gr.Audio(source="upload", type="filepath", label="Upload audio"),
        gr.Dropdown(label="Select model size", value="base", choices=["tiny", "base", "small", "medium", "large"]),
    ],
    outputs="text",
).launch()