import gradio as gr
from transformers import pipeline

# Fine-tuned Whisper checkpoint on the Hub to use for transcription
model_id = 'heisenberg3376/whisper-tiny-minds14'

pipe = pipeline("automatic-speech-recognition", model=model_id)


def transcribe_speech(filepath):
    # Chunk long audio into 30-second windows and batch them for faster inference
    output = pipe(
        filepath,
        max_new_tokens=256,
        generate_kwargs={
            "task": "transcribe",
        },
        chunk_length_s=30,
        batch_size=8,
    )
    return output["text"]


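# Blocks container that hosts both interfaces as tabs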
demo = gr.Blocks()

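# Interface for transcribing speech recorded from the microphone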
mic_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources="microphone", type="filepath"),
    outputs=gr.Textbox(),
)

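# Interface for transcribing an uploaded audio file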
file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources="upload", type="filepath"),
    outputs=gr.Textbox(),
)

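# Present the two interfaces as separate tabs in the demo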
with demo:
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )

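# debug=True surfaces errors in the console; share=True creates a temporary public link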
demo.launch(debug=True, share=True)