import gradio as gr
from transformers import pipeline

# Load the ASR pipeline once at import time so the Whisper checkpoint is
# not reloaded on every request.
model_id = "chaouch/whisper-small-dv"
pipe = pipeline("automatic-speech-recognition", model=model_id)


def transcribe_speech(filepath):
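    # Transcribe the recording; audio longer than 30 seconds is split into
    # 30-second chunks and decoded in batches of 8.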
    output = pipe(
        filepath,
        max_new_tokens=256,
        generate_kwargs={
            "task": "transcribe",
            "language": "sinhalese",
        },
        chunk_length_s=30,
        batch_size=8,
    )
    return output["text"]
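
# Simple Gradio demo: record speech from the microphone and display the
# transcribed text in a textbox.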
if __name__ == "__main__":
    demo = gr.Interface(
        fn=transcribe_speech,
        inputs=gr.Audio(sources=["microphone"], type="filepath"),
        outputs=gr.Textbox(),
    )
    demo.launch(debug=True)