import gradio as gr
from transformers import pipeline, VitsModel, AutoTokenizer
import numpy as np
import torch
import scipy.io.wavfile  # "import scipy" alone does not expose scipy.io.wavfile

# Hausa automatic speech recognition (audio -> Hausa text)
pipe = pipeline(
    "automatic-speech-recognition",
    model="Baghdad99/saad-speech-recognition-hausa-audio-to-text",
    tokenizer="Baghdad99/saad-speech-recognition-hausa-audio-to-text",
)

# Hausa text -> English text translation
translator = pipeline("text2text-generation", model="Baghdad99/saad-hausa-text-to-english-text")

# English text-to-speech (Meta MMS VITS model)
model = VitsModel.from_pretrained("facebook/mms-tts-eng")
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")
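
# Quick sanity check for the text stage alone, kept commented out. This is a
# sketch: the sample sentence is an assumption for illustration, not taken
# from the model card, and the exact output will vary.
#
# print(translator("Ina son ruwa"))  # expected shape: [{"generated_text": "..."}]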


def translate_speech(audio):
    sample_rate, audio_data = audio

    # Gradio delivers int16 PCM; the ASR pipeline expects float32 audio and
    # needs the sampling rate so it can resample to the model's expected rate.
    audio_data = audio_data.astype(np.float32) / 32768.0
    output = pipe({"sampling_rate": sample_rate, "raw": audio_data})
    print(f"ASR output: {output}")

    # For a single input the ASR pipeline returns a dict, not a list
    if "text" not in output:
        print("The ASR output does not contain 'text'")
        return None
    transcription = output["text"]

    # Translate the Hausa transcription to English
    translated_text = translator(transcription)
    print(f"Translated text: {translated_text}")

    # Synthesize English speech. VitsModel has no generate() method; a plain
    # forward pass returns the waveform. The text2text-generation pipeline
    # returns its result under the "generated_text" key.
    inputs = tokenizer(translated_text[0]["generated_text"], return_tensors="pt")
    with torch.no_grad():
        waveform = model(**inputs).waveform

    speech = waveform.squeeze().cpu().numpy()
    scipy.io.wavfile.write("synthesized_speech.wav", rate=model.config.sampling_rate, data=speech)

    print("Translated text:", translated_text[0]["generated_text"])
    print("Synthesized speech data shape:", speech.shape)
    print("Sampling rate:", model.config.sampling_rate)

    return model.config.sampling_rate, speech


iface = gr.Interface(
    fn=translate_speech,
    inputs=gr.Audio(sources=["microphone"], type="numpy"),  # gr.inputs/gr.outputs were removed in Gradio 4
    outputs=gr.Audio(type="numpy"),
    title="Hausa to English Translation",
    description="Realtime demo for Hausa to English translation using speech recognition and text-to-speech synthesis.",
)
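
# A minimal offline smoke test, kept commented out so it does not run before
# the server starts. It is a sketch under an assumption: a one-second 440 Hz
# tone stands in for microphone input (real Hausa speech is needed for a
# meaningful transcription); the tuple mirrors what gr.Audio delivers in
# "numpy" mode. Uncomment to exercise the plumbing end to end.
#
# tone = (np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16000)) * 32767).astype(np.int16)
# result = translate_speech((16000, tone))
# if result is not None:
#     rate, speech = result
#     print("Returned", speech.shape, "samples at", rate, "Hz")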

iface.launch()