import gradio as gr
from transformers import pipeline, VitsModel, AutoTokenizer
import numpy as np
import torch
import scipy.io.wavfile
# Load the Hausa speech-recognition and Hausa-to-English translation pipelines
pipe = pipeline(
    "automatic-speech-recognition",
    model="Baghdad99/saad-speech-recognition-hausa-audio-to-text",
)
translator = pipeline("text2text-generation", model="Baghdad99/saad-hausa-text-to-english-text")

# Load the VITS model and tokenizer for English text-to-speech
model = VitsModel.from_pretrained("facebook/mms-tts-eng")
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")
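
# For reference, the two pipelines return differently shaped results; the values
# below are illustrative placeholders, not actual model output:
#   pipe(...)       -> {"text": "<Hausa transcription>"}
#   translator(...) -> [{"generated_text": "<English translation>"}]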
# Define the function to translate speech
def translate_speech(audio):
    # Gradio's "numpy" audio type yields a (sample_rate, int16 samples) tuple
    sample_rate, audio_data = audio

    # Collapse stereo recordings to mono before transcription
    if audio_data.ndim > 1:
        audio_data = audio_data.mean(axis=1)

    # The ASR pipeline expects float32 samples together with their sampling rate
    audio_data = audio_data.astype(np.float32) / 32768.0
    output = pipe({"sampling_rate": sample_rate, "raw": audio_data})
    print(f"Output: {output}")  # Print the output to see what it contains

    # The pipeline returns a dict; check that it contains the transcription
    if "text" in output:
        transcription = output["text"]
    else:
        print("The output does not contain 'text'")
        return

    # Use the translation pipeline to translate the transcription
    translated_text = translator(transcription)
    print(f"Translated text: {translated_text}")  # Print the translated text to see what it contains
    english_text = translated_text[0]["generated_text"]

    # Use the VITS model to synthesize the translated text into speech
    inputs = tokenizer(english_text, return_tensors="pt")
    with torch.no_grad():
        waveform = model(**inputs).waveform
    speech = waveform.squeeze().cpu().numpy()

    # Save the synthesized speech to a WAV file
    scipy.io.wavfile.write("synthesized_speech.wav", rate=model.config.sampling_rate, data=speech)

    print("Translated text:", english_text)
    print("Synthesized speech data shape:", speech.shape)
    print("Sampling rate:", model.config.sampling_rate)
    return model.config.sampling_rate, speech
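
# A minimal sketch of a local sanity check, assuming a mono Hausa recording at
# "sample.wav" (a hypothetical path; swap in your own clip):
#
#   rate, data = scipy.io.wavfile.read("sample.wav")
#   out_rate, out_speech = translate_speech((rate, data))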
# Define the Gradio interface
iface = gr.Interface(
    fn=translate_speech,
    inputs=gr.Audio(sources=["microphone"], type="numpy"),
    outputs=gr.Audio(type="numpy"),
    title="Hausa to English Translation",
    description="Real-time demo for Hausa-to-English translation using speech recognition and text-to-speech synthesis.",
)
iface.launch()