# Hausa speech -> English speech translation demo (Gradio + Transformers).
import gradio as gr
from transformers import pipeline
import numpy as np
# Hugging Face model identifiers for each stage of the pipeline.
_ASR_MODEL = "Baghdad99/saad-speech-recognition-hausa-audio-to-text"
_MT_MODEL = "Baghdad99/saad-hausa-text-to-english-text"
_TTS_MODEL = "Baghdad99/english_voice_tts"

# Stage 1: Hausa speech -> Hausa text.
pipe = pipeline(
    "automatic-speech-recognition",
    model=_ASR_MODEL,
    tokenizer=_ASR_MODEL,
)
# Stage 2: Hausa text -> English text.
translator = pipeline("text2text-generation", model=_MT_MODEL)
# Stage 3: English text -> spoken English audio.
tts = pipeline("text-to-speech", model=_TTS_MODEL)
def translate_speech(audio):
    """Transcribe Hausa audio, translate it to English, and synthesize speech.

    Parameters
    ----------
    audio : tuple[int, np.ndarray] | None
        ``(sample_rate, audio_data)`` as delivered by Gradio's numpy-typed
        microphone input; ``None`` when no recording was made.

    Returns
    -------
    tuple[int, np.ndarray] | None
        ``(sampling_rate, int16_waveform)`` for Gradio's audio output, or
        ``None`` when any pipeline stage produces an unexpected result.
    """
    if audio is None:  # Gradio sends None when no audio was recorded
        return None
    sample_rate, audio_data = audio

    # Stage 1 — speech recognition: Hausa audio -> Hausa text.
    output = pipe(audio_data)
    print(f"Output: {output}")  # Print the output to see what it contains
    if 'text' not in output:
        print("The output does not contain 'text'")
        return None
    transcription = output["text"]

    # Stage 2 — translation: Hausa text -> English text.
    # BUG FIX: the original passed return_tensors="pt", which makes the
    # text2text-generation pipeline return raw token ids instead of a
    # 'generated_text' string, so the check below always failed.
    translated_text = translator(transcription)
    print(f"Translated text: {translated_text}")  # Print the translated text to see what it contains
    if not translated_text or 'generated_text' not in translated_text[0]:
        print("The translated text does not contain 'generated_text'")
        return None
    translated_text_str = translated_text[0]['generated_text']

    # Stage 3 — text-to-speech: English text -> waveform.
    # The TTS pipeline returns {"audio": np.ndarray, "sampling_rate": int};
    # the original called .numpy() on that dict, raising AttributeError.
    synthesised_speech = tts(translated_text_str)
    waveform = np.asarray(synthesised_speech["audio"]).squeeze()

    # Scale float audio (assumed in [-1, 1]) to 16-bit PCM for Gradio.
    max_range = np.iinfo(np.int16).max  # 32767
    waveform_int16 = (waveform * max_range).astype(np.int16)

    # Prefer the sampling rate reported by the TTS model; fall back to the
    # original hard-coded 16 kHz if the pipeline omits it.
    return synthesised_speech.get("sampling_rate", 16000), waveform_int16
# Define the Gradio interface.
# FIX: gr.inputs / gr.outputs and the `source=` keyword were removed in
# modern Gradio; gr.Audio with `sources=` is the current API.
iface = gr.Interface(
    fn=translate_speech,
    inputs=gr.Audio(sources=["microphone"], type="numpy"),
    outputs=gr.Audio(type="numpy"),
    title="Hausa to English Translation",
    description="Realtime demo for Hausa to English translation using speech recognition and text-to-speech synthesis.",
)

# Guard the entry point so importing this module does not start the server.
if __name__ == "__main__":
    iface.launch()