Baghdad99 committed on
Commit
41298c4
1 Parent(s): ee17a2b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -1,6 +1,8 @@
1
  import gradio as gr
2
  from transformers import pipeline, AutoTokenizer
3
  import numpy as np
 
 
4
 
5
  # Load the pipeline for speech recognition and translation
6
  pipe = pipeline(
@@ -9,7 +11,8 @@ pipe = pipeline(
9
  tokenizer="Baghdad99/saad-speech-recognition-hausa-audio-to-text"
10
  )
11
  translator = pipeline("text2text-generation", model="Baghdad99/saad-hausa-text-to-english-text")
12
- tts = pipeline("text-to-speech", model="Baghdad99/english_voice_tts")
 
13
 
14
  # Define the function to translate speech
15
  def translate_speech(audio):
@@ -39,19 +42,16 @@ def translate_speech(audio):
39
  print("The translated text does not contain 'generated_token_ids'")
40
  return
41
 
42
- # Use the text-to-speech pipeline to synthesize the translated text
43
- synthesised_speech = tts(translated_text_str)
44
- print(f"Synthesised speech: {synthesised_speech}") # Print the synthesised speech to see what it contains
 
45
 
46
- # Check if the synthesised speech contains 'audio'
47
- if 'audio' in synthesised_speech:
48
- synthesised_speech_data = synthesised_speech['audio']
49
- else:
50
- print("The synthesised speech does not contain 'audio'")
51
- return
52
 
53
  # Scale the audio data to the range of int16 format
54
- synthesised_speech = (synthesised_speech_data * 32767).astype(np.float32)
55
 
56
  return 16000, synthesised_speech
57
 
 
1
  import gradio as gr
2
  from transformers import pipeline, AutoTokenizer
3
  import numpy as np
4
+ import torch
5
+ import scipy
6
 
7
  # Load the pipeline for speech recognition and translation
8
  pipe = pipeline(
 
11
  tokenizer="Baghdad99/saad-speech-recognition-hausa-audio-to-text"
12
  )
13
  translator = pipeline("text2text-generation", model="Baghdad99/saad-hausa-text-to-english-text")
14
+ model = VitsModel.from_pretrained("facebook/mms-tts-eng")
15
+ tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")
16
 
17
  # Define the function to translate speech
18
  def translate_speech(audio):
 
42
  print("The translated text does not contain 'generated_token_ids'")
43
  return
44
 
45
+ # Use the VITS model to synthesize the translated text into speech
46
+ inputs = tokenizer(translated_text_str, return_tensors="pt")
47
+ with torch.no_grad():
48
+ output = model(**inputs).waveform
49
 
50
+ # Save the synthesized speech to a WAV file
51
+ scipy.io.wavfile.write("synthesized_speech.wav", rate=model.config.sampling_rate, data=output.float().numpy())
 
 
 
 
52
 
53
  # Scale the audio data to the range of int16 format
54
+ synthesised_speech = (output * 32767).astype(np.int16)
55
 
56
  return 16000, synthesised_speech
57