import spaces
import re
import torch
import gradio as gr
import soundfile as sf
import numpy as np
from whisperspeech.pipeline import Pipeline
from whisperspeech.languages import LANGUAGES
from whisperspeech.utils import resampler
title = """# 🙋🏻♂️ Welcome to🌟Collabora🌬️💬📝WhisperSpeech
You can use this ZeroGPU Space to test out the current model [🌬️💬📝collabora/whisperspeech](https://huggingface.co/collabora/whisperspeech). 🌬️💬📝collabora/whisperspeech is An Open Source text-to-speech system built by inverting Whisper. Install it and use your command line interface locally with `pip install whisperspeech`. It's like Stable Diffusion but for speech – both powerful and easily customizable : so you can use it programmatically in your own pipelines! [Contribute to whisperspeech here](https://github.com/collabora/WhisperSpeech)
You can also use 🌬️💬📝WhisperSpeech by cloning this space. 🧬🔬🔍 Simply click here:
We're **celebrating the release of the whisperspeech** at [the LAION community, if you love open source ai learn more here : https://laion.ai/](https://laion.ai/) big thanks to the folks at huggingface for the community grant 🤗
### How to Use
Input text with tahe language identifiers provided to create a multilingual speech. Optionally you can add an audiosample to make a voice print.Scroll down and try the api <3 Gradio.
This space runs on ZeroGPU, so **you need to be patient** while you acquire the GPU and load the model the first time you make a request !
"""
text_examples = [
    ["<en> WhisperSpeech is an open-source library that helps you hack Whisper."],
    ["<en> WhisperSpeech is multi-lingual <es> y puede cambiar de idioma <hi> मध्य वाक्य में"],
    ["<en> The big difference between Europe <fr> et les Etats Unis <pl> jest to, że mamy tak wiele języków <uk> тут, в Європі"],
]
def parse_multilingual_text(input_text):
    # Match either a language tag like <en> or a run of plain text.
    pattern = r"(?:<(\w+)>)|([^<]+)"
    cur_lang = 'en'
    segments = []
    for lang, txt in re.findall(pattern, input_text):
        if lang:
            cur_lang = lang
        else:
            # Pad with spaces to give the model some time to switch languages.
            segments.append((cur_lang, f" {txt} "))
    if not segments:
        return [("en", "")]
    return segments
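
# A sketch of the expected parsing behavior (assumed example input):
#   parse_multilingual_text("Hello <fr> Bonjour")
#   -> roughly [('en', ' Hello  '), ('fr', '  Bonjour ')]
# Untagged text defaults to English, and each segment keeps its padding spaces.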
@spaces.GPU(enable_queue=True)
def generate_segment_audio(text, lang, speaker_audio, pipe):
    if not isinstance(text, str):
        text = text.decode("utf-8") if isinstance(text, bytes) else str(text)
    audio_data = pipe.generate(text, speaker_audio, lang)
    # Pass the output through the 24 kHz resampler so every segment shares one sample rate.
    resample_audio = resampler(newsr=24000)
    audio_data_resampled = next(resample_audio([{'sample_rate': 24000, 'samples': audio_data.cpu()}]))['samples_24k']
    audio_np = audio_data_resampled.cpu().numpy()
    # print("Shape after resampling:", audio_np.shape)  # debug
    return audio_np
def concatenate_audio_segments(segments):
    # Each segment is a (1, n_samples) array; join them along the time axis.
    return np.concatenate(segments, axis=1)
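
# For instance, two (1, n) mono segments join along the time axis:
#   concatenate_audio_segments([np.zeros((1, 10)), np.zeros((1, 5))]).shape == (1, 15)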
@spaces.GPU(enable_queue=True)
def whisper_speech_demo(multilingual_text, speaker_audio):
    segments = parse_multilingual_text(multilingual_text)
    if not segments:
        raise gr.Error("No valid language segments found. Please use the format: <en> text")
    pipe = Pipeline()
    if not hasattr(pipe, 's2a'):
        raise gr.Error("Pipeline initialization failed: s2a model not loaded.")
    speaker_url = speaker_audio if speaker_audio is not None else None
    audio_segments = []
    for lang, text in segments:
        text_str = text if isinstance(text, str) else str(text)
        audio_np = generate_segment_audio(text_str, lang, speaker_url, pipe)
        # print("Audio segment shape:", audio_np.shape)  # debug
        audio_segments.append(audio_np)
    concatenated_audio = concatenate_audio_segments(audio_segments)
    # print("Final concatenated audio shape:", concatenated_audio.shape)  # debug
    # Normalize to [-1, 1]; guard against an all-silent result to avoid dividing by zero.
    peak = np.max(np.abs(concatenated_audio))
    if peak > 0:
        concatenated_audio = concatenated_audio / peak
    # Gradio expects (sample_rate, samples) with samples shaped (n_samples, n_channels).
    return (24000, concatenated_audio.T)
with gr.Blocks() as demo:
gr.Markdown(title)
output_audio = gr.Audio(label="🌟Collabora🌬️💬📝WhisperSpeech")
generate_button = gr.Button("Try 🌟Collabora🌬️💬📝WhisperSpeech")
with gr.Accordion("🌟Collabora🌬️WhisperSpeech💬Voice Print and📝Language List", open=False):
with gr.Row():
speaker_input = gr.Audio(label="Upload or Record Speaker Audio (optional)🌬️💬",
sources=["upload", "microphone"])
with gr.Row():
with gr.Accordion("Available Languages and Their Tags", open=False):
formatted_language_list = "\n".join([f"`<{lang}>` {LANGUAGES[lang]}" for lang in LANGUAGES])
gr.Markdown(formatted_language_list)
with gr.Row():
text_input = gr.Textbox(label="Enter multilingual text💬📝",
placeholder="e.g., Hello Bonjour Hola")
with gr.Row():
with gr.Accordion("Try Multilingual Text Examples", open=False):
            gr.Examples(
                examples=text_examples,
                inputs=[text_input],
                label="Try these to get started!🌟🌬️"
            )
generate_button.click(whisper_speech_demo, inputs=[text_input, speaker_input], outputs=output_audio)
demo.launch()