import gradio as gr
import styletts2importable
import ljspeechimportable
import torch
import os
from tortoise.utils.text import split_and_recombine_text
import numpy as np
import pickle
theme = gr.themes.Base(
    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
)
voicelist = ['f-us-1', 'f-us-2', 'f-us-3', 'f-us-4', 'm-us-1', 'm-us-2', 'm-us-3', 'm-us-4']
voices = {}
import phonemizer
global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)
# todo: cache computed style, load using pickle
# if os.path.exists('voices.pkl'):
#     with open('voices.pkl', 'rb') as f:
#         voices = pickle.load(f)
# else:
for v in voicelist:
    voices[v] = styletts2importable.compute_style(f'voices/{v}.wav')
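# A possible implementation of the caching TODO above (untested sketch, not part of the
# original demo); it assumes compute_style outputs are picklable and that the Space can
# write voices.pkl next to app.py:
# if os.path.exists('voices.pkl'):
#     with open('voices.pkl', 'rb') as f:
#         voices = pickle.load(f)
# else:
#     for v in voicelist:
#         voices[v] = styletts2importable.compute_style(f'voices/{v}.wav')
#     with open('voices.pkl', 'wb') as f:
#         pickle.dump(voices, f)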
# def synthesize(text, voice, multispeakersteps):
#     if text.strip() == "":
#         raise gr.Error("You must enter some text")
#     # if len(global_phonemizer.phonemize([text])) > 300:
#     if len(text) > 300:
#         raise gr.Error("Text must be under 300 characters")
#     v = voice.lower()
#     # return (24000, styletts2importable.inference(text, voices[v], alpha=0.3, beta=0.7, diffusion_steps=7, embedding_scale=1))
#     return (24000, styletts2importable.inference(text, voices[v], alpha=0.3, beta=0.7, diffusion_steps=multispeakersteps, embedding_scale=1))
def synthesize(text, voice, lngsteps, progress=gr.Progress()):
    if text.strip() == "":
        raise gr.Error("You must enter some text")
    if len(text) > 7500:
        raise gr.Error("Text must be <7.5k characters")
    texts = split_and_recombine_text(text)
    v = voice.lower()
    audios = []
    for t in progress.tqdm(texts):
        audios.append(styletts2importable.inference(t, voices[v], alpha=0.3, beta=0.7, diffusion_steps=lngsteps, embedding_scale=1))
    return (24000, np.concatenate(audios))
# def longsynthesize(text, voice, lngsteps, password, progress=gr.Progress()):
#     if password == os.environ['ACCESS_CODE']:
#         if text.strip() == "":
#             raise gr.Error("You must enter some text")
#         if lngsteps > 25:
#             raise gr.Error("Max 25 steps")
#         if lngsteps < 5:
#             raise gr.Error("Min 5 steps")
#         texts = split_and_recombine_text(text)
#         v = voice.lower()
#         audios = []
#         for t in progress.tqdm(texts):
#             audios.append(styletts2importable.inference(t, voices[v], alpha=0.3, beta=0.7, diffusion_steps=lngsteps, embedding_scale=1))
#         return (24000, np.concatenate(audios))
#     else:
#         raise gr.Error('Wrong access code')
def clsynthesize(text, voice, vcsteps, progress=gr.Progress()):
    # if text.strip() == "":
    #     raise gr.Error("You must enter some text")
    # # if global_phonemizer.phonemize([text]) > 300:
    # if len(text) > 400:
    #     raise gr.Error("Text must be under 400 characters")
    # # return (24000, styletts2importable.inference(text, styletts2importable.compute_style(voice), alpha=0.3, beta=0.7, diffusion_steps=20, embedding_scale=1))
    # return (24000, styletts2importable.inference(text, styletts2importable.compute_style(voice), alpha=0.3, beta=0.7, diffusion_steps=vcsteps, embedding_scale=1))
    if text.strip() == "":
        raise gr.Error("You must enter some text")
    if len(text) > 7500:
        raise gr.Error("Text must be <7.5k characters")
    texts = split_and_recombine_text(text)
    audios = []
    for t in progress.tqdm(texts):
        audios.append(styletts2importable.inference(t, styletts2importable.compute_style(voice), alpha=0.3, beta=0.7, diffusion_steps=vcsteps, embedding_scale=1))
    return (24000, np.concatenate(audios))
def ljsynthesize(text, steps, progress=gr.Progress()):
    # if text.strip() == "":
    #     raise gr.Error("You must enter some text")
    # # if global_phonemizer.phonemize([text]) > 300:
    # if len(text) > 400:
    #     raise gr.Error("Text must be under 400 characters")
    noise = torch.randn(1, 1, 256).to('cuda' if torch.cuda.is_available() else 'cpu')
    # return (24000, ljspeechimportable.inference(text, noise, diffusion_steps=7, embedding_scale=1))
    if text.strip() == "":
        raise gr.Error("You must enter some text")
    if len(text) > 7500:
        raise gr.Error("Text must be <7.5k characters")
    texts = split_and_recombine_text(text)
    audios = []
    for t in progress.tqdm(texts):
        audios.append(ljspeechimportable.inference(t, noise, diffusion_steps=steps, embedding_scale=1))
    return (24000, np.concatenate(audios))
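# Standalone usage sketch (not part of the original app): the model wrappers imported above
# can also be called outside Gradio, e.g. to write a WAV file. Assumes scipy is available,
# which this demo's requirements may not include.
# from scipy.io import wavfile
# wav = styletts2importable.inference("StyleTTS 2 works better on full sentences.",
#                                     voices['m-us-2'], alpha=0.3, beta=0.7, diffusion_steps=7, embedding_scale=1)
# wavfile.write("sample.wav", 24000, wav)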
with gr.Blocks() as vctk:  # just realized it isn't vctk but libritts but i'm too lazy to change it rn
    with gr.Row():
        with gr.Column(scale=1):
            inp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            voice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-2', interactive=True)
            multispeakersteps = gr.Slider(minimum=3, maximum=15, value=7, step=1, label="Diffusion Steps", info="In theory, more steps means higher quality but slower synthesis; in practice we can't hear a difference, so try fewer steps first.", interactive=True)
            # use_gruut = gr.Checkbox(label="Use alternate phonemizer (Gruut) - Experimental")
        with gr.Column(scale=1):
            btn = gr.Button("Synthesize", variant="primary")
            audio = gr.Audio(interactive=False, label="Synthesized Audio")
    btn.click(synthesize, inputs=[inp, voice, multispeakersteps], outputs=[audio], concurrency_limit=4)
with gr.Blocks() as clone:
    with gr.Row():
        with gr.Column(scale=1):
            clinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            clvoice = gr.Audio(label="Voice", interactive=True, type='filepath', max_length=300)
            vcsteps = gr.Slider(minimum=3, maximum=20, value=20, step=1, label="Diffusion Steps", info="In theory, more steps means higher quality but slower synthesis; in practice we can't hear a difference, so try fewer steps first.", interactive=True)
        with gr.Column(scale=1):
            clbtn = gr.Button("Synthesize", variant="primary")
            claudio = gr.Audio(interactive=False, label="Synthesized Audio")
    clbtn.click(clsynthesize, inputs=[clinp, clvoice, vcsteps], outputs=[claudio], concurrency_limit=4)
# with gr.Blocks() as longText:
#     with gr.Row():
#         with gr.Column(scale=1):
#             lnginp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
#             lngvoice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-1', interactive=True)
#             lngsteps = gr.Slider(minimum=5, maximum=25, value=10, step=1, label="Diffusion Steps", info="Higher = better quality, but slower", interactive=True)
#             lngpwd = gr.Textbox(label="Access code", info="This feature is in beta. You need an access code to use it as it uses more resources and we would like to prevent abuse")
#         with gr.Column(scale=1):
#             lngbtn = gr.Button("Synthesize", variant="primary")
#             lngaudio = gr.Audio(interactive=False, label="Synthesized Audio")
#     lngbtn.click(longsynthesize, inputs=[lnginp, lngvoice, lngsteps, lngpwd], outputs=[lngaudio], concurrency_limit=4)
with gr.Blocks() as lj:
    with gr.Row():
        with gr.Column(scale=1):
            ljinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            ljsteps = gr.Slider(minimum=3, maximum=20, value=3, step=1, label="Diffusion Steps", info="In theory, more steps means higher quality but slower synthesis; in practice we can't hear a difference, so try fewer steps first.", interactive=True)
        with gr.Column(scale=1):
            ljbtn = gr.Button("Synthesize", variant="primary")
            ljaudio = gr.Audio(interactive=False, label="Synthesized Audio")
    ljbtn.click(ljsynthesize, inputs=[ljinp, ljsteps], outputs=[ljaudio], concurrency_limit=4)
with gr.Blocks(title="StyleTTS 2", css="footer{display:none !important}", theme=theme) as demo:
    gr.Markdown("""# StyleTTS 2
[Paper](https://arxiv.org/abs/2306.07691) - [Samples](https://styletts2.github.io/) - [Code](https://github.com/yl4579/StyleTTS2)
<img src="https://storage.googleapis.com/ad-auris-django-bucket/media/Screenshot%202023-10-13%20at%2012.30.56%20PM.png" alt="alt text" width="250" height="250"/> <img src="https://storage.googleapis.com/ad-auris-django-bucket/media/DALL%C2%B7E%202023-12-11%2001.17.36%20-%20Create%20a%20logo%20redesign%20that%20is%20minimalistic%2C%20incorporating%20cosmic%20and%20AI%20themes.%20The%20design%20should%20feature%20a%20simplified%20version%20of%20the%20'AD%20AURIS'%20bars.png" alt="alt text" width="250" height="250"/>
## Ve's cliffnotes on StyleTTS2
StyleTTS2 is an advanced text-to-speech (TTS) model that represents a significant step forward in speech synthesis. Developed by Yinghao Aaron Li and his team, it is designed to achieve human-level TTS synthesis by leveraging style diffusion and adversarial training with large speech language models (SLMs). You can find the official GitHub repository for StyleTTS2 [here](https://github.com/yl4579/StyleTTS2).
### Key Features of StyleTTS2:
- **Style Diffusion:** StyleTTS2 models styles as latent random variables sampled with a diffusion model, so it can generate the most suitable style for the text without requiring reference speech (a toy sketch follows this list). It implements latent diffusion efficiently while benefiting from the diverse speech synthesis capabilities of diffusion models.
- **Adversarial Training with SLMs:** Large pre-trained SLMs, such as WavLM, are used as discriminators, a novel aspect of the model. Combined with differentiable duration modeling for end-to-end training, this improves speech naturalness.
- **Human-Level TTS Synthesis:** StyleTTS2 surpasses human recordings on single-speaker datasets and matches them on multi-speaker datasets, as judged by native English speakers. When trained on the LibriTTS dataset, it outperforms previous publicly available models for zero-shot speaker adaptation.
- **Training and Inference:** The end-to-end training process jointly optimizes all components, including direct waveform synthesis and adversarial training with SLMs. Differentiable duration modeling and a non-parametric differentiable upsampler keep training stable.
- **Diverse Speech Generation:** Style diffusion allows diverse speech generation without reference audio, a significant improvement over traditional TTS models that often rely on reference speech for expressiveness.
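To make the style-diffusion idea more concrete, here is a toy sketch of ancestral diffusion sampling of a style vector. It is illustrative only: `ToyDenoiser`, `sample_style`, the dimensions, and the noise schedule are invented for this example and are not StyleTTS2's actual architecture or sampler.
```python
import torch
import torch.nn as nn

# Stand-in noise predictor; a real model would condition on the text encoder's output.
class ToyDenoiser(nn.Module):
    def __init__(self, style_dim=128, text_dim=512):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(style_dim + text_dim + 1, 256), nn.ReLU(), nn.Linear(256, style_dim))
    def forward(self, noisy_style, text_emb, t):
        return self.net(torch.cat([noisy_style, text_emb, t], dim=-1))

def sample_style(denoiser, text_emb, style_dim=128, steps=10):
    # Ancestral DDPM-style sampling: start from Gaussian noise and iteratively denoise.
    betas = torch.linspace(1e-4, 0.02, steps)
    alphas = 1.0 - betas
    alpha_bars = torch.cumprod(alphas, dim=0)
    s = torch.randn(1, style_dim)                      # styles are latent random variables
    for i in reversed(range(steps)):
        t = torch.full((1, 1), i / steps)
        eps = denoiser(s, text_emb, t)                 # predicted noise at this step
        s = (s - betas[i] / torch.sqrt(1 - alpha_bars[i]) * eps) / torch.sqrt(alphas[i])
        if i > 0:
            s = s + torch.sqrt(betas[i]) * torch.randn_like(s)
    return s  # style vector used to condition the decoder; no reference audio needed

style = sample_style(ToyDenoiser(), text_emb=torch.randn(1, 512))
```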
### What makes the speech generation go at lightning speed:
StyleTTS2 incorporates several methods that make speech generation faster and more efficient than traditional text-to-speech (TTS) models:
- **End-to-End Training:** All components of the TTS system are optimized simultaneously, so inference does not rely on separate, fixed components such as a pre-trained vocoder to convert mel-spectrograms into waveforms. This integrated approach speeds up both training and inference.
- **Non-Autoregressive Framework:** Like its predecessor StyleTTS, StyleTTS2 is non-autoregressive: instead of generating each audio segment sequentially, it predicts many parts of the speech in parallel, which is faster than autoregressive generation (see the timing sketch below).
- **Direct Waveform Synthesis:** A modified decoder generates the waveform directly rather than producing intermediate representations like mel-spectrograms that must then be vocoded, removing an extra stage from the pipeline.
- **Diffusion Model for Style Sampling:** Style diffusion samples speech styles efficiently, and can be faster than traditional style encoding, which requires additional processing to capture the style from reference speech.
- **Differentiable Upsampling:** Differentiable upsampling is more efficient and stable during training, which helps the model converge faster and reduces overall training time.
- **Optimized Model Components:** Components such as the multi-period discriminator (MPD) and multi-resolution discriminator (MRD), together with efficient loss functions like the LSGAN loss, contribute to more efficient training and potentially faster speech generation.
While these methods contribute to the efficiency of StyleTTS2, the actual speed of speech generation can also depend on other factors such as the hardware used, the complexity of the input text, and the specific configuration of the model.
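As a rough illustration of the non-autoregressive point above, the schematic sketch below times a sequential frame-by-frame loop against a single batched call. The frame count and the stand-in `frame_decoder` are invented for this example and are unrelated to StyleTTS2's real decoder.
```python
import torch, time

frames, dim = 500, 80
frame_decoder = torch.nn.Linear(dim, dim)  # stand-in for one decoding step

def autoregressive():
    # Each frame depends on the previous one, so the loop cannot be parallelised.
    out = []
    x = torch.zeros(1, dim)
    for _ in range(frames):
        x = frame_decoder(x)
        out.append(x)
    return torch.cat(out)

def non_autoregressive():
    # All frames are predicted in one batched call from the (duration-upsampled) inputs.
    x = torch.zeros(frames, dim)
    return frame_decoder(x)

for fn in (autoregressive, non_autoregressive):
    t0 = time.perf_counter()
    fn()
    print(fn.__name__, f"{time.perf_counter() - t0:.4f}s")
```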
**NOTE: StyleTTS 2 does better on longer texts.** For example, making it say "hi" will produce a lower-quality result than making it say a longer phrase.""")
    # gr.TabbedInterface([vctk, clone, lj, longText], ['Multi-Voice', 'Voice Cloning', 'LJSpeech', 'Long Text [Beta]'])
    gr.TabbedInterface([vctk, clone, lj], ['Multi-Voice', 'Voice Cloning', 'LJSpeech'])
    gr.Markdown("""
Demo by [print-VarunSharma](https://github.com/print-VarunSharma). I am not affiliated with the StyleTTS 2 authors.
Run this demo locally using Docker:
```bash
docker run -it -p 7860:7860 --platform=linux/amd64 --gpus all registry.hf.space/styletts2-styletts2:latest python app.py
```
""")
if __name__ == "__main__":
    demo.queue(api_open=False, max_size=15).launch(show_api=False)