# Markdown banner rendered at the top of the demo page.
INTROTXT = """#
Repo -> [Hugging Face - 🤗](https://huggingface.co/Respair/Tsukasa_Speech) (soon)
This space uses Tsukasa (24khz).
it is running on an old V100 in Middle East. I'm sorry but expect some occasional connection hiccups.
**Check the Read me tabs down below.**
Enjoy!
"""
import gradio as gr
import random
import os
import pickle
from gradio_client import Client
# Remote inference backend; the target Space/URL is read from the `src` env var.
client = Client(os.environ['src'])
# reference-wav filename -> path (populated after the voice list is scanned).
voices: dict = {}
# truncated text preview -> full example sentence (populated after file load).
example_texts: dict = {}
# Description prompts and their paired input texts (populated from files below).
prompts: list = []
inputs: list = []
# Shared UI theme for all tabs.
theme = gr.themes.Base(
    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
)
# Discover the bundled reference voices and map each filename to its path.
# (os.listdir already returns a list; the previous comprehension copy was redundant.)
voicelist = os.listdir("reference_sample_wavs")
for v in voicelist:
    voices[v] = f'reference_sample_wavs/{v}'
# Example sentences for the long-form tab. The file contains Japanese text, so
# decode explicitly as UTF-8 instead of relying on the platform default
# (which is e.g. cp1252 on many Windows hosts and would raise UnicodeDecodeError).
with open('Inference/random_texts.txt', 'r', encoding='utf-8') as r:
    random_texts = [line.strip() for line in r]
# Dropdown preview label (first 30 chars + ellipsis) -> full example text.
example_texts = {f"{text[:30]}...": text for text in random_texts}
def update_text_input(preview):
    """Resolve a dropdown preview label to the full example sentence."""
    full_text = example_texts[preview]
    return full_text
def get_random_text():
    """Pick one of the preloaded example sentences uniformly at random."""
    chosen = random.choice(random_texts)
    return chosen
# Paired description prompts and input texts for the prompt-guided tab.
# Explicit UTF-8: these files contain Japanese text and must not depend on
# the platform's default encoding (consistent with the other data loads).
with open('Inference/prompt.txt', 'r', encoding='utf-8') as p:
    prompts = [line.strip() for line in p]
with open('Inference/input_for_prompt.txt', 'r', encoding='utf-8') as i:
    inputs = [line.strip() for line in i]
# Index of the last (input, prompt) pair served; used to avoid immediate repeats.
last_idx = None
def get_random_prompt_pair():
    """Return a random (input text, description prompt) pair.

    Avoids returning the same index twice in a row across calls (tracked in
    the module-level ``last_idx``).

    Returns:
        tuple[str, str]: ``(inputs[idx], prompts[idx])`` for a random idx.

    Raises:
        ValueError: if either pool is empty (clearer than the bare
            ``random.randint`` error the old code produced).
    """
    global last_idx
    max_idx = min(len(prompts), len(inputs)) - 1
    if max_idx < 0:
        raise ValueError("No prompt/input pairs are available.")
    random_idx = random.randint(0, max_idx)
    # Re-roll only when another choice actually exists: with a single pair,
    # the unconditional `while random_idx == last_idx` would spin forever
    # after the first call.
    while max_idx > 0 and random_idx == last_idx:
        random_idx = random.randint(0, max_idx)
    last_idx = random_idx
    return inputs[random_idx], prompts[random_idx]
def Synthesize_Audio(text, voice=None, voice2=None, vcsteps=2, embscale=1, alpha=.4, beta=.4, ros=.1):
    """Forward a voice-guided synthesis request to the remote backend.

    An uploaded reference file path (``voice2``) is wrapped in the FileData
    payload shape the gradio_client endpoint expects before the call.
    """
    reference = voice2
    if reference is not None:
        reference = {"path": reference, "meta": {"_type": "gradio.FileData"}}
    # Positional arguments must match the remote endpoint's signature order.
    return client.predict(
        text,
        voice,
        reference,
        vcsteps,
        embscale,
        alpha,
        beta,
        ros,
        api_name="/Synthesize_Audio",
    )
def LongformSynth_Text(text, s_prev, Kotodama, alpha, beta, t, diffusion_steps, embedding_scale, rate_of_speech):
    """Forward a long-form synthesis request to the remote backend.

    NOTE(review): ``s_prev``, ``Kotodama`` and ``t`` are accepted to match
    the UI wiring but are not forwarded to the endpoint.
    """
    return client.predict(
        text,
        alpha,
        beta,
        diffusion_steps,
        embedding_scale,
        rate_of_speech,
        api_name="/LongformSynth_Text",
    )
def Inference_Synth_Prompt(text, description, Kotodama, alpha, beta, diffusion_steps, embedding_scale, rate_of_speech):
    """Forward a prompt-guided (voice design) synthesis request to the backend.

    NOTE(review): ``Kotodama`` is accepted to match the UI wiring but is
    not forwarded to the endpoint.
    """
    return client.predict(
        text,
        description,
        alpha,
        beta,
        diffusion_steps,
        embedding_scale,
        rate_of_speech,
        api_name="/Inference_Synth_Prompt",
    )
# ---- Tab 1: synthesis guided by a preset or user-uploaded reference voice ----
with gr.Blocks() as audio_inf:
    with gr.Row():
        with gr.Column(scale=1):
            inp = gr.Textbox(label="Text", info="Enter the text", value="きみの存在は、私の心の中で燃える小さな光のよう。きみがいない時、世界は白黒の写真みたいに寂しくて、何も輝いてない。きみの笑顔だけが、私の灰色の日々に色を塗ってくれる。離れてる時間は、めちゃくちゃ長く感じられて、きみへの想いは風船みたいにどんどん膨らんでいく。きみなしの世界なんて、想像できないよ。", interactive=True, scale=5)
            # Preset reference voices discovered from reference_sample_wavs/.
            voice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value=voicelist[7], interactive=True)
            # Optional user-provided reference audio, passed through as a file path.
            voice_2 = gr.Audio(label="Upload your own Audio", interactive=True, type='filepath', max_length=300, waveform_options={'waveform_color': '#a3ffc3', 'waveform_progress_color': '#e972ab'})
            with gr.Accordion("Advanced Parameters", open=False):
                alpha = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1, label="Alpha", info="a Diffusion sampler parameter handling the timbre, higher means less affected by the reference | 0 = diffusion is disabled", interactive=True)
                beta = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1, label="Beta", info="a Diffusion sampler parameter, higher means less affected by the reference | 0 = diffusion is disabled", interactive=True)
                multispeakersteps = gr.Slider(minimum=3, maximum=15, value=5, step=1, label="Diffusion Steps", interactive=True)
                embscale = gr.Slider(minimum=1, maximum=5, value=1, step=0.1, label="Intensity", info="will impact the expressiveness, if you raise it too much it'll break.", interactive=True)
                rate_of_speech = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label="Rate of Speech", info="Higher -> Faster", interactive=True)
        with gr.Column(scale=1):
            btn = gr.Button("Synthesize", variant="primary")
            audio = gr.Audio(interactive=False, label="Synthesized Audio", waveform_options={'waveform_color': '#a3ffc3', 'waveform_progress_color': '#e972ab'})
    # Wire the button to the remote /Synthesize_Audio endpoint proxy.
    btn.click(Synthesize_Audio, inputs=[inp, voice, voice_2, multispeakersteps, embscale, alpha, beta, rate_of_speech], outputs=[audio], concurrency_limit=4)
# ---- Tab 2: Kotodama text-sampler long-form synthesis ----
with gr.Blocks() as longform:
    with gr.Row():
        with gr.Column(scale=1):
            inp_longform = gr.Textbox(
                label="Text",
                info="Enter the text [Speaker: Text -> japanese or romaji both work, check the last example!] \n Also works without any names. ",
                value=list(example_texts.values())[4],
                interactive=True,
                scale=5
            )
            with gr.Row():
                example_dropdown = gr.Dropdown(
                    choices=list(example_texts.keys()),
                    label="Example Texts [pick one!]",
                    value=list(example_texts.keys())[0],
                    interactive=True
                )
            # Selecting an example replaces the textbox contents with its full text.
            example_dropdown.change(
                fn=update_text_input,
                inputs=[example_dropdown],
                outputs=[inp_longform]
            )
            with gr.Accordion("Advanced Parameters", open=False):
                alpha_longform = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1,
                                           label="Alpha",
                                           info="a Diffusion parameter handling the timbre, higher means less affected by the reference | 0 = diffusion is disabled",
                                           interactive=True)
                beta_longform = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1,
                                          label="Beta",
                                          info="a Diffusion parameter, higher means less affected by the reference | 0 = diffusion is disabled",
                                          interactive=True)
                diffusion_steps_longform = gr.Slider(minimum=3, maximum=15, value=10, step=1,
                                                     label="Diffusion Steps",
                                                     interactive=True)
                embedding_scale_longform = gr.Slider(minimum=1, maximum=5, value=1.25, step=0.1,
                                                     label="Intensity",
                                                     info="a Diffusion parameter, it will impact the expressiveness, if you raise it too much it'll break.",
                                                     interactive=True)
                rate_of_speech_longform = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1,
                                                    label="Rate of Speech",
                                                    info="Higher = Faster",
                                                    interactive=True)
        with gr.Column(scale=1):
            btn_longform = gr.Button("Synthesize", variant="primary")
            audio_longform = gr.Audio(interactive=False,
                                      label="Synthesized Audio",
                                      waveform_options={'waveform_color': '#a3ffc3', 'waveform_progress_color': '#e972ab'})
    # gr.State values fill LongformSynth_Text's unused parameters (see its signature).
    btn_longform.click(LongformSynth_Text,
                       inputs=[inp_longform,
                               gr.State(None),  # s_prev
                               gr.State(None),  # Kotodama
                               alpha_longform,
                               beta_longform,
                               gr.State(.8),  # t parameter
                               diffusion_steps_longform,
                               embedding_scale_longform,
                               rate_of_speech_longform],
                       outputs=[audio_longform],
                       concurrency_limit=4)
# ---- Tab 3: Kotodama prompt-sampler (description-guided) synthesis ----
with gr.Blocks() as prompt_inference:
    with gr.Row():
        with gr.Column(scale=1):
            text_prompt = gr.Textbox(
                label="Text",
                info="Enter the text to synthesize. This text will also be fed to the encoder. Make sure to see the Read Me for more details!",
                value=inputs[0],
                interactive=True,
                scale=5
            )
            description_prompt = gr.Textbox(
                label="Description",
                info="Enter a highly detailed, descriptive prompt that matches the vibe of your text to guide the synthesis.",
                value=prompts[0],
                interactive=True,
                scale=7
            )
            with gr.Row():
                random_btn = gr.Button('Random Example', variant='secondary')
            with gr.Accordion("Advanced Parameters", open=False):
                embedding_scale_prompt = gr.Slider(minimum=1, maximum=5, value=1, step=0.25,
                                                   label="Intensity",
                                                   info="it will impact the expressiveness, if you raise it too much it'll break.",
                                                   interactive=True)
                alpha_prompt = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1,
                                         label="Alpha",
                                         info="a Diffusion sampler parameter handling the timbre, higher means less affected by the reference | 0 = diffusion is disabled",
                                         interactive=True)
                beta_prompt = gr.Slider(minimum=0, maximum=1, value=0.0, step=0.1,
                                        label="Beta",
                                        info="a Diffusion sampler parameter, higher means less affected by the reference | 0 = diffusion is disabled",
                                        interactive=True)
                diffusion_steps_prompt = gr.Slider(minimum=3, maximum=15, value=10, step=1,
                                                   label="Diffusion Steps",
                                                   interactive=True)
                rate_of_speech_prompt = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1,
                                                  label="Rate of Speech",
                                                  info="Higher = Faster",
                                                  interactive=True)
        with gr.Column(scale=1):
            btn_prompt = gr.Button("Synthesize with Prompt", variant="primary")
            audio_prompt = gr.Audio(interactive=False,
                                    label="Prompt-based Synthesized Audio",
                                    waveform_options={'waveform_color': '#a3ffc3', 'waveform_progress_color': '#e972ab'})
    # Fill both textboxes with a matching random (input, prompt) example pair.
    random_btn.click(
        fn=get_random_prompt_pair,
        inputs=[],
        outputs=[text_prompt, description_prompt]
    )
    # gr.State(None) fills Inference_Synth_Prompt's unused Kotodama parameter.
    btn_prompt.click(Inference_Synth_Prompt,
                     inputs=[text_prompt,
                             description_prompt,
                             gr.State(None),
                             alpha_prompt,
                             beta_prompt,
                             diffusion_steps_prompt,
                             embedding_scale_prompt,
                             rate_of_speech_prompt],
                     outputs=[audio_prompt],
                     concurrency_limit=4)
notes = """
This work is somewhat different from your typical speech model. It offers a high degree of control
over the generation process, which means it's easy to inadvertently produce unimpressive outputs.
Kotodama and the Diffusion sampler can significantly help guide the generation towards
something that aligns with your input, but they aren't foolproof. turn off the diffusion sampler or
set it to very low values if it doesn't sound good to you.
The prompt encoder is also highly experimental and should be treated as a proof of concept. Due to the
overwhelming ratio of female to male speakers and the wide variation in both speakers and their expressions,
the prompt encoder may occasionally produce subpar or contradicting outputs. For example, high expressiveness alongside
high pitch has been associated with female speakers simply because I had orders of magnitude more of them in the dataset.
________________________________________________________
A useful note about the voice design and prompting:
\n
The vibe of the dialogue impacts the generated voice since the Japanese dialogue
and the prompts were jointly trained. This is a peculiar feature of the Japanese language.
For example if you use 俺 (ore)、僕(boku) or your input is overall masculine
you may get a guy's voice, even if you describe it as female in the prompt.
\n
The Japanese text that is fed to the prompt doesn't necessarily have to be
the same as your input, but we can't do it in this demo
to not make the page too convoluted. In a real world scenario, you can just use a
prompt with a suitable Japanese text to guide the model, get the style
then move on to apply it to whatever dialogue you wish your model to speak.
The pitch information in my data was accurately calculated, but it only works in comparison to the other speakers
so you may find a deep pitch may not be exactly too deep; although it actually is
when you compare it to others within the same data, also some of the gender labels
are inaccurate since we used a model to annotate them.
\n
The main goal of this inference method is to demonstrate that style can be mapped to description's embeddings
yielding reasonably good results.
Overall, I'm confident that with a bit of experimentation, you can achieve impressive results.
The model should work well out of the box 90% of the time without the need for extensive tweaking.
However, here are some tips in case you encounter issues:
この作業は、典型的なスピーチモデルとは少し異なります。生成プロセスに対して高い制御を提供するため、意図せずに
比較的にクオリティーの低い出力を生成してしまうことが容易です。
KotodamaとDiffusionサンプラーは、入力に沿ったものを生成するための大きな助けとなりますが、
万全というわけではありません。良いアウトプットが出ない場合は、ディフュージョンサンプラーをオフにするか、非常に低い値に設定してください。
プロンプトエンコーダも非常に実験的であり、概念実証として扱うべきです。女性話者対男性話者の比率が圧倒的で、
また話者とその表現に大きなバリエーションがあるため、エンコーダは質の低い出力を生成する可能性があります。
例えば、高い表現力は、データセットに多く含まれていた女性話者と関連付けられています。
それに、データのピッチ情報は正確に計算されましたが、それは他のスピーカーとの比較でしか機能しません...
だから、深いピッチが必ずしも深すぎるわけではないことに気づくかもしれません。
ただし、実際には、同じデータ内の他の人と比較すると、深すぎます。このインフレンスの主な目的は、
スタイルベクトルを記述にマッピングし、合理的に良い結果を得ることにあります。
全体として、少しの実験でほぼ望む結果を達成できると自信を持っています。90%のケースで、大幅な調整を必要とせず、
そのままでうまく動作するはずです。しかし、問題が発生した場合のためにいくつかのヒントがあります: