# Gradio demo: Russian text-to-speech with TeraTTS ONNX models and RUAccent text preprocessing.
import gradio as gr
from infer_onnx import TTS
from ruaccent import RUAccent

# Title shown above the interface; it links to the repository with the models.
title = "GitHub with models: https://github.com/Tera2Space/RUTTS"

# Available voices; each repository name is loaded into an ONNX TTS instance keyed by that name.
models = ["TeraTTS/natasha-g2p-vits", "TeraTTS/glados2-g2p-vits", "TeraTTS/glados-g2p-vits"]
models = {k: TTS(k) for k in models}

# RUAccent adds word-stress marks and restores "ё" in the input text before synthesis.
accentizer = RUAccent(workdir="./model/ruaccent")
accentizer.load(omograph_model_size='big', use_dictionary=True, load_yo_homographs_model=True)
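# For reference, process_all returns the text with stress marks added; RUAccent conventionally
# marks the stressed vowel with "+" (e.g. "привет" -> "прив+ет"). The example is illustrative;
# the exact output depends on the loaded models and dictionary.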


def text_to_speech(model_name, length_scale, text, prep_text, prep_yo):
    # Optionally preprocess the text: add stress marks and resolve "ё"-homographs.
    if prep_text:
        text = accentizer.process_all(text, process_yo_omographs=prep_yo)

    # Synthesize with the selected model and write the result to a temporary WAV file.
    audio = models[model_name](text, length_scale=length_scale)
    models[model_name].save_wav(audio, 'temp.wav')

    return 'temp.wav', f"Обработанный текст: '{text}'"  # "Processed text: ..."
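

# Calling a model directly, without the Gradio UI (a sketch based on the calls above;
# the input text and output file name are illustrative):
#   audio = models["TeraTTS/natasha-g2p-vits"]("Привет, мир!", length_scale=1.2)
#   models["TeraTTS/natasha-g2p-vits"].save_wav(audio, "example.wav")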
# Gradio input controls (UI labels are in Russian; English glosses in the trailing comments).
model_choice = gr.Dropdown(choices=list(models.keys()), value="TeraTTS/natasha-g2p-vits", label="Выберите модель")  # "Choose a model"
input_text = gr.Textbox(label="Введите текст для синтеза речи")  # "Enter text for speech synthesis"
prep_text = gr.Checkbox(label="Предобработать", info="Хотите предобработать текст? (ударения, ё)", value=True)  # "Preprocess the text? (stress marks, ё)"
prep_yo = gr.Checkbox(label="BETA Ё-омографы", info="Хотите обрабатывать Ё-омографы? (все - всё)", value=True)  # "Process ё-homographs? (все - всё)"
length_scale = gr.Slider(minimum=0.1, maximum=2.0, label="Length scale (увеличить длину звучания). По умолчанию: 1.2", value=1.2)  # stretches the audio duration; default: 1.2

# Output components: the synthesized audio and the (possibly preprocessed) text.
output_audio = gr.Audio(label="Аудио", type="filepath")  # "Audio"; the handler returns a path to temp.wav
output_text = gr.Textbox(label="Обработанный текст")  # "Processed text"

# Assemble the interface; the order of inputs must match text_to_speech's parameter order.
iface = gr.Interface(fn=text_to_speech, inputs=[model_choice, length_scale, input_text, prep_text, prep_yo], outputs=[output_audio, output_text], title=title)
iface.launch()
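
# Running this script (e.g. `python app.py`) starts a local Gradio server and prints the URL
# it is served on (http://127.0.0.1:7860 by default); passing share=True to iface.launch()
# would additionally create a temporary public link.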