import importlib
import re
import gradio as gr
import yaml
from gradio.inputs import Textbox, Audio
from inference.base_tts_infer import BaseTTSInfer
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
import numpy as np
from data_gen.tts.data_gen_utils import is_sil_phoneme, PUNCS


class GradioInfer:
    def __init__(self, exp_name, config, inference_cls, title, description, article, example_inputs):
        self.exp_name = exp_name
        self.config = config
        self.title = title
        self.description = description
        self.article = article
        self.example_inputs = example_inputs
        # Resolve the inference class from its dotted path, e.g. "package.module.ClassName".
        pkg = ".".join(inference_cls.split(".")[:-1])
        cls_name = inference_cls.split(".")[-1]
        self.inference_cls = getattr(importlib.import_module(pkg), cls_name)

    def greet(self, text, audio):
        # Split the input text into alternating (sentence, punctuation) pieces.
        sents = re.split(rf'([{PUNCS}])', text.replace('\n', ','))
        if sents[-1] not in list(PUNCS):
            sents = sents + ['.']
        audio_outs = []
        s = ""
        for i in range(0, len(sents), 2):
            if len(sents[i]) > 0:
                s += sents[i] + sents[i + 1]
            # Synthesize once the buffered text reaches ~400 characters, or at the final piece.
            if len(s) >= 400 or (i >= len(sents) - 2 and len(s) > 0):
                audio_out = self.infer_ins.infer_once({
                    'text': s,
                    'ref_audio': audio
                })
                # Convert the float waveform in [-1, 1] to 16-bit PCM.
                audio_out = audio_out * 32767
                audio_out = audio_out.astype(np.int16)
                audio_outs.append(audio_out)
                # Insert 0.3 s of silence between synthesized chunks.
                audio_outs.append(np.zeros(int(hp['audio_sample_rate'] * 0.3)).astype(np.int16))
                s = ""
        audio_outs = np.concatenate(audio_outs)
        # Gradio's "audio" output expects a (sample_rate, waveform) tuple.
        return hp['audio_sample_rate'], audio_outs

    def run(self):
        set_hparams(exp_name=self.exp_name, config=self.config)
        infer_cls = self.inference_cls
        self.infer_ins: BaseTTSInfer = infer_cls(hp)
        # Each example is a "text|ref_audio" string; split it into the two input fields.
        example_inputs = self.example_inputs
        for i in range(len(example_inputs)):
            text, ref_audio = example_inputs[i].split('|')
            print('text: ', text, 'ref_audio:', ref_audio)
            example_inputs[i] = [text, ref_audio]
        iface = gr.Interface(fn=self.greet,
                             inputs=[
                                 Textbox(lines=10, placeholder=None, default=example_inputs[0][0], label="input text"),
                                 Textbox(lines=10, placeholder=None, default=example_inputs[0][1], label="reference audio"),
                             ],
                             outputs="audio",
                             allow_flagging="never",
                             title=self.title,
                             description=self.description,
                             article=self.article,
                             examples=example_inputs,
                             enable_queue=True)
        iface.launch()


if __name__ == '__main__':
    gradio_config = yaml.safe_load(open('inference/gradio/gradio_settings.yaml'))
    g = GradioInfer(**gradio_config)
    g.run()
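
# For reference, a sketch of what inference/gradio/gradio_settings.yaml is assumed to
# contain: the keys mirror GradioInfer.__init__, while the values below are hypothetical
# placeholders rather than the repository's actual settings.
#
#   exp_name: my_exp                                  # hypothetical experiment name
#   config: configs/my_config.yaml                    # hypothetical config path
#   inference_cls: inference.my_tts_infer.MyTTSInfer  # dotted path to a BaseTTSInfer subclass
#   title: "TTS Demo"
#   description: "Enter text and a reference audio path."
#   article: ""
#   example_inputs:
#     - "Hello world.|assets/example_ref.wav"         # "text|ref_audio" format expected by run()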