import tempfile
from typing import Optional

import gradio as gr
import torch
from huggingface_hub import hf_hub_download
from TTS.api import TTS

CUDA = torch.cuda.is_available()

REPO_ID = "ayymen/Coqui-TTS-Vits-shi"

my_title = "Tamazight Text-to-Speech"
my_description = "This model is based on [VITS](https://github.com/jaywalnut310/vits), thanks to 🐸 [Coqui.ai](https://coqui.ai/)."

# Each example supplies a value for every input component below: the text,
# the optional speaker audio for voice cloning, and the sentence-splitting flag.
my_examples = [
    ["ⴰⵣⵓⵍ. ⵎⴰⵏⵣⴰⴽⵉⵏ?", None, True],
    ["ⵡⴰ ⵜⴰⵎⵖⴰⵔⵜ ⵎⴰ ⴷ ⵓⴽⴰⵏ ⵜⵙⴽⵔⵜ?", None, True],
    ["ⴳⵏ ⴰⴷ ⴰⴽ ⵉⵙⵙⴳⵏ ⵕⴱⴱⵉ ⵉⵜⵜⵓ ⴽ.", None, True],
    ["ⴰⵔⵔⴰⵡ ⵏ ⵍⵀⵎⵎ ⵢⵓⴽⵔ ⴰⵖ ⵉⵀⴷⵓⵎⵏ ⵏⵏⵖ!", None, True],
]

my_inputs = [
    gr.Textbox(lines=5, label="Input Text"),
    gr.Audio(type="filepath", label="Speaker audio for voice cloning (optional)"),
    gr.Checkbox(label="Split Sentences (each sentence will be generated separately)", value=True),
]

my_outputs = gr.Audio(type="filepath", label="Output Audio", autoplay=True)

# Download the model checkpoint and config from the Hugging Face Hub.
best_model_path = hf_hub_download(repo_id=REPO_ID, filename="best_model.pth")
config_path = hf_hub_download(repo_id=REPO_ID, filename="config.json")

api = TTS(model_path=best_model_path, config_path=config_path).to("cuda" if CUDA else "cpu")

# Load the voice conversion model used for optional voice cloning.
api.load_vc_model_by_name("voice_conversion_models/multilingual/vctk/freevc24", gpu=CUDA)


def tts(text: str, speaker_wav: Optional[str] = None, split_sentences: bool = True) -> str:
    """Synthesize `text` to a WAV file and return its path.

    If `speaker_wav` is given, the output is voice-converted to match
    the reference speaker.
    """
    # Replace out-of-vocabulary characters with in-vocabulary equivalents.
    text = text.replace("\n", ". ")
    text = text.replace("(", ",")
    text = text.replace(")", ",")
    text = text.replace(";", ",")

    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        if speaker_wav:
            api.tts_with_vc_to_file(text, speaker_wav=speaker_wav, file_path=fp.name, split_sentences=split_sentences)
        else:
            api.tts_to_file(text, file_path=fp.name, split_sentences=split_sentences)
        return fp.name


iface = gr.Interface(
    fn=tts,
    inputs=my_inputs,
    outputs=my_outputs,
    title=my_title,
    description=my_description,
    examples=my_examples,
    cache_examples=True,
)

iface.launch()
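
# --- Optional: programmatic usage (sketch) ---
# The `tts` helper above can also be called directly, bypassing the Gradio UI,
# e.g. as a quick smoke test that the model loads and synthesizes. This is an
# illustrative sketch, not part of the Space itself; the example sentence is
# one of the demo inputs above.
#
#   wav_path = tts("ⴰⵣⵓⵍ. ⵎⴰⵏⵣⴰⴽⵉⵏ?")
#   print(f"Audio written to {wav_path}")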