# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
import argparse
import gradio as gr
import numpy as np
import torch
import torchaudio
import random
import librosa
import logging
logging.getLogger('matplotlib').setLevel(logging.WARNING)
from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav
import spaces
from textwrap import dedent

logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(levelname)s %(message)s')


def generate_seed():
    seed = random.randint(1, 100000000)
    return {
        "__type__": "update",
        "value": seed
    }


def set_all_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


max_val = 0.8


def postprocess(speech, top_db=60, hop_length=220, win_length=440):
    speech, _ = librosa.effects.trim(
        speech, top_db=top_db,
        frame_length=win_length,
        hop_length=hop_length
    )
    if speech.abs().max() > max_val:
        speech = speech / speech.abs().max() * max_val
    speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1)
    return speech


inference_mode_list = ['自然语言控制']
instruct_dict = {'预训练音色': '1. 选择预训练音色\n2.点击生成音频按钮',
                 '3s极速复刻': '1. 本地上传参考音频,或麦克风录入\n2. 输入参考音频对应的文本以及希望声音复刻的文本\n3.点击“一键开启声音复刻💕”',
                 '跨语种复刻': '1. 本地上传参考音频,或麦克风录入\n2. **无需输入**参考音频对应的文本\n3.点击“一键开启声音复刻💕”',
                 '自然语言控制': '1. 输入instruct文本\n2.点击生成音频按钮'}
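# English gloss of the mode labels above (the Chinese strings are the literal values
# the Gradio components pass back, so they are kept verbatim):
#   '预训练音色'   = pretrained voice (SFT speakers)
#   '3s极速复刻'   = 3-second zero-shot voice cloning
#   '跨语种复刻'   = cross-lingual voice cloning
#   '自然语言控制' = natural-language instruct control
#
# Illustrative sketch (not executed; 'prompt.wav' is a placeholder path) of how a
# prompt recording flows through postprocess() before inference:
#
#     prompt_speech_16k = postprocess(load_wav('prompt.wav', prompt_sr))
#     # -> leading/trailing silence trimmed at top_db=60, peak scaled down to
#     #    max_val=0.8 if needed, then int(target_sr * 0.2) zero samples appended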
def change_instruction(mode_checkbox_group):
    return instruct_dict[mode_checkbox_group]


@spaces.GPU(duration=70)
def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, instruct_text, seed):
    prompt_wav_record = None
    tts_text = "".join([item1 for item1 in tts_text.strip().split("\n") if item1 != ""]) + ".。"
    print(tts_text)
    prompt_text = "".join([item2 for item2 in prompt_text.strip().split("\n") if item2 != ""])
    # if len(tts_text) > 108:
    #     raise Exception('抱歉!你输入的文本超过了100字符,请您删减文本!')
    if prompt_wav_upload is not None:
        prompt_wav = prompt_wav_upload
    elif prompt_wav_record is not None:
        prompt_wav = prompt_wav_record
    else:
        prompt_wav = None
    # instruct mode: the model must be iic/CosyVoice-300M-Instruct and the mode must not be cross_lingual
    if mode_checkbox_group in ['自然语言控制']:
        if cosyvoice.frontend.instruct is False:
            gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M-Instruct模型'.format(model_dir))
            return (target_sr, default_data)
        if instruct_text == '':
            gr.Warning('您正在使用自然语言控制模式, 请输入instruct文本')
            return (target_sr, default_data)
        if prompt_wav is not None or prompt_text != '':
            gr.Info('您正在使用自然语言控制模式, prompt音频/prompt文本会被忽略')
    # cross_lingual mode: the model must be iic/CosyVoice-300M, and tts_text and prompt_text must be in different languages
    if mode_checkbox_group in ['跨语种复刻']:
        if cosyvoice.frontend.instruct is True:
            gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M模型'.format(model_dir))
            return (target_sr, default_data)
        if instruct_text != '':
            gr.Info('您正在使用跨语种复刻模式, instruct文本会被忽略')
        if prompt_wav is None:
            gr.Warning('您正在使用跨语种复刻模式, 请提供prompt音频')
            return (target_sr, default_data)
        gr.Info('您正在使用跨语种复刻模式, 请确保合成文本和参考音频对应的文本为不同语言')
    # zero_shot and cross_lingual modes: prompt_text and prompt_wav must meet the requirements
    if mode_checkbox_group in ['3s极速复刻', '跨语种复刻']:
        if prompt_wav is None:
            gr.Warning('prompt音频为空,您是否忘记输入prompt音频?')
            return (target_sr, default_data)
        if torchaudio.info(prompt_wav).sample_rate < prompt_sr:
            gr.Warning('prompt音频采样率{}低于{}'.format(torchaudio.info(prompt_wav).sample_rate, prompt_sr))
            return (target_sr, default_data)
    # sft mode only uses sft_dropdown
    if mode_checkbox_group in ['预训练音色']:
        if instruct_text != '' or prompt_wav is not None or prompt_text != '':
            gr.Info('您正在使用预训练音色模式,prompt文本/prompt音频/instruct文本会被忽略!')
    # zero_shot mode only uses prompt_wav and prompt_text
    if mode_checkbox_group in ['3s极速复刻']:
        if prompt_text == '':
            gr.Warning('prompt文本为空,您是否忘记输入prompt文本?')
            return (target_sr, default_data)
        if instruct_text != '':
            gr.Info('您正在使用3s极速复刻模式,预训练音色/instruct文本会被忽略!')

    if mode_checkbox_group == '预训练音色':
        logging.info('get sft inference request')
        set_all_random_seed(seed)
        output = cosyvoice.inference_sft(tts_text, sft_dropdown)
    elif mode_checkbox_group == '3s极速复刻':
        logging.info('get zero_shot inference request')
        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
        set_all_random_seed(seed)
        output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k)
    elif mode_checkbox_group == '跨语种复刻':
        logging.info('get cross_lingual inference request')
        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
        set_all_random_seed(seed)
        output = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k)
    else:
        logging.info('get instruct inference request')
        set_all_random_seed(seed)
        output = cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text)
    audio_data = output['tts_speech'].numpy().flatten()
    return (target_sr, audio_data)
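# generate_audio returns a (sample_rate, numpy_array) tuple, the value format a
# gr.Audio output component accepts; every validation failure above falls back to
# (target_sr, default_data), i.e. one second of silence at 22050 Hz.
#
# Illustrative call with placeholder arguments (the real values come from the Gradio
# components defined further below):
#
#     sr, wav = generate_audio(tts_text='你好。', mode_checkbox_group='自然语言控制',
#                              sft_dropdown=sft_spk[0], prompt_text='',
#                              prompt_wav_upload=None,
#                              instruct_text='请用温柔的语气朗读。', seed=0)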
# model directory; the same string is interpolated into the warning messages in generate_audio
model_dir = 'pretrained_models/CosyVoice-300M-Instruct'
cosyvoice = CosyVoice(model_dir)
sft_spk = cosyvoice.list_avaliable_spks()  # note: the 'avaliable' spelling matches the CosyVoice API
prompt_sr, target_sr = 16000, 22050
default_data = np.zeros(target_sr)

app = gr.Blocks(theme="JohnSmith9982/small_and_pretty")

with app:
    gr.Markdown("#