import argparse

import numpy as np
import pydub
import torch

import commons
import utils
from models import SynthesizerTrn
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text
from text.symbols import symbols

# Current version info
latest_version = "2.0"
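

# Build the SynthesizerTrn network described by the hyperparameters and load
# the generator weights from a checkpoint (optimizer state is skipped).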
def get_net_g(model_path: str, device: str, hps):
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).to(device)
    _ = net_g.eval()
    _ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)
    return net_g
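

# Convert raw text into the model's input tensors: phoneme, tone and
# language-id sequences plus per-language BERT features (zero tensors for the
# languages the text is not in).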
def get_text(text, language_str, hps, device):
    # Current-version implementation of get_text
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

    if hps.data.add_blank:
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1

    bert_ori = get_bert(norm_text, word2ph, language_str, device)
    del word2ph
    assert bert_ori.shape[-1] == len(phone), phone

    # Place the computed BERT features in the slot for the input language and
    # fill the other slots with zeros.
    if language_str == "ZH":
        bert = bert_ori
        sh_bert = torch.zeros(1024, len(phone))
        en_bert = torch.zeros(1024, len(phone))
    elif language_str == "SH":
        bert = torch.zeros(1024, len(phone))
        sh_bert = bert_ori
        en_bert = torch.zeros(1024, len(phone))
    elif language_str == "EN":
        bert = torch.zeros(1024, len(phone))
        sh_bert = torch.zeros(1024, len(phone))
        en_bert = bert_ori
    else:
        raise ValueError("language_str should be ZH, SH or EN")

    assert bert.shape[-1] == len(phone), f"Bert seq len {bert.shape[-1]} != {len(phone)}"

    phone = torch.LongTensor(phone)
    tone = torch.LongTensor(tone)
    language = torch.LongTensor(language)
    return bert, sh_bert, en_bert, phone, tone, language
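

# Run one forward pass of the synthesizer for a single text segment and return
# the generated waveform as a float32 NumPy array.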
def infer(
    text,
    sdp_ratio,
    noise_scale,
    noise_scale_w,
    length_scale,
    sid,
    language,
    hps,
    net_g,
    device,
):
    bert, sh_bert, en_bert, phones, tones, lang_ids = get_text(text, language, hps, device)
    with torch.no_grad():
        x_tst = phones.to(device).unsqueeze(0)
        tones = tones.to(device).unsqueeze(0)
        lang_ids = lang_ids.to(device).unsqueeze(0)
        bert = bert.to(device).unsqueeze(0)
        sh_bert = sh_bert.to(device).unsqueeze(0)
        en_bert = en_bert.to(device).unsqueeze(0)
        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
        del phones
        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        audio = (
            net_g.infer(
                x_tst,
                x_tst_lengths,
                speakers,
                tones,
                lang_ids,
                bert,
                sh_bert,
                en_bert,
                sdp_ratio=sdp_ratio,
                noise_scale=noise_scale,
                noise_scale_w=noise_scale_w,
                length_scale=length_scale,
            )[0][0, 0]
            .data.cpu()
            .float()
            .numpy()
        )
        del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
        torch.cuda.empty_cache()
        return audio
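

# Command-line entry point: load the config and checkpoint, then synthesize a
# few multilingual sample sentences for each speaker, e.g.
#   python infer.py --config configs/config.json --model_path models/G_73000.pth --device cpu --output sample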
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='configs/config.json')
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--model_path', type=str, default='models/G_73000.pth')
    parser.add_argument('--output', type=str, default='sample')
    args = parser.parse_args()

    hps = utils.get_hparams_from_file(args.config)
    net_g = get_net_g(args.model_path, device=args.device, hps=hps)

    # Sampling hyperparameters
    sdp_ratio = 0
    noise_scale = 0.667
    noise_scale_w = 0.8
    length_scale = 0.9
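
    # Synthesize each (text, language) segment, concatenate the audio,
    # peak-normalize, convert to 16-bit PCM, and export an MP3 named after the speaker.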
    def do_sample(texts, sid, export_tag):
        audio_data = np.array([], dtype=np.float32)
        for (sub_text, language) in texts:
            sub_audio_data = infer(sub_text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language, hps, net_g, args.device)
            audio_data = np.concatenate((audio_data, sub_audio_data))
        audio_data = audio_data / np.abs(audio_data).max()
        audio_data = audio_data * 32767
        audio_data = audio_data.astype(np.int16)
        sound = pydub.AudioSegment(audio_data.tobytes(), frame_rate=hps.data.sampling_rate, sample_width=audio_data.dtype.itemsize, channels=1)
        export_filename = args.output + export_tag + sid + '.mp3'
        sound.export(export_filename, format='mp3')
        print(export_filename)
    text = [('我觉得有点贵。', 'ZH'), ('so expensive, can they?', 'EN'), ('哈巨,吃不消它。', 'SH')]
    do_sample(text, '小庄', '_1_')
    do_sample(text, '小嘟', '_1_')
    do_sample(text, 'Jane', '_1_')
    do_sample(text, '小贝', '_1_')
    do_sample(text, '老克勒', '_1_')
    do_sample(text, '美琳', '_1_')
if __name__ == "__main__":
    main()