import os
import sys
from music.search import get_youtube, download_random
from utils.utils import log_execution_time
from vits.models import SynthesizerInfer
import whisper.inference
from omegaconf import OmegaConf
import torchcrepe
import torch
import gradio as gr
import librosa
import numpy as np
import soundfile
from pydub import AudioSegment
import uuid
from torchspleeter.utils import sound_split
from torchspleeter.splitter import Splitter
import logging
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('markdown_it').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('matplotlib').setLevel(logging.WARNING)
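# Load the generator weights ("model_g") from a so-vits checkpoint into the
# inference model; keys are copied by name, so the checkpoint must match the
# SynthesizerInfer architecture configured in configs/base.yaml.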
def load_svc_model(checkpoint_path, model):
assert os.path.isfile(checkpoint_path)
checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
saved_state_dict = checkpoint_dict["model_g"]
state_dict = model.state_dict()
new_state_dict = {}
for k, v in state_dict.items():
new_state_dict[k] = saved_state_dict[k]
model.load_state_dict(new_state_dict)
return model
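# Estimate F0 with torchcrepe; frames whose periodicity falls below 0.1 are
# treated as unvoiced and zeroed out.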
@log_execution_time
def compute_f0_nn(filename, device):
audio, sr = librosa.load(filename, sr=16000)
assert sr == 16000
# Load audio
audio = torch.tensor(np.copy(audio))[None]
# Here we'll use a 20 millisecond hop length
hop_length = 320
# Provide a sensible frequency range for your domain (upper limit is 2006 Hz)
# This would be a reasonable range for speech
fmin = 50
fmax = 1000
# Select a model capacity--one of "tiny" or "full"
model = "tiny"
# Pick a batch size that doesn't cause memory errors on your gpu
batch_size = 512
    # Compute pitch and periodicity on the selected device
pitch, periodicity = torchcrepe.predict(
audio,
sr,
hop_length,
fmin,
fmax,
model,
batch_size=batch_size,
device=device,
return_periodicity=True,
)
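    # torchcrepe frames are 20 ms (hop 320 at 16 kHz); repeat each frame twice
    # to match the 10 ms frame rate used by the synthesizer features downstream.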
pitch = np.repeat(pitch, 2, -1) # 320 -> 160 * 2
periodicity = np.repeat(periodicity, 2, -1) # 320 -> 160 * 2
# CREPE was not trained on silent audio. some error on silent need filter.
periodicity = torchcrepe.filter.median(periodicity, 9)
pitch = torchcrepe.filter.mean(pitch, 9)
pitch[periodicity < 0.1] = 0
pitch = pitch.squeeze(0)
return pitch
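# Global setup: load the config and all models once at import time.
# Linear layers are dynamically quantized to int8 to speed up CPU inference.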
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
hp = OmegaConf.load("configs/base.yaml")
model = SynthesizerInfer(
hp.data.filter_length // 2 + 1,
hp.data.segment_size // hp.data.hop_length,
hp)
load_svc_model("vits_pretrain/sovits5.0-48k-debug.pth", model)
model.eval()
model.to(device)
model.enc_p = torch.quantization.quantize_dynamic(model.enc_p, {torch.nn.Linear}, dtype=torch.qint8)
whisper_model = whisper.inference.load_model(os.path.join("whisper_pretrain", "medium.pt"))
whisper_quant_model = torch.quantization.quantize_dynamic(
whisper_model, {torch.nn.Linear}, dtype=torch.qint8
)
splitter_model = Splitter.from_pretrained(os.path.join("torchspleeter/models/2stems", "spleeter.pth")).to(device).eval()
splitter_quant_model = torch.quantization.quantize_dynamic(
splitter_model, {torch.nn.Linear}, dtype=torch.qint8
)
# warm up
# separator.separate_to_file('warm.wav', '/tmp/warm')
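# Convert a 16 kHz vocal track to the target singer: extract the whisper PPG
# and CREPE pitch, then run the synthesizer chunk by chunk.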
@log_execution_time
def svc_change(argswave, argsspk):
argsppg = "svc_tmp_quant.ppg.npy"
# whisper.inference.pred_ppg(whisper_model, argswave, argsppg)
whisper.inference.pred_ppg(whisper_quant_model, argswave, argsppg)
# os.system(f"python whisper/inference.py -w {argswave} -p {argsppg}")
spk = np.load(argsspk)
spk = torch.FloatTensor(spk)
ppg = np.load(argsppg)
ppg = np.repeat(ppg, 2, 0) # 320 PPG -> 160 * 2
ppg = torch.FloatTensor(ppg)
pit = compute_f0_nn(argswave, device)
pit = torch.FloatTensor(pit)
len_pit = pit.size()[0]
len_ppg = ppg.size()[0]
len_min = min(len_pit, len_ppg)
pit = pit[:len_min]
ppg = ppg[:len_min, :]
with torch.no_grad():
spk = spk.unsqueeze(0).to(device)
source = pit.unsqueeze(0).to(device)
source = model.pitch2source(source)
hop_size = hp.data.hop_length
all_frame = len_min
hop_frame = 10
        out_chunk = 2500  # frames per chunk (2500 x 10 ms, about 25 s)
out_index = 0
out_audio = []
has_audio = False
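        # Synthesize in chunks of out_chunk frames with hop_frame frames of
        # extra context on each side; the context is trimmed from the output
        # to avoid audible seams at chunk boundaries.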
while out_index + out_chunk < all_frame:
has_audio = True
if out_index == 0: # start frame
cut_s = out_index
cut_s_48k = 0
else:
cut_s = out_index - hop_frame
cut_s_48k = hop_frame * hop_size
if out_index + out_chunk + hop_frame > all_frame: # end frame
cut_e = out_index + out_chunk
cut_e_48k = 0
else:
cut_e = out_index + out_chunk + hop_frame
cut_e_48k = -1 * hop_frame * hop_size
sub_ppg = ppg[cut_s:cut_e, :].unsqueeze(0).to(device)
sub_pit = pit[cut_s:cut_e].unsqueeze(0).to(device)
sub_len = torch.LongTensor([cut_e - cut_s]).to(device)
            sub_har = source[:, :, cut_s * hop_size:cut_e * hop_size].to(device)
sub_out = model.inference(sub_ppg, sub_pit, spk, sub_len, sub_har)
sub_out = sub_out[0, 0].data.cpu().detach().numpy()
sub_out = sub_out[cut_s_48k:cut_e_48k]
out_audio.extend(sub_out)
out_index = out_index + out_chunk
if out_index < all_frame:
if has_audio:
cut_s = out_index - hop_frame
cut_s_48k = hop_frame * hop_size
else:
cut_s = 0
cut_s_48k = 0
sub_ppg = ppg[cut_s:, :].unsqueeze(0).to(device)
sub_pit = pit[cut_s:].unsqueeze(0).to(device)
sub_len = torch.LongTensor([all_frame - cut_s]).to(device)
sub_har = source[:, :, cut_s * hop_size:].to(device)
sub_out = model.inference(sub_ppg, sub_pit, spk, sub_len, sub_har)
sub_out = sub_out[0, 0].data.cpu().detach().numpy()
sub_out = sub_out[cut_s_48k:]
out_audio.extend(sub_out)
out_audio = np.asarray(out_audio)
return out_audio
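# Gradio handler: trim the upload to its middle 30 seconds, separate vocals
# from accompaniment, convert the vocals, then remix with the accompaniment.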
@log_execution_time
def svc_main(name, sid, input_audio):
if input_audio is None:
        return "You need to upload an audio file", None
sampling_rate, audio = input_audio
integer_dtypes = [np.int8, np.int16, np.int32, np.int64]
if audio.dtype in integer_dtypes:
audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
uuid_value = uuid.uuid4()
uuid_string = str(uuid_value)
input_audio_tmp_file = f'{uuid_string}.wav'
tmpfile_path = f'/tmp/{uuid_string}'
if not os.path.exists(tmpfile_path):
os.makedirs(tmpfile_path)
#
# prediction = separator.separate(audio)
# vocals, accompaniment = prediction["vocals"], prediction["accompaniment"]
soundfile.write(input_audio_tmp_file, audio, sampling_rate, format="wav")
    # keep only the middle 30 seconds to bound processing time
    # (the temp file was written as WAV above, so read it as WAV)
    song = AudioSegment.from_wav(input_audio_tmp_file)
    # pydub works in milliseconds
    length = len(song)
    left_idx = length / 2 - 15 * 1000
    right_idx = length / 2 + 15 * 1000
    if left_idx < 0:
        left_idx = 0
    if right_idx > length:
        right_idx = length
    middle_30s = song[left_idx:right_idx]
    middle_30s.export(input_audio_tmp_file, format="wav")
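    # Split the excerpt into vocals and accompaniment with the quantized Spleeter model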
sound_split(splitter_quant_model, input_audio_tmp_file, tmpfile_path)
curr_tmp_path = tmpfile_path
vocals_filepath = os.path.join(curr_tmp_path, 'vocals.wav')
accompaniment_filepath = os.path.join(curr_tmp_path, 'accompaniment.wav')
vocals, sampling_rate = soundfile.read(vocals_filepath)
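    # Whisper PPG and CREPE both expect 16 kHz mono, so downmix, resample,
    # and cap the vocal track at 100 seconds.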
if len(vocals.shape) > 1:
vocals = librosa.to_mono(vocals.transpose(1, 0))
if sampling_rate != 16000:
vocals = librosa.resample(vocals, orig_sr=sampling_rate, target_sr=16000)
if len(vocals) > 16000 * 100:
vocals = vocals[:16000 * 100]
wav_path = os.path.join(curr_tmp_path, "temp.wav")
soundfile.write(wav_path, vocals, 16000, format="wav")
out_vocals = svc_change(wav_path, f"configs/singers/singer00{sid}.npy")
out_vocals_filepath = os.path.join(curr_tmp_path, 'out_vocals.wav')
soundfile.write(out_vocals_filepath, out_vocals, 48000, format="wav")
print(f"out_vocals_filepath: {out_vocals_filepath}")
sound1 = AudioSegment.from_file(out_vocals_filepath)
sound2 = AudioSegment.from_file(accompaniment_filepath)
    played_together = sound1.overlay(sound2)
    result_path = os.path.join(curr_tmp_path, 'out_song.wav')
    played_together.export(result_path, format="wav")
print(f"result_path: {result_path}")
result, sampling_rate = soundfile.read(result_path, dtype=np.int16)
return "Success", (sampling_rate, result)
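# Search YouTube for the requested song and return it as 16-bit PCM for the
# Gradio audio component.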
@log_execution_time
def auto_search(name):
save_music_path = '/tmp/downloaded'
if not os.path.exists(save_music_path):
os.makedirs(save_music_path)
    # NOTE: 'savedir' is assumed to be the download-directory key expected by the musicdl-style config
    config = {'logfilepath': 'musicdl.log', 'savedir': save_music_path, 'search_size_per_source': 5,
              'proxies': {}}
save_path = os.path.join(save_music_path, name + '.mp3')
# youtube
get_youtube(name, os.path.join(save_music_path, name))
# task1 = threading.Thread(
# target=get_youtube,
# args=(name, os.path.join(save_music_path, name))
# )
# task1.start()
# task2 = threading.Thread(
# target=download_random,
# args=(name, config, save_path)
# )
# task2.start()
# task1.join(timeout=20)
# task2.join(timeout=10)
if not os.path.exists(save_path):
return "Not Found", None
signal, sampling_rate = soundfile.read(save_path, dtype=np.int16)
# signal, sampling_rate = open_audio(save_path)
    return "Found a song", (sampling_rate, signal)
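# Build the Gradio UI.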
def main():
app = gr.Blocks()
try:
with app:
title = "Singer Voice Clone 0.1 Demo"
            desc = """A small singer voice clone demo app.
            Enter keywords to auto-search for a song to clone, or upload a song yourself.
            This is a simplified demo; more advanced features can further improve output quality.
            """
tutorial_link = "https://docs.cworld.ai/docs/cworld-ai/quick-start-singer"
gr.HTML(
f"""