from glob import glob
import os
import subprocess
from typing import Tuple
from demucs.separate import main as demucs
import gradio as gr
import numpy as np
import soundfile as sf
from configs.config import Config
from infer.modules.vc.modules import VC
from zero import zero
from model import device
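
# Inference tab: Demucs splits the uploaded track into vocal and instrumental
# stems, the trained RVC model converts the vocal, and ffmpeg mixes the
# converted vocal back over the instrumental.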


@zero(duration=120)
def infer(
    exp_dir: str, original_audio: str, f0add: int, index_rate: float, protect: float
) -> Tuple[int, np.ndarray]:
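    """Convert the vocals of `original_audio` with the model trained in `exp_dir`.

    Loads `exp_dir/model.pth` (and the first `added_*.index` file, if any),
    isolates the vocals with Demucs, runs RVC voice conversion on them, and
    returns the converted vocal as a (sample_rate, samples) tuple.
    Raises gr.Error if the model file does not exist.
    """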
    model = os.path.join(exp_dir, "model.pth")
    if not os.path.exists(model):
        raise gr.Error("Model not found")

    index_files = glob(f"{exp_dir}/added_*.index")
    index = index_files[0] if index_files else None

    base = os.path.basename(original_audio)
    base = os.path.splitext(base)[0]
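    # Two-stem separation: writes vocals.wav / no_vocals.wav under separated/htdemucs/<base>/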
    demucs(
        ["--two-stems", "vocals", "-d", str(device), "-n", "htdemucs", original_audio]
    )
    out = os.path.join("separated", "htdemucs", base, "vocals.wav")

    cfg = Config()
    vc = VC(cfg)
    vc.get_vc(model)
    _, wav_opt = vc.vc_single(
        0,  # speaker id
        out,  # separated vocal track
        f0add,  # pitch shift in semitones
        None,  # optional f0 curve file
        "rmvpe",  # f0 extraction method
        index,  # feature index (may be None)
        None,  # secondary index path
        index_rate,
        3,  # filter radius; only has effect when f0_method is "harvest"
        0,  # resample_sr (0 keeps the model's sample rate)
        1,  # rms_mix_rate
        protect,
    )

    sr, data = wav_opt

    return sr, data


def merge(exp_dir: str, original_audio: str, vocal: Tuple[int, np.ndarray]) -> str:
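    """Mix the converted vocal back over the instrumental stem produced by `infer`.

    Writes the vocal to a temporary wav in `exp_dir`, overlays it on Demucs'
    no_vocals.wav with ffmpeg, and returns the path of the merged mp3.
    """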
    base = os.path.basename(original_audio)
    base = os.path.splitext(base)[0]
    music = os.path.join("separated", "htdemucs", base, "no_vocals.wav")

    tmp = os.path.join(exp_dir, "tmp.wav")
    sf.write(tmp, vocal[1], vocal[0])

    # Boost the converted vocal (volume=2) and mix it over the instrumental;
    # duration=first trims the result to the instrumental's length.
    subprocess.run(
        ["ffmpeg", "-i", music, "-i", tmp, "-filter_complex",
         "[1]volume=2[a];[0][a]amix=inputs=2:duration=first:dropout_transition=2",
         "-ac", "2", "-y", f"{tmp}.merged.mp3"],
        check=True,
    )

    return f"{tmp}.merged.mp3"


class InferenceTab:
    def __init__(self):
        pass

    def ui(self):
        gr.Markdown("# Inference")
        gr.Markdown(
            "After trained model is pruned, you can use it to infer on new music. \n"
            "Upload the original audio and adjust the F0 add value to generate the inferred audio."
        )

        with gr.Row():
            self.original_audio = gr.Audio(
                label="Upload original audio",
                type="filepath",
                show_download_button=True,
            )

            with gr.Column():
                self.f0add = gr.Slider(
                    label="F0 +/-",
                    minimum=-16,
                    maximum=16,
                    step=1,
                    value=0,
                )
                self.index_rate = gr.Slider(
                    label="Index rate",
                    minimum=0,
                    maximum=1,
                    step=0.01,
                    value=0.5,
                )
                self.protect = gr.Slider(
                    label="Protect",
                    minimum=0,
                    maximum=1,
                    step=0.01,
                    value=0.33,
                )

            self.infer_btn = gr.Button(value="Infer", variant="primary")
        with gr.Row():
            self.infer_output = gr.Audio(
                label="Inferred audio", show_download_button=True, format="mp3"
            )
        with gr.Row():
            self.merge_output = gr.Audio(
                label="Merged audio", show_download_button=True, format="mp3"
            )

    def build(self, exp_dir: gr.Textbox):
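        """Wire the Infer button: run `infer` first, then `merge` once it succeeds."""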
        self.infer_btn.click(
            fn=infer,
            inputs=[
                exp_dir,
                self.original_audio,
                self.f0add,
                self.index_rate,
                self.protect,
            ],
            outputs=[self.infer_output],
        ).success(
            fn=merge,
            inputs=[exp_dir, self.original_audio, self.infer_output],
            outputs=[self.merge_output],
        )