File size: 2,248 Bytes
347623b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c97028c
347623b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96638c7
347623b
c97028c
 
 
 
 
 
 
 
 
 
 
347623b
c97028c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
import io
import os

# Fetch the pretrained HuBERT content-encoder checkpoint before importing
# the inference stack, which expects it under hubert/.
# NOTE(review): os.system + wget runs on every launch and its exit status is
# ignored — presumably acceptable for a hosted demo; verify the file isn't
# re-downloaded unnecessarily.
os.system("wget -P hubert/ https://huggingface.co/spaces/innnky/nanami/resolve/main/checkpoint_best_legacy_500.pt")
import gradio as gr
import librosa
import numpy as np
import soundfile
from inference.infer_tool import Svc
import logging

# Silence chatty third-party loggers so demo logs stay readable.
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('markdown_it').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('matplotlib').setLevel(logging.WARNING)

# Global so-vits-svc model: 44.1 kHz generator checkpoint, its config, and a
# k-means cluster model (unused at inference below: cluster_infer_ratio=0).
model = Svc("logs/44k/G_199200.pth", "logs/44k/config.json", cluster_model_path="logs/44k/kmeans_10000.pt")

def predict(input_audio, not_singing):
    """Run voice conversion on an uploaded clip with the global Svc model.

    Args:
        input_audio: gradio numpy-audio tuple ``(sampling_rate, ndarray)``,
            or ``None`` when nothing was uploaded.
        not_singing: if True, let the model auto-predict F0 (speech mode).

    Returns:
        ``(44100, ndarray)`` — the converted audio on success, or an
        ``(error_message, None)`` tuple for missing/too-long input.
    """
    if input_audio is None:
        return "You need to upload an audio", None
    sampling_rate, audio = input_audio
    duration = audio.shape[0] / sampling_rate
    if duration > 45:
        return "Please upload audio less than 45 seconds", None
    # Normalize to float32 in [-1, 1]. Integer PCM is scaled by its dtype
    # max; float input (np.iinfo would raise on it) is assumed normalized.
    if np.issubdtype(audio.dtype, np.integer):
        audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
    else:
        audio = audio.astype(np.float32)
    if len(audio.shape) > 1:
        # gradio delivers (samples, channels); librosa.to_mono wants
        # (channels, samples), hence the transpose.
        audio = librosa.to_mono(audio.transpose(1, 0))
    if sampling_rate != 16000:
        # The HuBERT content encoder expects 16 kHz input.
        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
    out_wav_path = "temp.wav"
    soundfile.write(out_wav_path, audio, 16000, format="wav")
    out_audio, out_sr = model.infer("aimodel", 0, out_wav_path,
                                    cluster_infer_ratio=0,  # cluster model disabled
                                    auto_predict_f0=not_singing,
                                    noice_scale=0.4  # sic: so-vits-svc spells the kwarg "noice_scale"
                                    )
    # Model outputs a torch tensor at the 44.1 kHz model rate.
    return (44100, out_audio.numpy())

# Build the gradio UI: one audio upload, one speech-mode checkbox, one
# audio output, wired straight into predict() via the Interface helper.
audio_input = gr.Audio(label="Upload Audio")
not_singing = gr.Checkbox(label="Check this box if this audio is not singing", value=False)
audio_output = gr.Audio(label="Output Audio")
demo = gr.Interface(predict, inputs=[audio_input, not_singing], outputs=[audio_output])
# Earlier hand-rolled Blocks layout, kept for reference; superseded by the
# gr.Interface above.
# app = gr.Blocks()
# with app:
#     audio_input = gr.Audio(label="Upload Audio")
#     not_singing = gr.Checkbox(label="Check this box if this audio is not singing", value=False)
#     audio_output = gr.Audio(label="Output Audio")
#     submit_btn = gr.Button("Submit", variant="primary")
#     submit_btn.click(predict, [audio_input, not_singing], [audio_output], api_name="predict")

# Start the web server (blocking call).
demo.launch()