Zeph27 committed
Commit 79f4c54
1 Parent(s): 69fb3c7

ort nightly

app.py CHANGED
@@ -1,287 +1,4 @@
- import json
  import os
- import shutil
- import urllib.request
- import zipfile
- import gdown
- from argparse import ArgumentParser
 
- import gradio as gr
- import spaces
-
- from src.main import song_cover_pipeline
-
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
- mdxnet_models_dir = 'mdxnet_models'
- rvc_models_dir = 'rvc_models'
- output_dir = 'song_output'
-
- def download_and_extract_model(model_url, model_name, progress=gr.Progress()):
-     try:
-         os.makedirs(rvc_models_dir, exist_ok=True)
-
-         extraction_folder = os.path.join(rvc_models_dir, model_name)
-         zip_path = os.path.join(rvc_models_dir, f'{model_name}.zip')
-
-         if os.path.exists(extraction_folder):
-             raise gr.Error(f'Voice model directory {model_name} already exists! Choose a different name for your voice model.')
-
-         progress(0, desc=f'[~] Downloading voice model with name {model_name}...')
-
-         try:
-             if 'huggingface.co' in model_url:
-                 urllib.request.urlretrieve(model_url, zip_path)
-             elif 'pixeldrain.com' in model_url:
-                 pixeldrain_id = model_url.split('/')[-1]
-                 pixeldrain_url = f'https://pixeldrain.com/api/file/{pixeldrain_id}'
-                 urllib.request.urlretrieve(pixeldrain_url, zip_path)
-             elif 'drive.google.com' in model_url:
-                 file_id = model_url.split('/')[-2]
-                 gdown.download(id=file_id, output=zip_path, quiet=False)
-             else:
-                 urllib.request.urlretrieve(model_url, zip_path)
-         except Exception as download_error:
-             raise gr.Error(f"Failed to download the model: {str(download_error)}")
-
-         if not os.path.exists(zip_path):
-             raise gr.Error(f"Failed to download the model. The zip file was not created.")
-
-         progress(0.5, desc="Extracting model...")
-         extract_zip(extraction_folder, zip_path)
-
-         pth_files = [f for f in os.listdir(extraction_folder) if f.endswith('.pth')]
-         if not pth_files:
-             raise ValueError("No .pth file found in the downloaded model.")
-
-         progress(1, desc="Model ready")
-         return model_name
-
-     except Exception as e:
-         if os.path.exists(extraction_folder):
-             shutil.rmtree(extraction_folder)
-         if os.path.exists(zip_path):
-             os.remove(zip_path)
-         raise gr.Error(f"Error downloading or extracting model: {str(e)}")
-
- def cleanup_temp_model(model_name):
-     temp_dir = os.path.join(rvc_models_dir, model_name)
-     try:
-         shutil.rmtree(temp_dir)
-     except Exception as e:
-         print(f"Error cleaning up temporary model files: {str(e)}")
-
- def extract_zip(extraction_folder, zip_name):
-     os.makedirs(extraction_folder)
-     with zipfile.ZipFile(zip_name, 'r') as zip_ref:
-         zip_ref.extractall(extraction_folder)
-     os.remove(zip_name)
-
-     index_filepath, model_filepath = None, None
-     for root, dirs, files in os.walk(extraction_folder):
-         for name in files:
-             if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
-                 index_filepath = os.path.join(root, name)
-
-             if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
-                 model_filepath = os.path.join(root, name)
-
-     if not model_filepath:
-         raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
-
-     # move model and index file to extraction folder
-     os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
-     if index_filepath:
-         os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
-
-     # remove any unnecessary nested folders
-     for filepath in os.listdir(extraction_folder):
-         if os.path.isdir(os.path.join(extraction_folder, filepath)):
-             shutil.rmtree(os.path.join(extraction_folder, filepath))
-
-
- def download_online_model(url, dir_name, progress=gr.Progress()):
-     try:
-         progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
-         zip_name = url.split('/')[-1]
-         extraction_folder = os.path.join(rvc_models_dir, dir_name)
-         if os.path.exists(extraction_folder):
-             raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
-
-         if 'huggingface.co' in url:
-             urllib.request.urlretrieve(url, zip_name)
-
-         if 'pixeldrain.com' in url:
-             zip_name = dir_name + '.zip'
-             url = f'https://pixeldrain.com/api/file/{zip_name}'
-             urllib.request.urlretrieve(url, zip_name)
-
-         elif 'drive.google.com' in url:
-             # Extract the Google Drive file ID
-             zip_name = dir_name + '.zip'
-             file_id = url.split('/')[-2]
-             output = os.path.join('.', f'{dir_name}.zip') # Adjust the output path if needed
-             gdown.download(id=file_id, output=output, quiet=False)
-
-         progress(0.5, desc='[~] Extracting zip...')
-         extract_zip(extraction_folder, zip_name)
-         return f'[+] {dir_name} Model successfully downloaded!'
-
-     except Exception as e:
-         raise gr.Error(str(e))
-
-
- def upload_local_model(zip_path, dir_name, progress=gr.Progress()):
-     try:
-         extraction_folder = os.path.join(rvc_models_dir, dir_name)
-         if os.path.exists(extraction_folder):
-             raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
-
-         zip_name = zip_path.name
-         progress(0.5, desc='[~] Extracting zip...')
-         extract_zip(extraction_folder, zip_name)
-         return f'[+] {dir_name} Model successfully uploaded!'
-
-     except Exception as e:
-         raise gr.Error(str(e))
-
- def pub_dl_autofill(pub_models, event: gr.SelectData):
-     return gr.Text.update(value=pub_models.loc[event.index[0], 'URL']), gr.Text.update(value=pub_models.loc[event.index[0], 'Model Name'])
-
-
- def swap_visibility():
-     return gr.update(visible=True), gr.update(visible=False), gr.update(value=''), gr.update(value=None)
-
-
- def process_file_upload(file):
-     return file.name, gr.update(value=file.name)
-
-
- def show_hop_slider(pitch_detection_algo):
-     if pitch_detection_algo == 'mangio-crepe':
-         return gr.update(visible=True)
-     else:
-         return gr.update(visible=False)
-
-
- def song_cover_pipeline_with_model_download(song_input, model_url, model_name, pitch, keep_files, is_webui, main_gain, backup_gain,
-                                             inst_gain, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
-                                             protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping,
-                                             output_format, progress=gr.Progress()):
-     model_path = None
-     try:
-         model_path = download_and_extract_model(model_url, model_name, progress)
-         print(f"Model path: {model_path}")
-         result = song_cover_pipeline(song_input, model_path, pitch, keep_files, is_webui, main_gain, backup_gain,
-                                      inst_gain, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
-                                      protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping,
-                                      output_format, progress)
-
-         # Clean up old folders in song_output
-         output_folders = [f for f in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir, f))]
-         output_folders.sort(key=lambda x: os.path.getmtime(os.path.join(output_dir, x)))
-
-         while len(output_folders) > 100:
-             oldest_folder = output_folders.pop(0)
-             shutil.rmtree(os.path.join(output_dir, oldest_folder))
-
-         return result
-     except gr.Error as e:
-         return str(e), None # Return error message and None for the second output
-     finally:
-         if model_path:
-             cleanup_temp_model(model_path)
-
-
- if __name__ == '__main__':
-     parser = ArgumentParser(description='Generate a AI cover song in the song_output/id directory.', add_help=True)
-     parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
-     parser.add_argument("--listen", action="store_true", default=False, help="Make the WebUI reachable from your local network.")
-     parser.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
-     parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
-     args = parser.parse_args()
-
-     with gr.Blocks(title='AICoverGenWebUI', theme='NoCrypt/miku@1.2.2') as app:
-
-         gr.Label('AICoverGen WebUI created with ❤️', show_label=False)
-
-         # main tab
-         with gr.Tab("Generate"):
-
-             with gr.Accordion('Main Options'):
-                 with gr.Row():
-                     with gr.Column():
-                         model_url = gr.Text(label='Voice Model URL', info='Enter the URL of the voice model zip file', value='https://huggingface.co/megaaziib/my-rvc-models-collection/resolve/main/kobo.zip')
-                         model_name = gr.Text(label='Voice Model Name', info='Enter the name of the voice model', value='kobo')
-                         # rvc_model = gr.Dropdown(voice_models, label='Voice Models', info='Models folder "AICoverGen --> rvc_models". After new models are added into this folder, click the refresh button')
-
-                     with gr.Column() as yt_link_col:
-                         song_input = gr.Text(label='Song input', info='Link to a song on YouTube or full path to a local file. For file upload, click the button below.', value='https://youtu.be/FRh7LvlQTuA')
-                         show_file_upload_button = gr.Button('Upload file instead')
-
-                     with gr.Column(visible=False) as file_upload_col:
-                         local_file = gr.File(label='Audio file')
-                         song_input_file = gr.UploadButton('Upload 📂', file_types=['audio'], variant='primary')
-                         show_yt_link_button = gr.Button('Paste YouTube link/Path to local file instead')
-                         song_input_file.upload(process_file_upload, inputs=[song_input_file], outputs=[local_file, song_input])
-
-                     with gr.Column():
-                         pitch = gr.Slider(-24, 24, value=0, step=1, label='Pitch Change (Vocals ONLY)', info='Generally, use 12 for male to female conversions and -12 for vice-versa. (Octaves)')
-                         pitch_all = gr.Slider(-12, 12, value=0, step=1, label='Overall Pitch Change', info='Changes pitch/key of vocals and instrumentals together. Altering this slightly reduces sound quality. (Semitones)')
-                     show_file_upload_button.click(swap_visibility, outputs=[file_upload_col, yt_link_col, song_input, local_file])
-                     show_yt_link_button.click(swap_visibility, outputs=[yt_link_col, file_upload_col, song_input, local_file])
-
-             with gr.Accordion('Voice conversion options', open=False):
-                 with gr.Row():
-                     index_rate = gr.Slider(0, 1, value=0.5, label='Index Rate', info="Controls how much of the AI voice's accent to keep in the vocals")
-                     filter_radius = gr.Slider(0, 7, value=3, step=1, label='Filter radius', info='If >=3: apply median filtering median filtering to the harvested pitch results. Can reduce breathiness')
-                     rms_mix_rate = gr.Slider(0, 1, value=0.25, label='RMS mix rate', info="Control how much to mimic the original vocal's loudness (0) or a fixed loudness (1)")
-                     protect = gr.Slider(0, 0.5, value=0.33, label='Protect rate', info='Protect voiceless consonants and breath sounds. Set to 0.5 to disable.')
-                     with gr.Column():
-                         f0_method = gr.Dropdown(['rmvpe', 'mangio-crepe'], value='rmvpe', label='Pitch detection algorithm', info='Best option is rmvpe (clarity in vocals), then mangio-crepe (smoother vocals)')
-                         crepe_hop_length = gr.Slider(32, 320, value=128, step=1, visible=False, label='Crepe hop length', info='Lower values leads to longer conversions and higher risk of voice cracks, but better pitch accuracy.')
-                         f0_method.change(show_hop_slider, inputs=f0_method, outputs=crepe_hop_length)
-                 keep_files = gr.Checkbox(label='Keep intermediate files', info='Keep all audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals. Leave unchecked to save space')
-
-             with gr.Accordion('Audio mixing options', open=False):
-                 gr.Markdown('### Volume Change (decibels)')
-                 with gr.Row():
-                     main_gain = gr.Slider(-20, 20, value=0, step=1, label='Main Vocals')
-                     backup_gain = gr.Slider(-20, 20, value=0, step=1, label='Backup Vocals')
-                     inst_gain = gr.Slider(-20, 20, value=0, step=1, label='Music')
-
-                 gr.Markdown('### Reverb Control on AI Vocals')
-                 with gr.Row():
-                     reverb_rm_size = gr.Slider(0, 1, value=0.15, label='Room size', info='The larger the room, the longer the reverb time')
-                     reverb_wet = gr.Slider(0, 1, value=0.2, label='Wetness level', info='Level of AI vocals with reverb')
-                     reverb_dry = gr.Slider(0, 1, value=0.8, label='Dryness level', info='Level of AI vocals without reverb')
-                     reverb_damping = gr.Slider(0, 1, value=0.7, label='Damping level', info='Absorption of high frequencies in the reverb')
-
-                 gr.Markdown('### Audio Output Format')
-                 output_format = gr.Dropdown(['mp3', 'wav'], value='mp3', label='Output file type', info='mp3: small file size, decent quality. wav: Large file size, best quality')
-
-             with gr.Row():
-                 clear_btn = gr.ClearButton(value='Clear', components=[song_input, model_url, keep_files, local_file])
-                 generate_btn = gr.Button("Generate", variant='primary')
-             with gr.Row():
-                 ai_cover = gr.Audio(label='AI Cover (Vocal Only Inference)', show_share_button=False)
-                 ai_backing = gr.Audio(label='AI Cover (Vocal Backing Inference)', show_share_button=False)
-
-             is_webui = gr.Number(value=1, visible=False)
-             generate_btn.click(song_cover_pipeline_with_model_download,
-                                inputs=[song_input, model_url, model_name, pitch, keep_files, is_webui, main_gain, backup_gain,
-
-                                        inst_gain, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
-                                        protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping,
-                                        output_format],
-                                outputs=[ai_cover, ai_backing])
-             clear_btn.click(lambda: [0, 0, 0, 0, 0.5, 3, 0.25, 0.33, 'rmvpe', 128, 0, 0.15, 0.2, 0.8, 0.7, 'mp3', None],
-                             outputs=[pitch, main_gain, backup_gain, inst_gain, index_rate, filter_radius, rms_mix_rate,
-                                      protect, f0_method, crepe_hop_length, pitch_all, reverb_rm_size, reverb_wet,
-                                      reverb_dry, reverb_damping, output_format, ai_cover])
-
-     app.launch(
-         share=args.share_enabled,
-         server_name=None if not args.listen else (args.listen_host or '0.0.0.0'),
-         server_port=args.listen_port,
-     )
+ os.system("pip install ort-nightly-gpu --index-url=https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ort-cuda-12-nightly/pypi/simple/")
+ os.system("python src/webui.py")
 
requirements.txt CHANGED
@@ -20,4 +20,5 @@ tqdm==4.65.0
  yt_dlp==2024.8.6
  sox==1.4.1
  audio-separator[gpu]==0.17.5
- gdown==5.2.0
+ gdown==5.2.0
+ ort-nightly-gpu @ https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ort-cuda-12-nightly/pypi/simple/ort-nightly-gpu/
src/infer_pack/attentions.py CHANGED
@@ -5,9 +5,9 @@ import torch
  from torch import nn
  from torch.nn import functional as F
 
- from src.infer_pack import commons
- from src.infer_pack import modules
- from src.infer_pack.modules import LayerNorm
+ from infer_pack import commons
+ from infer_pack import modules
+ from infer_pack.modules import LayerNorm
 
 
  class Encoder(nn.Module):
src/infer_pack/models.py CHANGED
@@ -3,15 +3,15 @@ from time import time as ttime
  import torch
  from torch import nn
  from torch.nn import functional as F
- from src.infer_pack import modules
- from src.infer_pack import attentions
- from src.infer_pack import commons
- from src.infer_pack.commons import init_weights, get_padding
+ from infer_pack import modules
+ from infer_pack import attentions
+ from infer_pack import commons
+ from infer_pack.commons import init_weights, get_padding
  from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
  from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
- from src.infer_pack.commons import init_weights
+ from infer_pack.commons import init_weights
  import numpy as np
- from src.infer_pack import commons
+ from infer_pack import commons
 
 
  class TextEncoder256(nn.Module):
src/infer_pack/modules.py CHANGED
@@ -9,9 +9,9 @@ from torch.nn import functional as F
  from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
  from torch.nn.utils import weight_norm, remove_weight_norm
 
- from src.infer_pack import commons
- from src.infer_pack.commons import init_weights, get_padding
- from src.infer_pack.transforms import piecewise_rational_quadratic_transform
+ from infer_pack import commons
+ from infer_pack.commons import init_weights, get_padding
+ from infer_pack.transforms import piecewise_rational_quadratic_transform
 
 
  LRELU_SLOPE = 0.1
src/main.py CHANGED
@@ -18,8 +18,8 @@ from pedalboard import Pedalboard, Reverb, Compressor, HighpassFilter
  from pedalboard.io import AudioFile
  from pydub import AudioSegment
 
- from src.mdx import run_mdx, run_roformer
- from src.rvc import Config, load_hubert, get_vc, rvc_infer
+ from mdx import run_mdx, run_roformer
+ from rvc import Config, load_hubert, get_vc, rvc_infer
 
  import spaces
 
src/rvc.py CHANGED
@@ -4,14 +4,14 @@ from pathlib import Path
  import torch
  from scipy.io import wavfile
 
- from src.infer_pack.models import (
+ from infer_pack.models import (
      SynthesizerTrnMs256NSFsid,
      SynthesizerTrnMs256NSFsid_nono,
      SynthesizerTrnMs768NSFsid,
      SynthesizerTrnMs768NSFsid_nono,
  )
- from src.my_utils import load_audio
- from src.vc_infer_pipeline import VC
+ from my_utils import load_audio
+ from vc_infer_pipeline import VC
 
  BASE_DIR = Path(__file__).resolve().parent.parent
 
src/webui.py ADDED
@@ -0,0 +1,287 @@
+ import json
+ import os
+ import shutil
+ import urllib.request
+ import zipfile
+ import gdown
+ from argparse import ArgumentParser
+
+ import gradio as gr
+ import spaces
+
+ from main import song_cover_pipeline
+
+ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+ mdxnet_models_dir = 'mdxnet_models'
+ rvc_models_dir = 'rvc_models'
+ output_dir = 'song_output'
+
+ def download_and_extract_model(model_url, model_name, progress=gr.Progress()):
+     try:
+         os.makedirs(rvc_models_dir, exist_ok=True)
+
+         extraction_folder = os.path.join(rvc_models_dir, model_name)
+         zip_path = os.path.join(rvc_models_dir, f'{model_name}.zip')
+
+         if os.path.exists(extraction_folder):
+             raise gr.Error(f'Voice model directory {model_name} already exists! Choose a different name for your voice model.')
+
+         progress(0, desc=f'[~] Downloading voice model with name {model_name}...')
+
+         try:
+             if 'huggingface.co' in model_url:
+                 urllib.request.urlretrieve(model_url, zip_path)
+             elif 'pixeldrain.com' in model_url:
+                 pixeldrain_id = model_url.split('/')[-1]
+                 pixeldrain_url = f'https://pixeldrain.com/api/file/{pixeldrain_id}'
+                 urllib.request.urlretrieve(pixeldrain_url, zip_path)
+             elif 'drive.google.com' in model_url:
+                 file_id = model_url.split('/')[-2]
+                 gdown.download(id=file_id, output=zip_path, quiet=False)
+             else:
+                 urllib.request.urlretrieve(model_url, zip_path)
+         except Exception as download_error:
+             raise gr.Error(f"Failed to download the model: {str(download_error)}")
+
+         if not os.path.exists(zip_path):
+             raise gr.Error(f"Failed to download the model. The zip file was not created.")
+
+         progress(0.5, desc="Extracting model...")
+         extract_zip(extraction_folder, zip_path)
+
+         pth_files = [f for f in os.listdir(extraction_folder) if f.endswith('.pth')]
+         if not pth_files:
+             raise ValueError("No .pth file found in the downloaded model.")
+
+         progress(1, desc="Model ready")
+         return model_name
+
+     except Exception as e:
+         if os.path.exists(extraction_folder):
+             shutil.rmtree(extraction_folder)
+         if os.path.exists(zip_path):
+             os.remove(zip_path)
+         raise gr.Error(f"Error downloading or extracting model: {str(e)}")
+
+ def cleanup_temp_model(model_name):
+     temp_dir = os.path.join(rvc_models_dir, model_name)
+     try:
+         shutil.rmtree(temp_dir)
+     except Exception as e:
+         print(f"Error cleaning up temporary model files: {str(e)}")
+
+ def extract_zip(extraction_folder, zip_name):
+     os.makedirs(extraction_folder)
+     with zipfile.ZipFile(zip_name, 'r') as zip_ref:
+         zip_ref.extractall(extraction_folder)
+     os.remove(zip_name)
+
+     index_filepath, model_filepath = None, None
+     for root, dirs, files in os.walk(extraction_folder):
+         for name in files:
+             if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
+                 index_filepath = os.path.join(root, name)
+
+             if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
+                 model_filepath = os.path.join(root, name)
+
+     if not model_filepath:
+         raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
+
+     # move model and index file to extraction folder
+     os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
+     if index_filepath:
+         os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
+
+     # remove any unnecessary nested folders
+     for filepath in os.listdir(extraction_folder):
+         if os.path.isdir(os.path.join(extraction_folder, filepath)):
+             shutil.rmtree(os.path.join(extraction_folder, filepath))
+
+
+ def download_online_model(url, dir_name, progress=gr.Progress()):
+     try:
+         progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
+         zip_name = url.split('/')[-1]
+         extraction_folder = os.path.join(rvc_models_dir, dir_name)
+         if os.path.exists(extraction_folder):
+             raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
+
+         if 'huggingface.co' in url:
+             urllib.request.urlretrieve(url, zip_name)
+
+         if 'pixeldrain.com' in url:
+             zip_name = dir_name + '.zip'
+             url = f'https://pixeldrain.com/api/file/{zip_name}'
+             urllib.request.urlretrieve(url, zip_name)
+
+         elif 'drive.google.com' in url:
+             # Extract the Google Drive file ID
+             zip_name = dir_name + '.zip'
+             file_id = url.split('/')[-2]
+             output = os.path.join('.', f'{dir_name}.zip') # Adjust the output path if needed
+             gdown.download(id=file_id, output=output, quiet=False)
+
+         progress(0.5, desc='[~] Extracting zip...')
+         extract_zip(extraction_folder, zip_name)
+         return f'[+] {dir_name} Model successfully downloaded!'
+
+     except Exception as e:
+         raise gr.Error(str(e))
+
+
+ def upload_local_model(zip_path, dir_name, progress=gr.Progress()):
+     try:
+         extraction_folder = os.path.join(rvc_models_dir, dir_name)
+         if os.path.exists(extraction_folder):
+             raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
+
+         zip_name = zip_path.name
+         progress(0.5, desc='[~] Extracting zip...')
+         extract_zip(extraction_folder, zip_name)
+         return f'[+] {dir_name} Model successfully uploaded!'
+
+     except Exception as e:
+         raise gr.Error(str(e))
+
+ def pub_dl_autofill(pub_models, event: gr.SelectData):
+     return gr.Text.update(value=pub_models.loc[event.index[0], 'URL']), gr.Text.update(value=pub_models.loc[event.index[0], 'Model Name'])
+
+
+ def swap_visibility():
+     return gr.update(visible=True), gr.update(visible=False), gr.update(value=''), gr.update(value=None)
+
+
+ def process_file_upload(file):
+     return file.name, gr.update(value=file.name)
+
+
+ def show_hop_slider(pitch_detection_algo):
+     if pitch_detection_algo == 'mangio-crepe':
+         return gr.update(visible=True)
+     else:
+         return gr.update(visible=False)
+
+
+ def song_cover_pipeline_with_model_download(song_input, model_url, model_name, pitch, keep_files, is_webui, main_gain, backup_gain,
+                                             inst_gain, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
+                                             protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping,
+                                             output_format, progress=gr.Progress()):
+     model_path = None
+     try:
+         model_path = download_and_extract_model(model_url, model_name, progress)
+         print(f"Model path: {model_path}")
+         result = song_cover_pipeline(song_input, model_path, pitch, keep_files, is_webui, main_gain, backup_gain,
+                                      inst_gain, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
+                                      protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping,
+                                      output_format, progress)
+
+         # Clean up old folders in song_output
+         output_folders = [f for f in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir, f))]
+         output_folders.sort(key=lambda x: os.path.getmtime(os.path.join(output_dir, x)))
+
+         while len(output_folders) > 100:
+             oldest_folder = output_folders.pop(0)
+             shutil.rmtree(os.path.join(output_dir, oldest_folder))
+
+         return result
+     except gr.Error as e:
+         return str(e), None # Return error message and None for the second output
+     finally:
+         if model_path:
+             cleanup_temp_model(model_path)
+
+
+ if __name__ == '__main__':
+     parser = ArgumentParser(description='Generate a AI cover song in the song_output/id directory.', add_help=True)
+     parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
+     parser.add_argument("--listen", action="store_true", default=False, help="Make the WebUI reachable from your local network.")
+     parser.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
+     parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
+     args = parser.parse_args()
+
+     with gr.Blocks(title='AICoverGenWebUI', theme='NoCrypt/miku@1.2.2') as app:
+
+         gr.Label('AICoverGen WebUI created with ❤️', show_label=False)
+
+         # main tab
+         with gr.Tab("Generate"):
+
+             with gr.Accordion('Main Options'):
+                 with gr.Row():
+                     with gr.Column():
+                         model_url = gr.Text(label='Voice Model URL', info='Enter the URL of the voice model zip file', value='https://huggingface.co/megaaziib/my-rvc-models-collection/resolve/main/kobo.zip')
+                         model_name = gr.Text(label='Voice Model Name', info='Enter the name of the voice model', value='kobo')
+                         # rvc_model = gr.Dropdown(voice_models, label='Voice Models', info='Models folder "AICoverGen --> rvc_models". After new models are added into this folder, click the refresh button')
+
+                     with gr.Column() as yt_link_col:
+                         song_input = gr.Text(label='Song input', info='Link to a song on YouTube or full path to a local file. For file upload, click the button below.', value='https://youtu.be/FRh7LvlQTuA')
+                         show_file_upload_button = gr.Button('Upload file instead')
+
+                     with gr.Column(visible=False) as file_upload_col:
+                         local_file = gr.File(label='Audio file')
+                         song_input_file = gr.UploadButton('Upload 📂', file_types=['audio'], variant='primary')
+                         show_yt_link_button = gr.Button('Paste YouTube link/Path to local file instead')
+                         song_input_file.upload(process_file_upload, inputs=[song_input_file], outputs=[local_file, song_input])
+
+                     with gr.Column():
+                         pitch = gr.Slider(-24, 24, value=0, step=1, label='Pitch Change (Vocals ONLY)', info='Generally, use 12 for male to female conversions and -12 for vice-versa. (Octaves)')
+                         pitch_all = gr.Slider(-12, 12, value=0, step=1, label='Overall Pitch Change', info='Changes pitch/key of vocals and instrumentals together. Altering this slightly reduces sound quality. (Semitones)')
+                     show_file_upload_button.click(swap_visibility, outputs=[file_upload_col, yt_link_col, song_input, local_file])
+                     show_yt_link_button.click(swap_visibility, outputs=[yt_link_col, file_upload_col, song_input, local_file])
+
+             with gr.Accordion('Voice conversion options', open=False):
+                 with gr.Row():
+                     index_rate = gr.Slider(0, 1, value=0.5, label='Index Rate', info="Controls how much of the AI voice's accent to keep in the vocals")
+                     filter_radius = gr.Slider(0, 7, value=3, step=1, label='Filter radius', info='If >=3: apply median filtering median filtering to the harvested pitch results. Can reduce breathiness')
+                     rms_mix_rate = gr.Slider(0, 1, value=0.25, label='RMS mix rate', info="Control how much to mimic the original vocal's loudness (0) or a fixed loudness (1)")
+                     protect = gr.Slider(0, 0.5, value=0.33, label='Protect rate', info='Protect voiceless consonants and breath sounds. Set to 0.5 to disable.')
+                     with gr.Column():
+                         f0_method = gr.Dropdown(['rmvpe', 'mangio-crepe'], value='rmvpe', label='Pitch detection algorithm', info='Best option is rmvpe (clarity in vocals), then mangio-crepe (smoother vocals)')
+                         crepe_hop_length = gr.Slider(32, 320, value=128, step=1, visible=False, label='Crepe hop length', info='Lower values leads to longer conversions and higher risk of voice cracks, but better pitch accuracy.')
+                         f0_method.change(show_hop_slider, inputs=f0_method, outputs=crepe_hop_length)
+                 keep_files = gr.Checkbox(label='Keep intermediate files', info='Keep all audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals. Leave unchecked to save space')
+
+             with gr.Accordion('Audio mixing options', open=False):
+                 gr.Markdown('### Volume Change (decibels)')
+                 with gr.Row():
+                     main_gain = gr.Slider(-20, 20, value=0, step=1, label='Main Vocals')
+                     backup_gain = gr.Slider(-20, 20, value=0, step=1, label='Backup Vocals')
+                     inst_gain = gr.Slider(-20, 20, value=0, step=1, label='Music')
+
+                 gr.Markdown('### Reverb Control on AI Vocals')
+                 with gr.Row():
+                     reverb_rm_size = gr.Slider(0, 1, value=0.15, label='Room size', info='The larger the room, the longer the reverb time')
+                     reverb_wet = gr.Slider(0, 1, value=0.2, label='Wetness level', info='Level of AI vocals with reverb')
+                     reverb_dry = gr.Slider(0, 1, value=0.8, label='Dryness level', info='Level of AI vocals without reverb')
+                     reverb_damping = gr.Slider(0, 1, value=0.7, label='Damping level', info='Absorption of high frequencies in the reverb')
+
+                 gr.Markdown('### Audio Output Format')
+                 output_format = gr.Dropdown(['mp3', 'wav'], value='mp3', label='Output file type', info='mp3: small file size, decent quality. wav: Large file size, best quality')
+
+             with gr.Row():
+                 clear_btn = gr.ClearButton(value='Clear', components=[song_input, model_url, keep_files, local_file])
+                 generate_btn = gr.Button("Generate", variant='primary')
+             with gr.Row():
+                 ai_cover = gr.Audio(label='AI Cover (Vocal Only Inference)', show_share_button=False)
+                 ai_backing = gr.Audio(label='AI Cover (Vocal Backing Inference)', show_share_button=False)
+
+             is_webui = gr.Number(value=1, visible=False)
+             generate_btn.click(song_cover_pipeline_with_model_download,
+                                inputs=[song_input, model_url, model_name, pitch, keep_files, is_webui, main_gain, backup_gain,
+
+                                        inst_gain, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
+                                        protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping,
+                                        output_format],
+                                outputs=[ai_cover, ai_backing])
+             clear_btn.click(lambda: [0, 0, 0, 0, 0.5, 3, 0.25, 0.33, 'rmvpe', 128, 0, 0.15, 0.2, 0.8, 0.7, 'mp3', None],
+                             outputs=[pitch, main_gain, backup_gain, inst_gain, index_rate, filter_radius, rms_mix_rate,
+                                      protect, f0_method, crepe_hop_length, pitch_all, reverb_rm_size, reverb_wet,
+                                      reverb_dry, reverb_damping, output_format, ai_cover])
+
+     app.launch(
+         share=args.share_enabled,
+         server_name=None if not args.listen else (args.listen_host or '0.0.0.0'),
+         server_port=args.listen_port,
+     )