Blane187 commited on
Commit
c80814f
1 Parent(s): c0560f8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -212
app.py CHANGED
@@ -11,6 +11,7 @@ from tts_voice import tts_order_voice
11
  import edge_tts
12
  import tempfile
13
  from audio_separator.separator import Separator
 
14
  import model_handler
15
  import psutil
16
  import cpuinfo
@@ -54,14 +55,7 @@ PITCH_ALGO_OPT = [
54
  "rmvpe",
55
  "rmvpe+",
56
  ]
57
- UVR_5_MODELS = [
58
- {"model_name": "BS-Roformer-Viperx-1297", "checkpoint": "model_bs_roformer_ep_317_sdr_12.9755.ckpt"},
59
- {"model_name": "MDX23C-InstVoc HQ 2", "checkpoint": "MDX23C-8KFFT-InstVoc_HQ_2.ckpt"},
60
- {"model_name": "Kim Vocal 2", "checkpoint": "Kim_Vocal_2.onnx"},
61
- {"model_name": "5_HP-Karaoke", "checkpoint": "5_HP-Karaoke-UVR.pth"},
62
- {"model_name": "UVR-DeNoise by FoxJoy", "checkpoint": "UVR-DeNoise.pth"},
63
- {"model_name": "UVR-DeEcho-DeReverb by FoxJoy", "checkpoint": "UVR-DeEcho-DeReverb.pth"},
64
- ]
65
  MODELS = [
66
  {"model": "model.pth", "index": "model.index", "model_name": "Test Model"},
67
  ]
@@ -174,33 +168,6 @@ else:
174
  parallel_workers=8
175
  )
176
 
177
- def calculate_remaining_time(epochs, seconds_per_epoch):
178
- total_seconds = epochs * seconds_per_epoch
179
-
180
- hours = total_seconds // 3600
181
- minutes = (total_seconds % 3600) // 60
182
- seconds = total_seconds % 60
183
-
184
- if hours == 0:
185
- return f"{int(minutes)} minutes"
186
- elif hours == 1:
187
- return f"{int(hours)} hour and {int(minutes)} minutes"
188
- else:
189
- return f"{int(hours)} hours and {int(minutes)} minutes"
190
-
191
- def inf_handler(audio, model_name):
192
- model_found = False
193
- for model_info in UVR_5_MODELS:
194
- if model_info["model_name"] == model_name:
195
- separator.load_model(model_info["checkpoint"])
196
- model_found = True
197
- break
198
- if not model_found:
199
- separator.load_model()
200
- output_files = separator.separate(audio)
201
- vocals = output_files[0]
202
- inst = output_files[1]
203
- return vocals, inst
204
 
205
 
206
  def run(
@@ -277,10 +244,17 @@ def upload_model(index_file, pth_file, model_name):
277
  return "Uploaded!"
278
 
279
  with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose"), title="Ilaria RVC 💖") as app:
280
- gr.Markdown("## Ilaria RVC 💖")
281
- gr.Markdown("**Help keeping up the GPU donating on [Ko-Fi](https://ko-fi.com/ilariaowo)**")
282
  with gr.Tab("Inference"):
283
- sound_gui = gr.Audio(value=None,type="filepath",autoplay=False,visible=True,)
 
 
 
 
 
 
 
284
  def update():
285
  print(MODELS)
286
  return gr.Dropdown(label="Model",choices=[model["model_name"] for model in MODELS],visible=True,interactive=True, value=MODELS[0]["model_name"],)
@@ -289,13 +263,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")
289
  refresh_button = gr.Button("Refresh Models")
290
  refresh_button.click(update, outputs=[models_dropdown])
291
 
292
- with gr.Accordion("Ilaria TTS", open=False):
293
- text_tts = gr.Textbox(label="Text", placeholder="Hello!", lines=3, interactive=True,)
294
- dropdown_tts = gr.Dropdown(label="Language and Model",choices=list(language_dict.keys()),interactive=True, value=list(language_dict.keys())[0])
295
-
296
- button_tts = gr.Button("Speak", variant="primary",)
297
- button_tts.click(text_to_speech_edge, inputs=[text_tts, dropdown_tts], outputs=[sound_gui])
298
-
299
  with gr.Accordion("Settings", open=False):
300
  pitch_algo_conf = gr.Dropdown(PITCH_ALGO_OPT,value=PITCH_ALGO_OPT[4],label="Pitch algorithm",visible=True,interactive=True,)
301
  pitch_lvl_conf = gr.Slider(label="Pitch level (lower -> 'male' while higher -> 'female')",minimum=-24,maximum=24,step=1,value=0,visible=True,interactive=True,)
@@ -347,181 +315,18 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")
347
  upload_button.click(upload_model, [index_file_upload, pth_file_upload, model_name], upload_status)
348
 
349
 
350
- with gr.Tab("Vocal Separator (UVR)"):
351
- gr.Markdown("Separate vocals and instruments from an audio file using UVR models. - This is only on CPU due to ZeroGPU being ZeroGPU :(")
352
- uvr5_audio_file = gr.Audio(label="Audio File",type="filepath")
353
-
354
- with gr.Row():
355
- uvr5_model = gr.Dropdown(label="Model", choices=[model["model_name"] for model in UVR_5_MODELS])
356
- uvr5_button = gr.Button("Separate Vocals", variant="primary",)
357
-
358
- uvr5_output_voc = gr.Audio(type="filepath", label="Output 1",)
359
- uvr5_output_inst = gr.Audio(type="filepath", label="Output 2",)
360
-
361
- uvr5_button.click(inference, [uvr5_audio_file, uvr5_model], [uvr5_output_voc, uvr5_output_inst])
362
-
363
- with gr.Tab("Extra"):
364
- with gr.Accordion("Model Information", open=False):
365
- def json_to_markdown_table(json_data):
366
- table = "| Key | Value |\n| --- | --- |\n"
367
- for key, value in json_data.items():
368
- table += f"| {key} | {value} |\n"
369
- return table
370
- def model_info(name):
371
- for model in MODELS:
372
- if model["model_name"] == name:
373
- print(model["model"])
374
- info = model_handler.model_info(model["model"])
375
- info2 = {
376
- "Model Name": model["model_name"],
377
- "Model Config": info['config'],
378
- "Epochs Trained": info['epochs'],
379
- "Sample Rate": info['sr'],
380
- "Pitch Guidance": info['f0'],
381
- "Model Precision": info['size'],
382
- }
383
- return gr.Markdown(json_to_markdown_table(info2))
384
-
385
- return "Model not found"
386
- def update():
387
- print(MODELS)
388
- return gr.Dropdown(label="Model", choices=[model["model_name"] for model in MODELS])
389
- with gr.Row():
390
- model_info_dropdown = gr.Dropdown(label="Model", choices=[model["model_name"] for model in MODELS])
391
- refresh_button = gr.Button("Refresh Models")
392
- refresh_button.click(update, outputs=[model_info_dropdown])
393
- model_info_button = gr.Button("Get Model Information")
394
- model_info_output = gr.Textbox(value="Waiting...",label="Output", interactive=False)
395
- model_info_button.click(model_info, [model_info_dropdown], [model_info_output])
396
-
397
-
398
-
399
- with gr.Accordion("Training Time Calculator", open=False):
400
- with gr.Column():
401
- epochs_input = gr.Number(label="Number of Epochs")
402
- seconds_input = gr.Number(label="Seconds per Epoch")
403
- calculate_button = gr.Button("Calculate Time Remaining")
404
- remaining_time_output = gr.Textbox(label="Remaining Time", interactive=False)
405
-
406
- calculate_button.click(calculate_remaining_time,inputs=[epochs_input, seconds_input],outputs=[remaining_time_output])
407
-
408
- with gr.Accordion("Model Fusion", open=False):
409
- with gr.Group():
410
- def merge(ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0, version_2):
411
- for model in MODELS:
412
- if model["model_name"] == ckpt_a:
413
- ckpt_a = model["model"]
414
- if model["model_name"] == ckpt_b:
415
- ckpt_b = model["model"]
416
-
417
- path = model_handler.merge(ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0, version_2)
418
- if path == "Fail to merge the models. The model architectures are not the same.":
419
- return "Fail to merge the models. The model architectures are not the same."
420
- else:
421
- MODELS.append({"model": path, "index": None, "model_name": name_to_save0})
422
- return "Merged, saved as " + name_to_save0
423
-
424
- gr.Markdown(value="Strongly suggested to use only very clean models.")
425
- with gr.Row():
426
- def update():
427
- print(MODELS)
428
- return gr.Dropdown(label="Model A", choices=[model["model_name"] for model in MODELS]), gr.Dropdown(label="Model B", choices=[model["model_name"] for model in MODELS])
429
- refresh_button_fusion = gr.Button("Refresh Models")
430
- ckpt_a = gr.Dropdown(label="Model A", choices=[model["model_name"] for model in MODELS])
431
- ckpt_b = gr.Dropdown(label="Model B", choices=[model["model_name"] for model in MODELS])
432
- refresh_button_fusion.click(update, outputs=[ckpt_a, ckpt_b])
433
- alpha_a = gr.Slider(
434
- minimum=0,
435
- maximum=1,
436
- label="Weight of the first model over the second",
437
- value=0.5,
438
- interactive=True,
439
- )
440
- with gr.Group():
441
- with gr.Row():
442
- sr_ = gr.Radio(
443
- label="Sample rate of both models",
444
- choices=["32k","40k", "48k"],
445
- value="32k",
446
- interactive=True,
447
- )
448
- if_f0_ = gr.Radio(
449
- label="Pitch Guidance",
450
- choices=["Yes", "Nah"],
451
- value="Yes",
452
- interactive=True,
453
- )
454
- info__ = gr.Textbox(
455
- label="Add informations to the model",
456
- value="",
457
- max_lines=8,
458
- interactive=True,
459
- visible=False
460
- )
461
- name_to_save0 = gr.Textbox(
462
- label="Final Model name",
463
- value="",
464
- max_lines=1,
465
- interactive=True,
466
- )
467
- version_2 = gr.Radio(
468
- label="Versions of the models",
469
- choices=["v1", "v2"],
470
- value="v2",
471
- interactive=True,
472
- )
473
- with gr.Group():
474
- with gr.Row():
475
- but6 = gr.Button("Fuse the two models", variant="primary")
476
- info4 = gr.Textbox(label="Output", value="", max_lines=8)
477
- but6.click(
478
- merge,
479
- [ckpt_a,ckpt_b,alpha_a,sr_,if_f0_,info__,name_to_save0,version_2,],info4,api_name="ckpt_merge",)
480
-
481
- with gr.Accordion("Model Quantization", open=False):
482
- gr.Markdown("Quantize the model to a lower precision. - soon™ or never™ 😎")
483
-
484
- with gr.Accordion("Debug", open=False):
485
- def json_to_markdown_table(json_data):
486
- table = "| Key | Value |\n| --- | --- |\n"
487
- for key, value in json_data.items():
488
- table += f"| {key} | {value} |\n"
489
- return table
490
- gr.Markdown("View the models that are currently loaded in the instance.")
491
-
492
- gr.Markdown(json_to_markdown_table({"Models": len(MODELS), "UVR Models": len(UVR_5_MODELS)}))
493
-
494
- gr.Markdown("View the current status of the instance.")
495
- status = {
496
- "Status": "Running", # duh lol
497
- "Models": len(MODELS),
498
- "UVR Models": len(UVR_5_MODELS),
499
- "CPU Usage": f"{psutil.cpu_percent()}%",
500
- "RAM Usage": f"{psutil.virtual_memory().percent}%",
501
- "CPU": f"{cpuinfo.get_cpu_info()['brand_raw']}",
502
- "System Uptime": f"{round(time.time() - psutil.boot_time(), 2)} seconds",
503
- "System Load Average": f"{psutil.getloadavg()}",
504
- "====================": "====================",
505
- "CPU Cores": psutil.cpu_count(),
506
- "CPU Threads": psutil.cpu_count(logical=True),
507
- "RAM Total": f"{round(psutil.virtual_memory().total / 1024**3, 2)} GB",
508
- "RAM Used": f"{round(psutil.virtual_memory().used / 1024**3, 2)} GB",
509
- "CPU Frequency": f"{psutil.cpu_freq().current} MHz",
510
- "====================": "====================",
511
- "GPU": "A100 - Do a request (Inference, you won't see it either way)",
512
- }
513
- gr.Markdown(json_to_markdown_table(status))
514
-
515
  with gr.Tab("Credits"):
516
  gr.Markdown(
517
  """
 
 
518
  Ilaria RVC made by [Ilaria](https://huggingface.co/TheStinger) support her on [ko-fi](https://ko-fi.com/ilariaowo)
519
 
520
- The Inference code is made by [r3gm](https://huggingface.co/r3gm) (his module helped form this space 💖)
521
 
522
  made with ❤️ by [mikus](https://github.com/cappuch) - made the ui!
523
 
524
- ## In loving memory of JLabDX 🕊️
525
  """
526
  )
527
  with gr.Tab(("")):
 
11
  import edge_tts
12
  import tempfile
13
  from audio_separator.separator import Separator
14
+ from animalesepy import *
15
  import model_handler
16
  import psutil
17
  import cpuinfo
 
55
  "rmvpe",
56
  "rmvpe+",
57
  ]
58
+
 
 
 
 
 
 
 
59
  MODELS = [
60
  {"model": "model.pth", "index": "model.index", "model_name": "Test Model"},
61
  ]
 
168
  parallel_workers=8
169
  )
170
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
 
172
 
173
  def run(
 
244
  return "Uploaded!"
245
 
246
  with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose"), title="Ilaria RVC 💖") as app:
247
+ gr.Markdown("## Animalese RVC 💖")
248
+ gr.Markdown("**this project is a fork of Ilaria RVC!**")
249
  with gr.Tab("Inference"):
250
+ text_input = gr.Textbox(label="Input Text", placeholder="Enter text to convert to Animalese")
251
+ shorten_input = gr.Checkbox(label="Shorten Words")
252
+ pitch_input = gr.Slider(minimum=0.2, maximum=2.0, step=0.1, value=1.0, label="Pitch")
253
+ preview_button = gr.Button("Preview!")
254
+ sound_gui = gr.Audio(value=None,type="filepath",autoplay=False,visible=True)
255
+ preview_button.click(fn=lambda text, shorten, pitch: preview_audio(generate_audio(text, shorten, pitch)),
256
+ inputs=[text_input, shorten_input, pitch_input],
257
+ outputs=sound_gui)
258
  def update():
259
  print(MODELS)
260
  return gr.Dropdown(label="Model",choices=[model["model_name"] for model in MODELS],visible=True,interactive=True, value=MODELS[0]["model_name"],)
 
263
  refresh_button = gr.Button("Refresh Models")
264
  refresh_button.click(update, outputs=[models_dropdown])
265
 
266
+
 
 
 
 
 
 
267
  with gr.Accordion("Settings", open=False):
268
  pitch_algo_conf = gr.Dropdown(PITCH_ALGO_OPT,value=PITCH_ALGO_OPT[4],label="Pitch algorithm",visible=True,interactive=True,)
269
  pitch_lvl_conf = gr.Slider(label="Pitch level (lower -> 'male' while higher -> 'female')",minimum=-24,maximum=24,step=1,value=0,visible=True,interactive=True,)
 
315
  upload_button.click(upload_model, [index_file_upload, pth_file_upload, model_name], upload_status)
316
 
317
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
318
  with gr.Tab("Credits"):
319
  gr.Markdown(
320
  """
321
+ Animalese RVC made by [Blane187](https://huggingface.co/Blane187)
322
+
323
  Ilaria RVC made by [Ilaria](https://huggingface.co/TheStinger) support her on [ko-fi](https://ko-fi.com/ilariaowo)
324
 
325
+ The modules made by [r3gm](https://huggingface.co/r3gm) (his module helped form this space 💖)
326
 
327
  made with ❤️ by [mikus](https://github.com/cappuch) - made the ui!
328
 
329
+
330
  """
331
  )
332
  with gr.Tab(("")):