Politrees committed
Commit e1e1b10 · verified · Parent(s): 1657aae

Update app.py

Files changed (1)
  1. app.py +10 -12
app.py CHANGED
@@ -123,7 +123,7 @@ def prepare_output_dir(input_file, output_dir):
         raise RuntimeError(f"Failed to prepare output directory {out_dir}: {e}")
     return out_dir
 
-def roformer_separator(audio, model_key, seg_size, override_seg_size, batch_size, overlap, pitch_shift, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def roformer_separator(audio, model_key, seg_size, override_seg_size, overlap, pitch_shift, model_dir, out_dir, out_format, norm_thresh, amp_thresh, batch_size, progress=gr.Progress(track_tqdm=True)):
     """Separate audio using Roformer model."""
     base_name = os.path.splitext(os.path.basename(audio))[0]
     print_message(audio, model_key)
@@ -159,7 +159,7 @@ def roformer_separator(audio, model_key, seg_size, override_seg_size, batch_size
     except Exception as e:
         raise RuntimeError(f"Roformer separation failed: {e}") from e
 
-def mdx23c_separator(audio, model, seg_size, override_seg_size, batch_size, overlap, pitch_shift, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def mdx23c_separator(audio, model, seg_size, override_seg_size, overlap, pitch_shift, model_dir, out_dir, out_format, norm_thresh, amp_thresh, batch_size, progress=gr.Progress(track_tqdm=True)):
     """Separate audio using MDX23C model."""
     base_name = os.path.splitext(os.path.basename(audio))[0]
     print_message(audio, model)
@@ -194,7 +194,7 @@ def mdx23c_separator(audio, model, seg_size, override_seg_size, batch_size, over
     except Exception as e:
         raise RuntimeError(f"MDX23C separation failed: {e}") from e
 
-def mdx_separator(audio, model, hop_length, seg_size, overlap, batch_size, denoise, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def mdx_separator(audio, model, hop_length, seg_size, overlap, denoise, model_dir, out_dir, out_format, norm_thresh, amp_thresh, batch_size, progress=gr.Progress(track_tqdm=True)):
     """Separate audio using MDX-NET model."""
     base_name = os.path.splitext(os.path.basename(audio))[0]
     print_message(audio, model)
@@ -229,7 +229,7 @@ def mdx_separator(audio, model, hop_length, seg_size, overlap, batch_size, denoi
     except Exception as e:
         raise RuntimeError(f"MDX-NET separation failed: {e}") from e
 
-def vr_separator(audio, model, batch_size, window_size, aggression, tta, post_process, post_process_threshold, high_end_process, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def vr_separator(audio, model, window_size, aggression, tta, post_process, post_process_threshold, high_end_process, model_dir, out_dir, out_format, norm_thresh, amp_thresh, batch_size, progress=gr.Progress(track_tqdm=True)):
     """Separate audio using VR ARCH model."""
     base_name = os.path.splitext(os.path.basename(audio))[0]
     print_message(audio, model)
@@ -330,6 +330,8 @@ with gr.Blocks(
         with gr.Row():
             norm_threshold = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.9, label="Normalization threshold", info="The threshold for audio normalization.")
             amp_threshold = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.6, label="Amplification threshold", info="The threshold for audio amplification.")
+        with gr.Row():
+            batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
 
     with gr.Tab("Roformer"):
         with gr.Group():
@@ -338,7 +340,6 @@ with gr.Blocks(
             with gr.Row():
                 roformer_seg_size = gr.Slider(minimum=32, maximum=4000, step=32, value=256, label="Segment Size", info="Larger consumes more resources, but may give better results.")
                 roformer_override_seg_size = gr.Checkbox(value=False, label="Override segment size", info="Override model default segment size instead of using the model default value.")
-                roformer_batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
                 roformer_overlap = gr.Slider(minimum=2, maximum=10, step=1, value=8, label="Overlap", info="Amount of overlap between prediction windows. Lower is better but slower.")
                 roformer_pitch_shift = gr.Slider(minimum=-12, maximum=12, step=1, value=0, label="Pitch shift", info="Shift audio pitch by a number of semitones while processing. may improve output for deep/high vocals.")
             with gr.Row():
@@ -356,7 +357,6 @@ with gr.Blocks(
             with gr.Row():
                 mdx23c_seg_size = gr.Slider(minimum=32, maximum=4000, step=32, value=256, label="Segment Size", info="Larger consumes more resources, but may give better results.")
                 mdx23c_override_seg_size = gr.Checkbox(value=False, label="Override segment size", info="Override model default segment size instead of using the model default value.")
-                mdx23c_batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
                 mdx23c_overlap = gr.Slider(minimum=2, maximum=50, step=1, value=8, label="Overlap", info="Amount of overlap between prediction windows. Higher is better but slower.")
                 mdx23c_pitch_shift = gr.Slider(minimum=-12, maximum=12, step=1, value=0, label="Pitch shift", info="Shift audio pitch by a number of semitones while processing. may improve output for deep/high vocals.")
             with gr.Row():
@@ -375,7 +375,6 @@ with gr.Blocks(
                 mdx_hop_length = gr.Slider(minimum=32, maximum=2048, step=32, value=1024, label="Hop Length", info="Usually called stride in neural networks; only change if you know what you're doing.")
                 mdx_seg_size = gr.Slider(minimum=32, maximum=4000, step=32, value=256, label="Segment Size", info="Larger consumes more resources, but may give better results.")
                 mdx_overlap = gr.Slider(minimum=0.001, maximum=0.999, step=0.001, value=0.25, label="Overlap", info="Amount of overlap between prediction windows. Higher is better but slower.")
-                mdx_batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
                 mdx_denoise = gr.Checkbox(value=False, label="Denoise", info="Enable denoising after separation.")
             with gr.Row():
                 mdx_audio = gr.Audio(label="Input Audio", type="filepath")
@@ -390,7 +389,6 @@ with gr.Blocks(
             with gr.Row():
                 vr_model = gr.Dropdown(label="Select the Model", choices=VR_ARCH_MODELS)
             with gr.Row():
-                vr_batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
                 vr_window_size = gr.Slider(minimum=320, maximum=1024, step=32, value=512, label="Window Size", info="Balance quality and speed. 1024 = fast but lower, 320 = slower but better quality.")
                 vr_aggression = gr.Slider(minimum=1, maximum=50, step=1, value=5, label="Agression", info="Intensity of primary stem extraction.")
                 vr_tta = gr.Checkbox(value=False, label="TTA", info="Enable Test-Time-Augmentation; slow but improves quality.")
@@ -437,7 +435,6 @@ with gr.Blocks(
             roformer_model,
             roformer_seg_size,
             roformer_override_seg_size,
-            roformer_batch_size,
             roformer_overlap,
             roformer_pitch_shift,
             model_file_dir,
@@ -445,6 +442,7 @@ with gr.Blocks(
             output_format,
             norm_threshold,
             amp_threshold,
+            batch_size,
         ],
         outputs=[roformer_stem1, roformer_stem2],
     )
@@ -455,7 +453,6 @@ with gr.Blocks(
             mdx23c_model,
             mdx23c_seg_size,
             mdx23c_override_seg_size,
-            mdx23c_batch_size,
             mdx23c_overlap,
             mdx23c_pitch_shift,
             model_file_dir,
@@ -463,6 +460,7 @@ with gr.Blocks(
             output_format,
             norm_threshold,
             amp_threshold,
+            batch_size,
         ],
         outputs=[mdx23c_stem1, mdx23c_stem2],
     )
@@ -474,13 +472,13 @@ with gr.Blocks(
             mdx_hop_length,
             mdx_seg_size,
             mdx_overlap,
-            mdx_batch_size,
             mdx_denoise,
             model_file_dir,
             output_dir,
             output_format,
             norm_threshold,
             amp_threshold,
+            batch_size,
         ],
         outputs=[mdx_stem1, mdx_stem2],
     )
@@ -489,7 +487,6 @@ with gr.Blocks(
         inputs=[
             vr_audio,
             vr_model,
-            vr_batch_size,
             vr_window_size,
             vr_aggression,
             vr_tta,
@@ -501,6 +498,7 @@ with gr.Blocks(
             output_format,
             norm_threshold,
             amp_threshold,
+            batch_size,
         ],
         outputs=[vr_stem1, vr_stem2],
     )
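
The pattern behind this diff: the four per-tab Batch Size sliders are replaced by one shared slider, which is simply appended to the end of each event's inputs list, so every separator callback now receives the batch size as its last positional argument before progress. The snippet below is a minimal, hypothetical sketch of that Gradio wiring (generic component and function names, not the Space's actual code), assuming only standard Gradio behavior: values of the components listed in inputs are passed to the callback positionally, and gr.Progress is injected by Gradio rather than listed in inputs.

# Minimal sketch of the shared-slider wiring (illustrative names only; not the Space's actual code).
# One gr.Slider defined once is reused in every event's `inputs` list, so each callback
# receives its value as the last positional argument before `progress`.
import gradio as gr

def separator_a(audio, overlap, batch_size, progress=gr.Progress(track_tqdm=True)):
    # `batch_size` arrives last because the shared slider is the last entry in `inputs`.
    return f"A: overlap={overlap}, batch_size={batch_size}"

def separator_b(audio, window_size, batch_size, progress=gr.Progress(track_tqdm=True)):
    return f"B: window_size={window_size}, batch_size={batch_size}"

with gr.Blocks() as demo:
    with gr.Row():
        # Shared setting, created once; interactive=False mirrors the slider added in this commit.
        batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", interactive=False)
    with gr.Tab("A"):
        a_audio = gr.Audio(label="Input Audio", type="filepath")
        a_overlap = gr.Slider(minimum=2, maximum=10, step=1, value=8, label="Overlap")
        a_out = gr.Textbox(label="Result")
        gr.Button("Separate").click(separator_a, inputs=[a_audio, a_overlap, batch_size], outputs=a_out)
    with gr.Tab("B"):
        b_audio = gr.Audio(label="Input Audio", type="filepath")
        b_window = gr.Slider(minimum=320, maximum=1024, step=32, value=512, label="Window Size")
        b_out = gr.Textbox(label="Result")
        gr.Button("Separate").click(separator_b, inputs=[b_audio, b_window, batch_size], outputs=b_out)

if __name__ == "__main__":
    demo.launch()

Because progress=gr.Progress(track_tqdm=True) is injected by Gradio and never appears in inputs, appending the shared batch_size component at the end of each list lines up with adding batch_size as the final parameter of each separator signature, leaving the existing parameter order untouched.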