import re
import subprocess  # re and subprocess are used by the leaderboard() helper below

import gradio as gr

from separwator import *
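

# Build an HTML table of available models by calling the audio-separator CLI
# ("-l" lists models; the filter and limit options narrow the output).
# The first row of the CLI output is styled as a table header.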
def leaderboard(list_filter, list_limit):
    try:
        result = subprocess.run(
            ["audio-separator", "-l", f"--list_filter={list_filter}", f"--list_limit={list_limit}"],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            return f"Error: {result.stderr}"
        return "<table border='1'>" + "".join(
            f"<tr style='{'font-weight: bold; font-size: 1.2em;' if i == 0 else ''}'>" +
            "".join(f"<td>{cell}</td>" for cell in re.split(r"\s{2,}", line.strip())) +
            "</tr>"
            for i, line in enumerate(re.findall(r"^(?!-+)(.+)$", result.stdout.strip(), re.MULTILINE))
        ) + "</table>"
    except Exception as e:
        return f"Error: {e}"
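

# Gradio interface: one tab per separation backend (BS/Mel Roformer, MDX23C,
# MDX-NET, VR Arch, Demucs), plus a model leaderboard tab and a credits tab.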
with gr.Blocks(title = "🎵 Audio Separator UI 🎵") as app:
    with gr.Row():
        gr.Markdown("<h1><center> 🎵 Audio Separator UI 🎵 </center></h1>")
    with gr.Row():
        with gr.Tabs():
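            # Two-stem separation with BS/Mel-band Roformer models.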
            with gr.TabItem("BS/Mel Roformer"):
                with gr.Row():
                    roformer_model = gr.Dropdown(
                        label = "Select the model",
                        choices = list(roformer_models.keys()),
                        value = lambda : None,
                        interactive = True
                    )
                    roformer_output_format = gr.Dropdown(
                        label = "Select the output format",
                        choices = output_format,
                        value = lambda : None,
                        interactive = True
                    )
                with gr.Accordion("Advanced settings", open = False):
                    with gr.Group():
                        with gr.Row():
                            roformer_segment_size = gr.Slider(
                                label = "Segment size",
                                info = "Larger consumes more resources, but may give better results",
                                minimum = 32,
                                maximum = 4000,
                                step = 32,
                                value = 256,
                                interactive = True
                            )
                            roformer_override_segment_size = gr.Checkbox(
                                label = "Override segment size",
                                info = "Use the segment size above instead of the model's default value",
                                value = False,
                                interactive = True
                            )
                        with gr.Row():
                            roformer_overlap = gr.Slider(
                                label = "Overlap",
                                info = "Amount of overlap between prediction windows",
                                minimum = 2,
                                maximum = 10,
                                step = 1,
                                value = 8,
                                interactive = True
                            )
                            roformer_batch_size = gr.Slider(
                                label = "Batch size",
                                info = "Larger consumes more RAM but may process slightly faster",
                                minimum = 1,
                                maximum = 16,
                                step = 1,
                                value = 1,
                                interactive = True
                            )
                        with gr.Row():
                            roformer_normalization_threshold = gr.Slider(
                                label = "Normalization threshold",
                                info = "The threshold for audio normalization",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                            roformer_amplification_threshold = gr.Slider(
                                label = "Amplification threshold",
                                info = "The threshold for audio amplification",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                with gr.Row():
                    roformer_audio = gr.Audio(
                        label = "Input audio",
                        type = "filepath",
                        interactive = True
                    )
                with gr.Accordion("Separation by link", open = False):
                    with gr.Row():
                        roformer_link = gr.Textbox(
                            label = "Link",
                            placeholder = "Paste the link here",
                            interactive = True
                        )
                    with gr.Row():
                        gr.Markdown("You can paste the link to the video/audio from many sites; check the complete list [here](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)")
                    with gr.Row():
                        roformer_download_button = gr.Button(
                            "Download!",
                            variant = "primary"
                        )
                    roformer_download_button.click(download_audio, [roformer_link], [roformer_audio])
                with gr.Row():
                    roformer_button = gr.Button("Separate!", variant = "primary")
                with gr.Row():
                    roformer_stem1 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        label = "Stem 1",
                        type = "filepath"
                    )
                    roformer_stem2 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        label = "Stem 2",
                        type = "filepath"
                    )
                roformer_button.click(roformer_separator, [roformer_audio, roformer_model, roformer_output_format, roformer_segment_size, roformer_override_segment_size, roformer_overlap, roformer_batch_size, roformer_normalization_threshold, roformer_amplification_threshold], [roformer_stem1, roformer_stem2])
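            # MDX23C models; controls mirror the Roformer tab.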
            with gr.TabItem("MDX23C"):
                with gr.Row():
                    mdx23c_model = gr.Dropdown(
                        label = "Select the model",
                        choices = mdx23c_models,
                        value = lambda : None,
                        interactive = True
                    )
                    mdx23c_output_format = gr.Dropdown(
                        label = "Select the output format",
                        choices = output_format,
                        value = lambda : None,
                        interactive = True
                    )
                with gr.Accordion("Advanced settings", open = False):
                    with gr.Group():
                        with gr.Row():
                            mdx23c_segment_size = gr.Slider(
                                minimum = 32,
                                maximum = 4000,
                                step = 32,
                                label = "Segment size",
                                info = "Larger consumes more resources, but may give better results",
                                value = 256,
                                interactive = True
                            )
                            mdx23c_override_segment_size = gr.Checkbox(
                                label = "Override segment size",
                                info = "Use the segment size above instead of the model's default value",
                                value = False,
                                interactive = True
                            )
                        with gr.Row():
                            mdx23c_overlap = gr.Slider(
                                minimum = 2,
                                maximum = 50,
                                step = 1,
                                label = "Overlap",
                                info = "Amount of overlap between prediction windows",
                                value = 8,
                                interactive = True
                            )
                            mdx23c_batch_size = gr.Slider(
                                label = "Batch size",
                                info = "Larger consumes more RAM but may process slightly faster",
                                minimum = 1,
                                maximum = 16,
                                step = 1,
                                value = 1,
                                interactive = True
                            )
                        with gr.Row():
                            mdx23c_normalization_threshold = gr.Slider(
                                label = "Normalization threshold",
                                info = "The threshold for audio normalization",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                            mdx23c_amplification_threshold = gr.Slider(
                                label = "Amplification threshold",
                                info = "The threshold for audio amplification",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                with gr.Row():
                    mdx23c_audio = gr.Audio(
                        label = "Input audio",
                        type = "filepath",
                        interactive = True
                    )
                with gr.Accordion("Separation by link", open = False):
                    with gr.Row():
                        mdx23c_link = gr.Textbox(
                            label = "Link",
                            placeholder = "Paste the link here",
                            interactive = True
                        )
                    with gr.Row():
                        gr.Markdown("You can paste the link to the video/audio from many sites; check the complete list [here](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)")
                    with gr.Row():
                        mdx23c_download_button = gr.Button(
                            "Download!",
                            variant = "primary"
                        )
                    mdx23c_download_button.click(download_audio, [mdx23c_link], [mdx23c_audio])
                with gr.Row():
                    mdx23c_button = gr.Button("Separate!", variant = "primary")
                with gr.Row():
                    mdx23c_stem1 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        label = "Stem 1",
                        type = "filepath"
                    )
                    mdx23c_stem2 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        label = "Stem 2",
                        type = "filepath"
                    )
                mdx23c_button.click(mdxc_separator, [mdx23c_audio, mdx23c_model, mdx23c_output_format, mdx23c_segment_size, mdx23c_override_segment_size, mdx23c_overlap, mdx23c_batch_size, mdx23c_normalization_threshold, mdx23c_amplification_threshold], [mdx23c_stem1, mdx23c_stem2])
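            # MDX-NET models; adds hop length and denoise controls.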
            with gr.TabItem("MDX-NET"):
                with gr.Row():
                    mdxnet_model = gr.Dropdown(
                        label = "Select the model",
                        choices = mdxnet_models,
                        value = lambda : None,
                        interactive = True
                    )
                    mdxnet_output_format = gr.Dropdown(
                        label = "Select the output format",
                        choices = output_format,
                        value = lambda : None,
                        interactive = True
                    )
                with gr.Accordion("Advanced settings", open = False):
                    with gr.Group():
                        with gr.Row():
                            mdxnet_hop_length = gr.Slider(
                                label = "Hop length",
                                info = "Usually called stride in neural networks; only change if you know what you're doing",
                                minimum = 32,
                                maximum = 2048,
                                step = 32,
                                value = 1024,
                                interactive = True
                            )
                            mdxnet_segment_size = gr.Slider(
                                minimum = 32,
                                maximum = 4000,
                                step = 32,
                                label = "Segment size",
                                info = "Larger consumes more resources, but may give better results",
                                value = 256,
                                interactive = True
                            )
                            mdxnet_denoise = gr.Checkbox(
                                label = "Denoise",
                                info = "Enable denoising during separation",
                                value = True,
                                interactive = True
                            )
                        with gr.Row():
                            mdxnet_overlap = gr.Slider(
                                label = "Overlap",
                                info = "Amount of overlap between prediction windows",
                                minimum = 0.001,
                                maximum = 0.999,
                                step = 0.001,
                                value = 0.25,
                                interactive = True
                            )
                            mdxnet_batch_size = gr.Slider(
                                label = "Batch size",
                                info = "Larger consumes more RAM but may process slightly faster",
                                minimum = 1,
                                maximum = 16,
                                step = 1,
                                value = 1,
                                interactive = True
                            )
                        with gr.Row():
                            mdxnet_normalization_threshold = gr.Slider(
                                label = "Normalization threshold",
                                info = "The threshold for audio normalization",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                            mdxnet_amplification_threshold = gr.Slider(
                                label = "Amplification threshold",
                                info = "The threshold for audio amplification",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                with gr.Row():
                    mdxnet_audio = gr.Audio(
                        label = "Input audio",
                        type = "filepath",
                        interactive = True
                    )
                with gr.Accordion("Separation by link", open = False):
                    with gr.Row():
                        mdxnet_link = gr.Textbox(
                            label = "Link",
                            placeholder = "Paste the link here",
                            interactive = True
                        )
                    with gr.Row():
                        gr.Markdown("You can paste the link to the video/audio from many sites; check the complete list [here](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)")
                    with gr.Row():
                        mdxnet_download_button = gr.Button(
                            "Download!",
                            variant = "primary"
                        )
                    mdxnet_download_button.click(download_audio, [mdxnet_link], [mdxnet_audio])
                with gr.Row():
                    mdxnet_button = gr.Button("Separate!", variant = "primary")
                with gr.Row():
                    mdxnet_stem1 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        label = "Stem 1",
                        type = "filepath"
                    )
                    mdxnet_stem2 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        label = "Stem 2",
                        type = "filepath"
                    )
                mdxnet_button.click(mdxnet_separator, [mdxnet_audio, mdxnet_model, mdxnet_output_format, mdxnet_hop_length, mdxnet_segment_size, mdxnet_denoise, mdxnet_overlap, mdxnet_batch_size, mdxnet_normalization_threshold, mdxnet_amplification_threshold], [mdxnet_stem1, mdxnet_stem2])
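            # VR architecture models: window size, aggression and post-processing controls.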
            with gr.TabItem("VR ARCH"):
                with gr.Row():
                    vrarch_model = gr.Dropdown(
                        label = "Select the model",
                        choices = vrarch_models,
                        value = lambda : None,
                        interactive = True
                    )
                    vrarch_output_format = gr.Dropdown(
                        label = "Select the output format",
                        choices = output_format,
                        value = lambda : None,
                        interactive = True
                    )
                with gr.Accordion("Advanced settings", open = False):
                    with gr.Group():
                        with gr.Row():
                            vrarch_window_size = gr.Slider(
                                label = "Window size",
                                info = "Balance quality and speed. 1024 = faster but lower quality, 320 = slower but better quality",
                                minimum = 320,
                                maximum = 1024,
                                step = 32,
                                value = 512,
                                interactive = True
                            )
                            vrarch_agression = gr.Slider(
                                minimum = 1,
                                maximum = 50,
                                step = 1,
                                label = "Aggression",
                                info = "Intensity of primary stem extraction",
                                value = 5,
                                interactive = True
                            )
                            vrarch_tta = gr.Checkbox(
                                label = "TTA",
                                info = "Enable Test-Time-Augmentation; slower but improves quality",
                                value = True,
                                visible = True,
                                interactive = True
                            )
                        with gr.Row():
                            vrarch_post_process = gr.Checkbox(
                                label = "Post process",
                                info = "Identify leftover artifacts within vocal output; may improve separation for some songs",
                                value = False,
                                visible = True,
                                interactive = True
                            )
                            vrarch_post_process_threshold = gr.Slider(
                                label = "Post process threshold",
                                info = "Threshold for post-processing",
                                minimum = 0.1,
                                maximum = 0.3,
                                step = 0.1,
                                value = 0.2,
                                interactive = True
                            )
                        with gr.Row():
                            vrarch_high_end_process = gr.Checkbox(
                                label = "High end process",
                                info = "Mirror the missing frequency range of the output",
                                value = False,
                                visible = True,
                                interactive = True,
                            )
                            vrarch_batch_size = gr.Slider(
                                label = "Batch size",
                                info = "Larger consumes more RAM but may process slightly faster",
                                minimum = 1,
                                maximum = 16,
                                step = 1,
                                value = 1,
                                interactive = True
                            )
                        with gr.Row():
                            vrarch_normalization_threshold = gr.Slider(
                                label = "Normalization threshold",
                                info = "The threshold for audio normalization",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                            vrarch_amplification_threshold = gr.Slider(
                                label = "Amplification threshold",
                                info = "The threshold for audio amplification",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                with gr.Row():
                    vrarch_audio = gr.Audio(
                        label = "Input audio",
                        type = "filepath",
                        interactive = True
                    )
                with gr.Accordion("Separation by link", open = False):
                    with gr.Row():
                        vrarch_link = gr.Textbox(
                            label = "Link",
                            placeholder = "Paste the link here",
                            interactive = True
                        )
                    with gr.Row():
                        gr.Markdown("You can paste the link to the video/audio from many sites; check the complete list [here](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)")
                    with gr.Row():
                        vrarch_download_button = gr.Button(
                            "Download!",
                            variant = "primary"
                        )
                    vrarch_download_button.click(download_audio, [vrarch_link], [vrarch_audio])
                with gr.Row():
                    vrarch_button = gr.Button("Separate!", variant = "primary")
                with gr.Row():
                    vrarch_stem1 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        type = "filepath",
                        label = "Stem 1"
                    )
                    vrarch_stem2 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        type = "filepath",
                        label = "Stem 2"
                    )
                vrarch_button.click(vrarch_separator, [vrarch_audio, vrarch_model, vrarch_output_format, vrarch_window_size, vrarch_agression, vrarch_tta, vrarch_post_process, vrarch_post_process_threshold, vrarch_high_end_process, vrarch_batch_size, vrarch_normalization_threshold, vrarch_amplification_threshold], [vrarch_stem1, vrarch_stem2])
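            # Demucs models: up to six stems; the extra stem row is only shown for
            # models that produce them (see the demucs_model.change handler below).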
            with gr.TabItem("Demucs"):
                with gr.Row():
                    demucs_model = gr.Dropdown(
                        label = "Select the model",
                        choices = demucs_models,
                        value = lambda : None,
                        interactive = True
                    )
                    demucs_output_format = gr.Dropdown(
                        label = "Select the output format",
                        choices = output_format,
                        value = lambda : None,
                        interactive = True
                    )
                with gr.Accordion("Advanced settings", open = False):
                    with gr.Group():
                        with gr.Row():
                            demucs_shifts = gr.Slider(
                                label = "Shifts",
                                info = "Number of predictions with random shifts; higher = slower but better quality",
                                minimum = 1,
                                maximum = 20,
                                step = 1,
                                value = 2,
                                interactive = True
                            )
                            demucs_segment_size = gr.Slider(
                                label = "Segment size",
                                info = "Size of segments into which the audio is split. Higher = slower but better quality",
                                minimum = 1,
                                maximum = 100,
                                step = 1,
                                value = 40,
                                interactive = True
                            )
                            demucs_segments_enabled = gr.Checkbox(
                                label = "Segment-wise processing",
                                info = "Enable segment-wise processing",
                                value = True,
                                interactive = True
                            )
                        with gr.Row():
                            demucs_overlap = gr.Slider(
                                label = "Overlap",
                                info = "Overlap between prediction windows. Higher = slower but better quality",
                                minimum = 0.001,
                                maximum = 0.999,
                                step = 0.001,
                                value = 0.25,
                                interactive = True
                            )
                            demucs_batch_size = gr.Slider(
                                label = "Batch size",
                                info = "Larger consumes more RAM but may process slightly faster",
                                minimum = 1,
                                maximum = 16,
                                step = 1,
                                value = 1,
                                interactive = True
                            )
                        with gr.Row():
                            demucs_normalization_threshold = gr.Slider(
                                label = "Normalization threshold",
                                info = "The threshold for audio normalization",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                            demucs_amplification_threshold = gr.Slider(
                                label = "Amplification threshold",
                                info = "The threshold for audio amplification",
                                minimum = 0.1,
                                maximum = 1,
                                step = 0.1,
                                value = 0.1,
                                interactive = True
                            )
                with gr.Row():
                    demucs_audio = gr.Audio(
                        label = "Input audio",
                        type = "filepath",
                        interactive = True
                    )
                with gr.Accordion("Separation by link", open = False):
                    with gr.Row():
                        demucs_link = gr.Textbox(
                            label = "Link",
                            placeholder = "Paste the link here",
                            interactive = True
                        )
                    with gr.Row():
                        gr.Markdown("You can paste the link to the video/audio from many sites; check the complete list [here](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)")
                    with gr.Row():
                        demucs_download_button = gr.Button(
                            "Download!",
                            variant = "primary"
                        )
                    demucs_download_button.click(download_audio, [demucs_link], [demucs_audio])
                with gr.Row():
                    demucs_button = gr.Button("Separate!", variant = "primary")
                with gr.Row():
                    demucs_stem1 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        type = "filepath",
                        label = "Stem 1"
                    )
                    demucs_stem2 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        type = "filepath",
                        label = "Stem 2"
                    )
                with gr.Row():
                    demucs_stem3 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        type = "filepath",
                        label = "Stem 3"
                    )
                    demucs_stem4 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        type = "filepath",
                        label = "Stem 4"
                    )
                with gr.Row(visible=False) as stem6:
                    demucs_stem5 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        type = "filepath",
                        label = "Stem 5"
                    )
                    demucs_stem6 = gr.Audio(
                        show_download_button = True,
                        interactive = False,
                        type = "filepath",
                        label = "Stem 6"
                    )
                demucs_model.change(update_stems, inputs=[demucs_model], outputs=stem6)
                demucs_button.click(demucs_separator, [demucs_audio, demucs_model, demucs_output_format, demucs_shifts, demucs_segment_size, demucs_segments_enabled, demucs_overlap, demucs_batch_size, demucs_normalization_threshold, demucs_amplification_threshold], [demucs_stem1, demucs_stem2, demucs_stem3, demucs_stem4, demucs_stem5, demucs_stem6])
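            # Model leaderboard rendered from the audio-separator CLI output.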
            with gr.Tab("Leaderboard"):
                with gr.Row(equal_height=True):
                    list_filter = gr.Dropdown(value="vocals", choices=["vocals", "instrumental", "drums", "bass", "guitar", "piano", "other"], label="List filter", info="Filter and sort the model list by stem")
                    list_limit = gr.Slider(minimum=1, maximum=10, step=1, value=5, label="List limit", info="Limit the number of models shown.")
                    list_button = gr.Button("Show list", variant="primary")
                output_list = gr.HTML(label="Leaderboard")
                # Wire the button to the leaderboard() helper defined above.
                list_button.click(leaderboard, inputs=[list_filter, list_limit], outputs=output_list)
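            # Credits and acknowledgements.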
            with gr.TabItem("Credits"):
                gr.Markdown(
                    """
                    Audio Separator UI created by **[Eddycrack 864]** & **[_noxty](https://huggingface.co/theNeofr)**.

                    * python-audio-separator by [beveradb](https://github.com/beveradb).
                    * Thanks to [Mikus](https://github.com/cappuch) for the help with the code.
                    * Thanks to [Nick088](https://huggingface.co/Nick088) for helping to fix the Roformer models.
                    * Thanks to the [yt_dlp](https://github.com/yt-dlp/yt-dlp) devs.
                    * Separation-by-link source code and improvements by [_noxty](https://huggingface.co/theNeofr).
                    * Thanks to [ArisDev](https://github.com/aris-py) for porting UVR5 UI to Kaggle and for improvements.
                    * Thanks to [Bebra777228](https://github.com/Bebra777228), whose code helped guide improvements to this one.

                    You can donate to the original UVR5 project here:

                    [!["Buy Me A Coffee"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://www.buymeacoffee.com/uvr5)
                    """
                )
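
# Queue incoming requests and launch the app with a public share link and debug logging.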
app.queue()
app.launch(share=True, debug=True)