Build error
updated boxes
app.py CHANGED
Previous version of the changed section (lines 59-137; removed lines are marked "-"):

@@ -59,26 +59,27 @@
 # ).launch()


-
 import os
 import gradio as gr
 from scipy.io.wavfile import write
 import subprocess
 import torch

-from audio_separator import Separator

-def inference(audio):
     os.makedirs("out", exist_ok=True)
     audio_path = 'test.wav'
     write(audio_path, audio[0], audio[1])
-
-    # Check for CUDA availability
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
-

     try:
-        # Using subprocess.run for better control
         command = f"python3 -m demucs.separate -n htdemucs_6s -d {device} {audio_path} -o out"
         process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         print("Demucs script output:", process.stdout.decode())
@@ -86,9 +87,7 @@ def inference(audio):
         print("Error in Demucs script:", e.stderr.decode())
         return None

-    use_cuda = device == 'cuda'
     try:
-        # Separating the stems using your custom separator
         separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=use_cuda, output_format='mp3')
         primary_stem_path, secondary_stem_path = separator.separate()
     except Exception as e:
@@ -96,42 +95,42 @@ def inference(audio):
         return None

     # Collecting all file paths
-  [lines 99-121 removed; their content is not captured in this view]

 # Gradio Interface
 title = "Source Separation Demo"
-description = "Music Source Separation in the Waveform Domain. To use it, simply upload your audio."
-
-audio_input = gr.Audio(type="numpy", label="Input")
-checkboxes = [gr.Checkbox(label=stem) for stem in ["vocals", "bass", "drums", "other", "piano", "guitar", "lead_vocals", "backing_vocals"]]
-outputs = [gr.Dynamic(custom_output_component, label=stem) for stem in ["vocals", "bass", "drums", "other", "piano", "guitar", "lead_vocals", "backing_vocals"]]
-
 gr.Interface(
-  [lines 132-134 removed; their content is not captured in this view]
     title=title,
-    description=description
 ).launch()
Updated version of the changed section (lines 59-136; added lines are marked "+"):

 # ).launch()


 import os
 import gradio as gr
 from scipy.io.wavfile import write
 import subprocess
 import torch

+from audio_separator import Separator # Ensure this is correctly implemented

+def inference(audio, vocals, bass, drums, other, piano, guitar, lead_vocals, backing_vocals):
     os.makedirs("out", exist_ok=True)
     audio_path = 'test.wav'
     write(audio_path, audio[0], audio[1])
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    if device=='cuda':
+        use_cuda=True
+        print(f"Using device: {device}")
+    else:
+        use_cuda=False
+        print(f"Using device: {device}")

     try:
         command = f"python3 -m demucs.separate -n htdemucs_6s -d {device} {audio_path} -o out"
         process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         print("Demucs script output:", process.stdout.decode())
  [line 86, unchanged, is not shown in the diff]
         print("Error in Demucs script:", e.stderr.decode())
         return None

     try:
         separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=use_cuda, output_format='mp3')
         primary_stem_path, secondary_stem_path = separator.separate()
     except Exception as e:
  [line 94, unchanged, is not shown in the diff]
         return None

     # Collecting all file paths
+    stem_files = {
+        "vocals": "./out/htdemucs_6s/test/vocals.wav",
+        "bass": "./out/htdemucs_6s/test/bass.wav",
+        "drums": "./out/htdemucs_6s/test/drums.wav",
+        "other": "./out/htdemucs_6s/test/other.wav",
+        "piano": "./out/htdemucs_6s/test/piano.wav",
+        "guitar": "./out/htdemucs_6s/test/guitar.wav",
+        "lead_vocals": primary_stem_path,
+        "backing_vocals": secondary_stem_path
+    }
+
+    # Filter out unchecked stems
+    selected_stems = {
+        "vocals": vocals,
+        "bass": bass,
+        "drums": drums,
+        "other": other,
+        "piano": piano,
+        "guitar": guitar,
+        "lead_vocals": lead_vocals,
+        "backing_vocals": backing_vocals
+    }
+
+    return [stem_files[stem] if selected_stems[stem] and os.path.isfile(stem_files[stem]) else None for stem in selected_stems]
+
+# Checkbox for each stem
+checkboxes = [gr.components.Checkbox(label=stem) for stem in ["Vocals", "Bass", "Drums", "Other", "Piano", "Guitar", "Lead Vocals", "Backing Vocals"]]

 # Gradio Interface
 title = "Source Separation Demo"
+description = "Music Source Separation in the Waveform Domain. To use it, simply upload your audio and select the stems you want to display."
 gr.Interface(
+    inference,
+    [gr.components.Audio(type="numpy", label="Input")] + checkboxes,
+    [gr.components.Audio(type="filepath", label=stem, optional=True) for stem in ["Vocals", "Bass", "Drums", "Other", "Piano", "Guitar", "Lead Vocals", "Backing Vocals"]],
     title=title,
+    description=description,
+    live=True # Enable live updates
 ).launch()
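
A few notes on the updated code follow; the sketches below are illustrative only and are not part of the commit.

The added if/else that sets use_cuda and prints the device can be collapsed into a single expression; a minimal equivalent sketch:

import torch

# Pick CUDA when available, otherwise fall back to the CPU,
# and derive both values from the same check.
use_cuda = torch.cuda.is_available()
device = 'cuda' if use_cuda else 'cpu'
print(f"Using device: {device}")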
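
The Demucs call builds a shell string from an f-string, which works here because audio_path is the fixed name 'test.wav', but an argument-list call avoids shell quoting altogether. A sketch under the same assumptions (htdemucs_6s model, "out" output directory); run_demucs is an illustrative helper name, not something the commit defines:

import subprocess
import sys

def run_demucs(audio_path, device, out_dir="out"):
    # Same separation step, but without shell=True.
    cmd = [
        sys.executable, "-m", "demucs.separate",
        "-n", "htdemucs_6s",  # six-stem model used by the Space
        "-d", device,         # 'cuda' or 'cpu'
        "-o", out_dir,
        audio_path,
    ]
    result = subprocess.run(cmd, check=True, capture_output=True, text=True)
    print("Demucs script output:", result.stdout)
    return result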
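
The final return relies on selected_stems preserving insertion order so that its entries line up with the eight output components. A sketch that makes the ordering explicit through one shared list of stem names; STEMS and pick_outputs are illustrative names, not identifiers from the commit:

import os

STEMS = ["vocals", "bass", "drums", "other", "piano", "guitar", "lead_vocals", "backing_vocals"]

def pick_outputs(stem_files, selections):
    # stem_files maps stem name -> file path; selections maps stem name -> checkbox value.
    # One entry per stem, in the same order as the output components; None leaves an output empty.
    return [
        stem_files[stem] if selections.get(stem) and os.path.isfile(stem_files[stem]) else None
        for stem in STEMS
    ]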
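
For the interface itself, a minimal sketch of the same wiring that uses top-level gr.Audio / gr.Checkbox and drops optional=True, assuming a recent Gradio release where components are addressed that way; returning None from inference already leaves an output empty. It reuses the inference, title, and description defined above, and leaves out live=True since live mode re-runs the whole separation on every checkbox change. STEM_LABELS is an illustrative name:

import gradio as gr

STEM_LABELS = ["Vocals", "Bass", "Drums", "Other", "Piano", "Guitar", "Lead Vocals", "Backing Vocals"]

demo = gr.Interface(
    fn=inference,
    inputs=[gr.Audio(type="numpy", label="Input")] + [gr.Checkbox(label=label) for label in STEM_LABELS],
    outputs=[gr.Audio(type="filepath", label=label) for label in STEM_LABELS],
    title=title,
    description=description,
)
demo.launch()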