Spaces:
Build error
Build error
updated with dropdown
Browse files
app.py
CHANGED
@@ -1,25 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import os
|
2 |
import gradio as gr
|
3 |
from scipy.io.wavfile import write
|
4 |
import subprocess
|
5 |
import torch
|
6 |
|
7 |
-
from audio_separator import Separator
|
8 |
|
9 |
def inference(audio):
|
10 |
os.makedirs("out", exist_ok=True)
|
11 |
audio_path = 'test.wav'
|
12 |
write(audio_path, audio[0], audio[1])
|
|
|
|
|
13 |
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
else:
|
18 |
-
use_cuda=False
|
19 |
-
print(f"Using device: {device}")
|
20 |
try:
|
21 |
-
|
22 |
-
# Using subprocess.run for better control
|
23 |
command = f"python3 -m demucs.separate -n htdemucs_6s -d {device} {audio_path} -o out"
|
24 |
process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
25 |
print("Demucs script output:", process.stdout.decode())
|
@@ -27,8 +85,9 @@ def inference(audio):
|
|
27 |
print("Error in Demucs script:", e.stderr.decode())
|
28 |
return None
|
29 |
|
|
|
|
|
30 |
try:
|
31 |
-
# Separating the stems using your custom separator
|
32 |
separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=use_cuda, output_format='mp3')
|
33 |
primary_stem_path, secondary_stem_path = separator.separate()
|
34 |
except Exception as e:
|
@@ -36,24 +95,39 @@ def inference(audio):
|
|
36 |
return None
|
37 |
|
38 |
# Collecting all file paths
|
39 |
-
|
40 |
-
files.
|
|
|
|
|
41 |
|
42 |
-
# Check if files exist
|
43 |
-
existing_files =
|
44 |
if not existing_files:
|
45 |
print("No files were created.")
|
46 |
return None
|
47 |
|
48 |
return existing_files
|
49 |
|
|
|
|
|
|
|
|
|
50 |
# Gradio Interface
|
51 |
title = "Source Separation Demo"
|
52 |
description = "Music Source Separation in the Waveform Domain. To use it, simply upload your audio."
|
|
|
|
|
|
|
|
|
|
|
53 |
gr.Interface(
|
54 |
-
inference,
|
55 |
-
|
56 |
-
|
57 |
title=title,
|
58 |
description=description,
|
|
|
59 |
).launch()
|
|
|
|
|
|
|
|
1 |
+
# import os
|
2 |
+
# import gradio as gr
|
3 |
+
# from scipy.io.wavfile import write
|
4 |
+
# import subprocess
|
5 |
+
# import torch
|
6 |
+
|
7 |
+
# from audio_separator import Separator # Ensure this is correctly implemented
|
8 |
+
|
9 |
+
# def inference(audio):
|
10 |
+
# os.makedirs("out", exist_ok=True)
|
11 |
+
# audio_path = 'test.wav'
|
12 |
+
# write(audio_path, audio[0], audio[1])
|
13 |
+
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
14 |
+
# if device=='cuda':
|
15 |
+
# use_cuda=True
|
16 |
+
# print(f"Using device: {device}")
|
17 |
+
# else:
|
18 |
+
# use_cuda=False
|
19 |
+
# print(f"Using device: {device}")
|
20 |
+
# try:
|
21 |
+
|
22 |
+
# # Using subprocess.run for better control
|
23 |
+
# command = f"python3 -m demucs.separate -n htdemucs_6s -d {device} {audio_path} -o out"
|
24 |
+
# process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
25 |
+
# print("Demucs script output:", process.stdout.decode())
|
26 |
+
# except subprocess.CalledProcessError as e:
|
27 |
+
# print("Error in Demucs script:", e.stderr.decode())
|
28 |
+
# return None
|
29 |
+
|
30 |
+
# try:
|
31 |
+
# # Separating the stems using your custom separator
|
32 |
+
# separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=use_cuda, output_format='mp3')
|
33 |
+
# primary_stem_path, secondary_stem_path = separator.separate()
|
34 |
+
# except Exception as e:
|
35 |
+
# print("Error in custom separation:", str(e))
|
36 |
+
# return None
|
37 |
+
|
38 |
+
# # Collecting all file paths
|
39 |
+
# files = [f"./out/htdemucs_6s/test/{stem}.wav" for stem in ["vocals", "bass", "drums", "other", "piano", "guitar"]]
|
40 |
+
# files.extend([secondary_stem_path,primary_stem_path ])
|
41 |
+
|
42 |
+
# # Check if files exist
|
43 |
+
# existing_files = [file for file in files if os.path.isfile(file)]
|
44 |
+
# if not existing_files:
|
45 |
+
# print("No files were created.")
|
46 |
+
# return None
|
47 |
+
|
48 |
+
# return existing_files
|
49 |
+
|
50 |
+
# # Gradio Interface
|
51 |
+
# title = "Source Separation Demo"
|
52 |
+
# description = "Music Source Separation in the Waveform Domain. To use it, simply upload your audio."
|
53 |
+
# gr.Interface(
|
54 |
+
# inference,
|
55 |
+
# gr.components.Audio(type="numpy", label="Input"),
|
56 |
+
# [gr.components.Audio(type="filepath", label=stem) for stem in ["Full Vocals","Bass", "Drums", "Other", "Piano", "Guitar", "Lead Vocals", "Backing Vocals" ]],
|
57 |
+
# title=title,
|
58 |
+
# description=description,
|
59 |
+
# ).launch()
|
60 |
+
|
61 |
+
|
62 |
import os
|
63 |
import gradio as gr
|
64 |
from scipy.io.wavfile import write
|
65 |
import subprocess
|
66 |
import torch
|
67 |
|
68 |
+
from audio_separator import Separator
|
69 |
|
70 |
def inference(audio):
    """Separate an uploaded recording into instrument stems.

    Runs a two-stage pipeline: Demucs (htdemucs_6s) splits the audio into
    six stems, then the custom ``Separator`` splits the resulting vocal
    stem into lead/backing vocals.

    Parameters
    ----------
    audio : tuple
        Gradio "numpy" audio payload. NOTE(review): the code passes
        ``audio[0]`` as the rate argument of ``scipy.io.wavfile.write``,
        so it assumes ``(sample_rate, data)`` ordering — confirm against
        the Gradio version in use.

    Returns
    -------
    dict | None
        Mapping of stem name -> output file path for every stem that was
        actually written to disk, or ``None`` when either separation stage
        fails or no output files were produced.
    """
    os.makedirs("out", exist_ok=True)
    audio_path = 'test.wav'
    write(audio_path, audio[0], audio[1])

    # Check for CUDA availability
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using device: {device}")

    # Stage 1: Demucs 6-stem separation.
    # Fixed: pass the command as an argument list with shell=False instead
    # of interpolating into a shell string — avoids shell quoting/injection
    # issues and is the documented-safe subprocess.run form.
    try:
        command = [
            "python3", "-m", "demucs.separate",
            "-n", "htdemucs_6s",
            "-d", device,
            audio_path,
            "-o", "out",
        ]
        process = subprocess.run(
            command, check=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        )
        print("Demucs script output:", process.stdout.decode())
    except subprocess.CalledProcessError as e:
        print("Error in Demucs script:", e.stderr.decode())
        return None

    # Stage 2: split the Demucs vocal stem into lead/backing vocals.
    use_cuda = device == 'cuda'
    try:
        separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=use_cuda, output_format='mp3')
        primary_stem_path, secondary_stem_path = separator.separate()
    except Exception as e:
        print("Error in custom separation:", str(e))
        return None

    # Collecting all file paths, keyed by stem name.
    stem_names = ["vocals", "bass", "drums", "other", "piano", "guitar", "lead_vocals", "backing_vocals"]
    files = {stem: f"./out/htdemucs_6s/test/{stem}.wav" for stem in stem_names[:6]}
    files["lead_vocals"] = primary_stem_path
    files["backing_vocals"] = secondary_stem_path

    # Keep only the stems whose files actually exist on disk.
    existing_files = {stem: file for stem, file in files.items() if os.path.isfile(file)}
    if not existing_files:
        print("No files were created.")
        return None

    return existing_files
|
110 |
|
111 |
+
# Function to return selected audio
|
112 |
+
def get_selected_audio(stems, selected_stem):
    """Return the output file path for *selected_stem* from *stems*.

    Fixed: ``inference`` returns ``None`` on failure and the Interface
    lambda feeds that result straight in, so the previous unconditional
    ``stems.get(...)`` raised ``AttributeError``. An empty/None mapping
    now yields ``None``, which Gradio renders as "no output".
    """
    if not stems:
        return None
    return stems.get(selected_stem, None)
|
114 |
+
|
115 |
# Gradio Interface
title = "Source Separation Demo"
description = "Music Source Separation in the Waveform Domain. To use it, simply upload your audio."

audio_input = gr.components.Audio(type="numpy", label="Input")
dropdown = gr.components.Dropdown(label="Select Stem", choices=["vocals", "bass", "drums", "other", "piano", "guitar", "lead_vocals", "backing_vocals"])
audio_output = gr.components.Audio(type="filepath", label="Output")

gr.Interface(
    # Fixed: inference() returns None on failure; the `or {}` fallback keeps
    # get_selected_audio's dict lookup from raising on a failed run.
    fn=lambda audio, stem: get_selected_audio(inference(audio) or {}, stem),
    inputs=[audio_input, dropdown],
    outputs=audio_output,
    title=title,
    description=description,
    # NOTE(review): live=True re-invokes the lambda — and therefore the full
    # Demucs + Separator pipeline — on every input change, including merely
    # switching the dropdown. Consider caching the stems or using a submit
    # button instead of live updates.
    live=True,
).launch()
|
131 |
+
|
132 |
+
|
133 |
+
|