Datasets

Modalities: Text
Formats: csv
Libraries: Datasets, pandas

zqning committed on
Commit: fc10d73
1 Parent(s): bd442c3

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +3 -0
  2. data_pipeline/asr/faster_whisper_mp.py +179 -0
  3. data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.ckpt +3 -0
  4. data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.yaml +126 -0
  5. data_pipeline/ckpts/dnsmos_p808.onnx +3 -0
  6. data_pipeline/ckpts/wav2vec2_xlsr_300m.pth +3 -0
  7. data_pipeline/duration/duration_mutagen.py +39 -0
  8. data_pipeline/g2p_en.py +138 -0
  9. data_pipeline/g2p_es.py +143 -0
  10. data_pipeline/merge_metrics.py +64 -0
  11. data_pipeline/pipeline.sh +54 -0
  12. data_pipeline/quality/dnsmos_mp.py +266 -0
  13. data_pipeline/quality/pyannote_mp.py +152 -0
  14. data_pipeline/requirements.txt +27 -0
  15. data_pipeline/seperation/README.md +139 -0
  16. data_pipeline/seperation/configs/config_dnr_bandit_bsrnn_multi_mus64.yaml +78 -0
  17. data_pipeline/seperation/configs/config_htdemucs_6stems.yaml +127 -0
  18. data_pipeline/seperation/configs/config_musdb18_bs_roformer.yaml +134 -0
  19. data_pipeline/seperation/configs/config_musdb18_demucs3_mmi.yaml +72 -0
  20. data_pipeline/seperation/configs/config_musdb18_htdemucs.yaml +119 -0
  21. data_pipeline/seperation/configs/config_musdb18_mdx23c.yaml +182 -0
  22. data_pipeline/seperation/configs/config_musdb18_mel_band_roformer.yaml +73 -0
  23. data_pipeline/seperation/configs/config_musdb18_scnet.yaml +64 -0
  24. data_pipeline/seperation/configs/config_musdb18_segm_models.yaml +92 -0
  25. data_pipeline/seperation/configs/config_vocals_bandit_bsrnn_multi_mus64.yaml +73 -0
  26. data_pipeline/seperation/configs/config_vocals_bs_roformer.yaml +138 -0
  27. data_pipeline/seperation/configs/config_vocals_htdemucs.yaml +123 -0
  28. data_pipeline/seperation/configs/config_vocals_mdx23c.yaml +95 -0
  29. data_pipeline/seperation/configs/config_vocals_mel_band_roformer.yaml +77 -0
  30. data_pipeline/seperation/configs/config_vocals_scnet.yaml +71 -0
  31. data_pipeline/seperation/configs/config_vocals_scnet_unofficial.yaml +62 -0
  32. data_pipeline/seperation/configs/config_vocals_segm_models.yaml +78 -0
  33. data_pipeline/seperation/configs/config_vocals_swin_upernet.yaml +50 -0
  34. data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_317_sdr_12.9755.yaml +126 -0
  35. data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_937_sdr_10.5309.yaml +138 -0
  36. data_pipeline/seperation/configs/viperx/model_mel_band_roformer_ep_3005_sdr_11.4360.yaml +65 -0
  37. data_pipeline/seperation/dataset.py +566 -0
  38. data_pipeline/seperation/docs/augmentations.md +146 -0
  39. data_pipeline/seperation/docs/bs_roformer_info.md +145 -0
  40. data_pipeline/seperation/docs/changes.md +20 -0
  41. data_pipeline/seperation/docs/dataset_types.md +75 -0
  42. data_pipeline/seperation/inference.py +116 -0
  43. data_pipeline/seperation/inference.sh +11 -0
  44. data_pipeline/seperation/inference_mp.py +154 -0
  45. data_pipeline/seperation/inference_mp.sh +7 -0
  46. data_pipeline/seperation/models/bandit/core/__init__.py +744 -0
  47. data_pipeline/seperation/models/bandit/core/data/__init__.py +2 -0
  48. data_pipeline/seperation/models/bandit/core/data/_types.py +18 -0
  49. data_pipeline/seperation/models/bandit/core/data/augmentation.py +107 -0
  50. data_pipeline/seperation/models/bandit/core/data/augmented.py +35 -0
.gitattributes CHANGED
@@ -56,3 +56,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ data_pipeline/ssl/km_xlsr_1024_18l filter=lfs diff=lfs merge=lfs -text
+ data_pipeline/ssl/km_xlsr_512_18l filter=lfs diff=lfs merge=lfs -text
+ rap_songs.csv filter=lfs diff=lfs merge=lfs -text
data_pipeline/asr/faster_whisper_mp.py ADDED
@@ -0,0 +1,179 @@
import sys
import os
#from tqdm import tqdm
import torch
import torch.multiprocessing as mp
import threading
#import librosa
#import numpy as np
from faster_whisper import WhisperModel
#import whisper
import glob
import fcntl
import argparse
import traceback
from tqdm import tqdm
import numpy as np
import librosa
import soxr
import multiprocessing

def normalize_audio(y, target_dbfs=0):
    max_amplitude = np.max(np.abs(y))
    if max_amplitude < 0.1:
        return y

    target_amplitude = 10.0**(target_dbfs / 20.0)
    scale_factor = target_amplitude / max_amplitude

    normalized_audio = y * scale_factor

    return normalized_audio

file_lock = multiprocessing.Lock()

def inference(rank, ckpt_path, text_path, queue: mp.Queue):
    device = "cuda"
    model = WhisperModel(ckpt_path, device=device, device_index=rank, compute_type="float16")
    puncs = list(",.?!")
    buffer = ""

    def write_to_file(data):
        with file_lock:
            with open(text_path, 'a') as f:
                f.write(data)

    with torch.no_grad():
        while True:
            filename = queue.get()
            if filename is None:
                write_to_file(buffer)
                break
            filename = filename[0]

            try:
                audio_path = filename
                audio, sr = librosa.load(audio_path, sr=None)
                audio = normalize_audio(audio, -6)
                audio = soxr.resample(
                    audio,
                    sr,
                    16000
                )
                segments, info = model.transcribe(audio, beam_size=3, vad_filter=True, condition_on_previous_text=False)
                text = ""

                for segment in segments:
                    text_segment = segment.text
                    text_segment = text_segment.strip()
                    if len(text_segment) == 0:
                        continue
                    if not text_segment[-1] in puncs:
                        text_segment += ","
                    text = text + " " + text_segment
                text = text.replace("  ", " ")
                text = text.strip()
                if len(text) == 0:
                    continue
                if text[-1] == ",":
                    text = text[:-1] + "."

                buffer += f"{filename}|{text}|{info.language}|{info.language_probability}\n"
                if len(buffer) > 10000:
                    write_to_file(buffer)
                    buffer = ""

            except Exception as e:
                print(filename)
                traceback.print_exc()


def setInterval(interval):
    def decorator(function):
        def wrapper(*args, **kwargs):
            stopped = threading.Event()

            def loop():  # executed in another thread
                while not stopped.wait(interval):  # until stopped
                    function(*args, **kwargs)

            t = threading.Thread(target=loop)
            t.daemon = True  # stop if the program exits
            t.start()
            return stopped

        return wrapper

    return decorator

last_batches = None

@setInterval(5)
def QueueWatcher(queue, bar):
    global last_batches
    curr_batches = queue.qsize()
    bar.update(last_batches - curr_batches)
    last_batches = curr_batches

if __name__ == "__main__":
    #audio_dir = sys.argv[1]
    parser = argparse.ArgumentParser()
    parser.add_argument("--filelist_or_dir", type=str, required=True)
    parser.add_argument("--text_path", type=str, required=True, help="Dir to save output")
    parser.add_argument("--jobs", type=int, required=False, default=2, help="Number of parallel workers")
    parser.add_argument("--ckpt_path", type=str, required=False, default="large-v3")
    parser.add_argument("--log_dir", type=str, required=False, default="large-v3", help="For aml compatibility")
    parser.add_argument("--model_dir", type=str, required=False, default="large-v3", help="For aml compatibility")
    args = parser.parse_args()

    mp.set_start_method('spawn', force=True)

    filelist_or_dir = args.filelist_or_dir
    text_path = args.text_path
    jobs = args.jobs
    ckpt_path = args.ckpt_path
    os.makedirs(text_path, exist_ok=True)
    model = WhisperModel(ckpt_path, device='cpu')  # download model in one thread
    del(model)

    if os.path.isfile(filelist_or_dir):
        filelist_name = filelist_or_dir.split('/')[-1].split('.')[0]
        generator = open(filelist_or_dir).read().splitlines()
        text_path = os.path.join(text_path, f"{filelist_name}_text.txt")
    else:
        filelist_name = "single"
        generator = glob.glob(f"{filelist_or_dir}/*.wav")
        text_path = os.path.join(text_path, "text.txt")

    os.system(f"rm {text_path}")

    gpu_num = torch.cuda.device_count()

    processes = []
    queue = mp.Queue()
    for thread_num in range(jobs):
        rank = thread_num % gpu_num
        p = mp.Process(target=inference, args=(rank, ckpt_path, text_path, queue))
        p.start()
        processes.append(p)

    accum = []
    tmp_file = []

    for filename in generator:
        accum.append(filename)
        if len(accum) == 1:
            queue.put(accum.copy())
            accum.clear()

    for _ in range(jobs):
        queue.put(None)

    last_batches = queue.qsize()
    bar = tqdm(total=last_batches, desc='whisper')
    queue_watcher = QueueWatcher(queue, bar)
    for p in processes:
        p.join()
    queue_watcher.set()
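Each worker appends one line per clip to the output text file in the form `filename|text|language|language_probability`. A minimal sketch of reading it back (not part of the commit; the path and the 0.9 threshold are illustrative only):

```python
# Minimal sketch, not in the commit: parse the ASR output written by faster_whisper_mp.py.
# Field order follows the buffer format above: filename|text|language|language_probability.
def read_asr_output(path="output_root/text.txt"):
    rows = []
    for line in open(path, errors="ignore").read().splitlines():
        filename, text, language, language_probability = line.split("|")
        rows.append((filename, text, language, float(language_probability)))
    return rows

# e.g. keep only confident English transcripts (0.9 is an illustrative threshold)
english = [r for r in read_asr_output() if r[2] == "en" and r[3] > 0.9]
```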
data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.ckpt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b84f37e8d444c8cb30c79d77f613a41c05868ff9c9ac6c7049c00aefae115aa
size 639331213
data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.yaml ADDED
@@ -0,0 +1,126 @@
audio:
  chunk_size: 352800
  dim_f: 1024
  dim_t: 801 # not used here (the model section sets its own value)
  hop_length: 441 # not used here (the model section sets its own value)
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000

model:
  dim: 512
  depth: 12
  stereo: true
  num_stems: 1
  time_transformer_depth: 1
  freq_transformer_depth: 1
  linear_transformer_depth: 0
  freqs_per_bands: !!python/tuple
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 128
    - 129
  dim_head: 64
  heads: 8
  attn_dropout: 0.1
  ff_dropout: 0.1
  flash_attn: true
  dim_freqs_in: 1025
  stft_n_fft: 2048
  stft_hop_length: 441
  stft_win_length: 2048
  stft_normalized: false
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
    - 4096
    - 2048
    - 1024
    - 512
    - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: False

training:
  batch_size: 2
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
    - vocals
    - other
  lr: 1.0e-05
  patience: 2
  reduce_factor: 0.95
  target_instrument: vocals
  num_epochs: 1000
  num_steps: 1000
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  optimizer: adam
  other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
  use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true

inference:
  batch_size: 4
  dim_t: 801
  num_overlap: 2
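A hedged sketch of loading this config from Python (not part of the commit). The `!!python/tuple` tags are not handled by PyYAML's safe loader, so the sketch uses `yaml.unsafe_load`; the separation code itself may load configs differently (e.g. via ml_collections or omegaconf):

```python
# Minimal sketch, assuming PyYAML >= 5.1 is available.
import yaml

with open("data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.yaml") as f:
    cfg = yaml.unsafe_load(f)  # unsafe_load resolves the !!python/tuple nodes

print(cfg["audio"]["sample_rate"])            # 44100
print(len(cfg["model"]["freqs_per_bands"]))   # 62 sub-bands
print(sum(cfg["model"]["freqs_per_bands"]))   # 1025, matches model.dim_freqs_in
```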
data_pipeline/ckpts/dnsmos_p808.onnx ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9246480c58567bc6affd4200938e77eef49468c8bc7ed3776d109c07456f6e91
size 224860
data_pipeline/ckpts/wav2vec2_xlsr_300m.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ad6f8ff9711c7acc72fbc6ffcdba6bc2582fba92c6b056d71de8a4ed77b6b22
size 1261921380
data_pipeline/duration/duration_mutagen.py ADDED
@@ -0,0 +1,39 @@
import os
import glob
import sys
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
from mutagen.wave import WAVE  # mutagen for reading wav metadata

filelist_or_dir = sys.argv[1]  # filelist including absolute path or data root path

total_duration = 0.
durations = []


def get_wav_duration(file_path):
    try:
        duration = WAVE(file_path).info.length
        return duration
    except Exception as e:
        print('Error occurred:', e)
        return None

if os.path.isdir(filelist_or_dir):
    filelist = [os.path.join(filelist_or_dir, filename) for filename in glob.glob(os.path.join(filelist_or_dir, '**/*.wav'), recursive=True)]
else:
    filelist = open(filelist_or_dir, 'r').read().splitlines()
for wav_path in tqdm(filelist):
    try:
        duration = get_wav_duration(wav_path)
        total_duration += duration
        durations.append(duration)
    except Exception as e:
        print(e)

print(f"total_duration: {total_duration}, avg_duration: {total_duration / len(durations)}")

#plt.hist(durations, bins=50, range=(0, 50))
#plt.savefig(os.path.join(os.path.dirname(data_root), "durations.png"))
#np.save(os.path.join(os.path.dirname(data_root), "1.npy"), np.array(durations))
data_pipeline/g2p_en.py ADDED
@@ -0,0 +1,138 @@
""" from https://github.com/keithito/tacotron """

'''
Cleaners are transformations that run over the input text at both training and eval time.

Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
  1. "english_cleaners" for English text
  2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
     the Unidecode library (https://pypi.python.org/pypi/Unidecode)
  3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
     the symbols in symbols.py to match your data).
'''

import re
from unidecode import unidecode
from phonemizer import phonemize
from phonemizer.backend import EspeakBackend
import matplotlib.pyplot as plt
import traceback
import sys
import os
from tqdm import tqdm
import numpy as np


# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')

# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]


def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text


def expand_numbers(text):
    return normalize_numbers(text)


def lowercase(text):
    return text.lower()


def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)


def convert_to_ascii(text):
    return unidecode(text)


def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def english_cleaners(text):
    '''Pipeline for English text, including abbreviation expansion.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = expand_abbreviations(text)
    phonemes = phonemize(text, language='en-us', backend='espeak', strip=True)
    phonemes = collapse_whitespace(phonemes)
    return phonemes


def english_cleaners2(text):
    '''Pipeline for English text, including abbreviation expansion. + punctuation + stress'''


if __name__ == '__main__':
    text_file = sys.argv[1]
    phoneme_file = sys.argv[2]

    backend = EspeakBackend('en-us', preserve_punctuation=True, with_stress=True)

    buffer = ""

    out_file = open(phoneme_file, 'w')
    for line in tqdm(open(text_file, errors='ignore').read().splitlines()):
        try:
            filepath, text, language, confidence = line.split('|')
            confidence = float(confidence)
            filename = os.path.basename(filepath).split('.')[0]
            duration = float(filename.split('_')[-1]) / 1000

            if language == "en":
                phone = convert_to_ascii(text)
                phone = lowercase(phone)
                phone = expand_abbreviations(phone)

                phone = backend.phonemize([phone], strip=True)[0]
                phone = collapse_whitespace(phone)
                ratio = len(phone) / duration
            else:
                phone = "[blank]"
                ratio = 0
            buffer += f"{filepath}|{text}|{phone}|{language}|{confidence:.3f}|{ratio:.3f}\n"
            if len(buffer) > 100000:
                out_file.write(buffer)
                buffer = ""
        except Exception as e:
            print(filename, line, e)
            continue
    out_file.write(buffer)
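The line `duration = float(filename.split('_')[-1]) / 1000` implies a naming convention where each segment's filename ends with its length in milliseconds. A small sketch of that assumption (the example path is hypothetical, not taken from the dataset):

```python
# Hypothetical example, not in the commit: how the g2p scripts recover clip duration
# from a segment name of the form <id>_<milliseconds>.wav.
import os

filepath = "output_root/vocal_cut/song_0001_52340.wav"  # assumed naming convention
filename = os.path.basename(filepath).split('.')[0]
duration = float(filename.split('_')[-1]) / 1000         # 52.34 seconds
phones_per_second = len("ex ae m p l") / duration         # the "ratio" field above
```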
data_pipeline/g2p_es.py ADDED
@@ -0,0 +1,143 @@
import re
from unidecode import unidecode
from transformers import T5ForConditionalGeneration, AutoTokenizer
import matplotlib.pyplot as plt
import traceback
import sys
import os
from tqdm import tqdm
import numpy as np


# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')

# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]


def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text


def expand_numbers(text):
    return normalize_numbers(text)


def lowercase(text):
    return text.lower()


def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)


def convert_to_ascii(text):
    return unidecode(text)

puncs_to_remove = ["♪", "#", "¿", "¡", "-", "*"]
puncs_to_remove = "".join(puncs_to_remove)
def normalize(text):
    text = text.translate(str.maketrans('', '', puncs_to_remove))
    text = text.strip()
    return text


def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def english_cleaners(text):
    '''Pipeline for English text, including abbreviation expansion.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = expand_abbreviations(text)
    phonemes = phonemize(text, language='en-us', backend='espeak', strip=True)
    phonemes = collapse_whitespace(phonemes)
    return phonemes


def english_cleaners2(text):
    '''Pipeline for English text, including abbreviation expansion. + punctuation + stress'''


if __name__ == '__main__':
    text_file = sys.argv[1]
    phoneme_file = sys.argv[2]

    model = T5ForConditionalGeneration.from_pretrained('charsiu/g2p_multilingual_byT5_tiny_16_layers_100')
    #model.cuda()
    tokenizer = AutoTokenizer.from_pretrained('google/byt5-small')

    buffer = ""

    out_file = open(phoneme_file, 'w')
    for line in tqdm(open(text_file, errors='ignore').read().splitlines()):
        try:
            filepath, text, language, confidence = line.split('|')
            confidence = float(confidence)
            filename = os.path.basename(filepath).split('.')[0]
            duration = float(filename.split('_')[-1]) / 1000

            if language == "es":
                #text = convert_to_ascii(text)
                text = normalize(text)
                text = lowercase(text)
                print(text)

                words = text.split(' ')
                words = ['<spa>: '+i for i in words]
                out = tokenizer(words, padding=True, add_special_tokens=False, return_tensors='pt')

                preds = model.generate(**out, num_beams=1, max_length=50)  # We do not find beam search helpful. Greedy decoding is enough.
                phone = tokenizer.batch_decode(preds.tolist(), skip_special_tokens=True)
                phone = " ".join(phone)
                print(phone)

                phone = collapse_whitespace(phone)
                ratio = len(phone) / duration
            else:
                phone = "[blank]"
                ratio = 0
            buffer += f"{filepath}|{text}|{phone}|{language}|{confidence:.3f}|{ratio:.3f}\n"
            if len(buffer) > 100000:
                out_file.write(buffer)
                buffer = ""
            #break
        except Exception as e:
            print(filename, line, e)
            continue
    out_file.write(buffer)
data_pipeline/merge_metrics.py ADDED
@@ -0,0 +1,64 @@
import re
from unidecode import unidecode
from phonemizer import phonemize
from phonemizer.backend import EspeakBackend
import matplotlib.pyplot as plt
import traceback
import argparse
import os
from tqdm import tqdm
import numpy as np


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--phone", type=str, required=True)
    parser.add_argument("--mos", type=str, required=True)
    parser.add_argument("--spk", type=str, required=True)
    parser.add_argument("--output", type=str, required=True)
    args = parser.parse_args()

    ratios = []

    mos_file = open(args.mos, 'r').read().splitlines()
    mos = {}
    for line in mos_file:
        try:
            file_path, mos_score = line.split('|')
            filename = os.path.basename(file_path).split('.')[0]
            mos[filename] = float(mos_score)
        except:
            print(line)

    spk_file = open(args.spk).read().splitlines()
    spk = {}
    for line in spk_file:
        try:
            file_path, score = line.split('|')
            filename = os.path.basename(file_path).split('.')[0]
            spk[filename] = float(score)
        except:
            print(line)

    buffer = ""
    out_file = open(args.output, 'w')
    for line in tqdm(open(args.phone, errors='ignore').read().splitlines()):
        try:
            filepath, text, phone, language, confidence, ratio = line.split('|')
            confidence = float(confidence)
            ratio = float(ratio)
            filename = os.path.basename(filepath).split('.')[0]
            mos_score = mos[filename]
            spk_score = spk[filename]

            buffer += f"{filepath}|{text}|{phone}|{mos_score:.3f}|{language}|{confidence:.3f}|{spk_score:.3f}|{ratio:.3f}\n"
            if len(buffer) > 100000:
                out_file.write(buffer)
                buffer = ""
            ratios.append(ratio)
        except Exception as e:
            print(e, line)
            traceback.print_exc()
            continue
    out_file.write(buffer)
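The merged `data.txt` written above has eight pipe-separated fields per line. A minimal sketch of loading and filtering it with pandas (not part of the commit; the output path and thresholds are illustrative only):

```python
# Minimal sketch, not in the commit: read the merged metadata produced by merge_metrics.py.
# Field order follows the buffer format above.
import csv
import pandas as pd

cols = ["filepath", "text", "phone", "mos", "language", "confidence", "spk", "ratio"]
df = pd.read_csv("output_root/data.txt", sep="|", names=cols, quoting=csv.QUOTE_NONE)

# example filtering thresholds (illustrative values, not taken from the pipeline)
clean = df[(df.mos > 3.0) & (df.spk > 0.9) & (df.language == "en")]
print(len(df), "->", len(clean), "segments kept")
```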
data_pipeline/pipeline.sh ADDED
@@ -0,0 +1,54 @@
#input_dir=$PWD/$1
input_dir=$1
output_root=$2
stage=${3:-0}
stop_stage=${4:-2}

echo "from ${input_dir} to ${output_root}"
python3 --version

set -euo pipefail

# separation & segmentation
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
    echo "Separating..."
    cd seperation
    python3 inference_mp.py --filelist_or_dir $output_root/wav --out_dir $output_root --jobs 2 --ckpt_path /data/v-ziqianning/SingingTTS/data_pipeline/ckpts/bs_roformer
    cd -
fi

if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
    echo "Segmenting..."
    cd vad
    python3 vad_webrtcvad.py --filelist_or_dir ${input_dir}/vocal --out_dir ${output_root}/ --jobs 16
    cd -
fi

# ssl
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
    echo "Extracting SSL..."
    cd ssl
    python3 extract_xlsr.py $output_root/vocal_cut $output_root 2 # vocal
    python3 extract_xlsr_6l.py $output_root/vocal_cut $output_root 2 # bgm
    cd -
fi

if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
    echo "Quality Metrics..."
    cd quality
    python3 dnsmos_mp.py --filelist_or_dir $output_root/vocal_cut --text_path $output_root --jobs 8 --ckpt_path /data/v-ziqianning/SingingTTS/data_pipeline/ckpts
    python3 pyannote_mp.py --filelist_or_dir $output_root/vocal_cut --text_path $output_root --jobs 8
    cd -
fi

if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
    echo "Extracting lyrics..."
    cd asr
    python3 faster_whisper_mp.py --filelist_or_dir $output_root/vocal_cut --text_path $output_root --jobs 2
    cd -
fi

if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
    python3 g2p_en.py $output_root/text.txt $output_root/phoneme.txt
    python3 merge_metrics.py --phone $output_root/phoneme.txt --mos $output_root/dnsmos.txt --spk $output_root/spk.txt --output $output_root/data.txt
fi
data_pipeline/quality/dnsmos_mp.py ADDED
@@ -0,0 +1,266 @@
import sys
import os
import torch
import torch.multiprocessing as mp
import threading
import numpy as np
import glob
import argparse
import librosa
import soxr
from tqdm import tqdm
import traceback
import multiprocessing
#from speechmos import dnsmos
import onnxruntime as ort
os.environ["OMP_NUM_THREADS"] = "1"
#os.environ["MKL_NUM_THREADS"] = "1"

file_lock = multiprocessing.Lock()

SR = 16000
INPUT_LENGTH = 9.01
dnsmos = None


class DNSMOS:
    def __init__(self, primary_model_path, p808_model_path, rank) -> None:
        self.primary_model_path = primary_model_path
        sess_opt = ort.SessionOptions()
        sess_opt.intra_op_num_threads = 1
        sess_opt.inter_op_num_threads = 1
        sess_opt.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
        #providers = [("CUDAExecutionProvider", {"device_id": torch.cuda.current_device(),})]
        #providers = ["CUDAExecutionProvider"]
        #providers = ["CPUExecutionProvider"]
        providers = [
            ('CUDAExecutionProvider', {
                'device_id': rank,
            }),
            'CPUExecutionProvider',
        ]
        #self.onnx_sess = ort.InferenceSession(self.primary_model_path, sess_opt, providers=providers)
        self.p808_onnx_sess = ort.InferenceSession(p808_model_path, sess_opt, providers=providers)
        #print(self.p808_onnx_sess.get_providers())

    def audio_melspec(self, audio, n_mels=120, frame_size=320, hop_length=160, sr=16000, to_db=True):
        mel_spec = librosa.feature.melspectrogram(
            y=audio, sr=sr, n_fft=frame_size + 1, hop_length=hop_length, n_mels=n_mels)
        if to_db:
            mel_spec = (librosa.power_to_db(mel_spec, ref=np.max) + 40) / 40
        return mel_spec.T

    def get_polyfit_val(self, sig, bak, ovr, is_personalized_MOS):
        if is_personalized_MOS:
            p_ovr = np.poly1d(
                [-0.00533021, 0.005101, 1.18058466, -0.11236046])
            p_sig = np.poly1d(
                [-0.01019296, 0.02751166, 1.19576786, -0.24348726])
            p_bak = np.poly1d(
                [-0.04976499, 0.44276479, -0.1644611, 0.96883132])
        else:
            p_ovr = np.poly1d([-0.06766283, 1.11546468, 0.04602535])
            p_sig = np.poly1d([-0.08397278, 1.22083953, 0.0052439])
            p_bak = np.poly1d([-0.13166888, 1.60915514, -0.39604546])

        sig_poly = p_sig(sig)
        bak_poly = p_bak(bak)
        ovr_poly = p_ovr(ovr)

        return sig_poly, bak_poly, ovr_poly

    def __call__(self, sample, fs, is_personalized_MOS):
        clip_dict = {}
        if isinstance(sample, np.ndarray):
            audio = sample
            if not ((audio >= -1).all() and (audio <= 1).all()):
                raise ValueError("np.ndarray values must be between -1 and 1.")
        elif isinstance(sample, str) and os.path.isfile(sample):
            audio, _ = librosa.load(sample, sr=fs)
            clip_dict['filename'] = sample
        else:
            raise ValueError(
                f"Input must be a numpy array or a path to an audio file.")

        len_samples = int(INPUT_LENGTH * fs)
        while len(audio) < len_samples:
            audio = np.append(audio, audio)

        num_hops = int(np.floor(len(audio) / fs) - INPUT_LENGTH) + 1
        hop_len_samples = fs
        predicted_mos_sig_seg = []
        predicted_mos_bak_seg = []
        predicted_mos_ovr_seg = []
        predicted_p808_mos = []

        for idx in range(num_hops):
            audio_seg = audio[int(idx * hop_len_samples): int((idx + INPUT_LENGTH) * hop_len_samples)]
            if len(audio_seg) < len_samples:
                continue

            input_features = np.array(audio_seg).astype(
                'float32')[np.newaxis, :]
            p808_input_features = np.array(self.audio_melspec(
                audio=audio_seg[:-160])).astype('float32')[np.newaxis, :, :]
            oi = {'input_1': input_features}
            p808_oi = {'input_1': p808_input_features}
            p808_mos = self.p808_onnx_sess.run(None, p808_oi)[0][0][0]
            #mos_sig_raw, mos_bak_raw, mos_ovr_raw = self.onnx_sess.run(None, oi)[
            #    0][0]
            #mos_sig, mos_bak, mos_ovr = self.get_polyfit_val(
            #    mos_sig_raw, mos_bak_raw, mos_ovr_raw, is_personalized_MOS)
            #predicted_mos_sig_seg.append(mos_sig)
            #predicted_mos_bak_seg.append(mos_bak)
            #predicted_mos_ovr_seg.append(mos_ovr)
            predicted_p808_mos.append(p808_mos)

        #clip_dict['ovrl_mos'] = np.mean(predicted_mos_ovr_seg)
        #clip_dict['sig_mos'] = np.mean(predicted_mos_sig_seg)
        #clip_dict['bak_mos'] = np.mean(predicted_mos_bak_seg)
        clip_dict['p808_mos'] = np.mean(predicted_p808_mos)
        return clip_dict

def normalize_audio(y, target_dbfs=0):
    max_amplitude = np.max(np.abs(y))
    if max_amplitude < 0.1:
        return y

    target_amplitude = 10.0**(target_dbfs / 20.0)
    scale_factor = target_amplitude / max_amplitude
    #print(max_amplitude, target_amplitude, scale_factor)

    normalized_audio = y * scale_factor

    return normalized_audio


def inference(rank, ckpt_dir, text_path, queue: mp.Queue):
    p808_model_path = os.path.join(ckpt_dir, 'dnsmos_p808.onnx')
    primary_model_path = os.path.join(ckpt_dir, 'sig_bak_ovr.onnx')
    dnsmos = DNSMOS(primary_model_path, p808_model_path, rank)

    def write_to_file(data):
        with file_lock:
            with open(text_path, 'a') as f:
                f.write(data)

    buffer = ""

    with torch.no_grad():
        while True:
            #print(texts)
            filename = queue.get()
            if filename is None:
                write_to_file(buffer)
                break
            try:
                filename = filename[0]
                audio_path = filename
                wav, sr = librosa.load(audio_path, sr=None)
                wav = normalize_audio(wav, -6)
                wav = soxr.resample(
                    wav,    # 1D(mono) or 2D(frames, channels) array input
                    sr,     # input samplerate
                    16000   # target samplerate
                )
                if wav.min() < -1 or wav.max() > 1:
                    print(audio_path)
                mos_dict = dnsmos(wav, 16000, False)
                p808_mos = mos_dict['p808_mos']
                buffer += f"{filename}|{p808_mos:3}\n"
                if len(buffer) > 10000:
                    write_to_file(buffer)
                    buffer = ""
            except Exception as e:
                print(audio_path)
                traceback.print_exc()


def setInterval(interval):
    def decorator(function):
        def wrapper(*args, **kwargs):
            stopped = threading.Event()

            def loop():  # executed in another thread
                while not stopped.wait(interval):  # until stopped
                    function(*args, **kwargs)

            t = threading.Thread(target=loop)
            t.daemon = True  # stop if the program exits
            t.start()
            return stopped

        return wrapper

    return decorator

last_batches = None

@setInterval(5)
def QueueWatcher(queue, bar):
    global last_batches
    curr_batches = queue.qsize()
    bar.update(last_batches - curr_batches)
    last_batches = curr_batches


if __name__ == "__main__":
    #audio_dir = sys.argv[1]
    parser = argparse.ArgumentParser()
    parser.add_argument("--filelist_or_dir", type=str, required=True)
    parser.add_argument("--text_path", type=str, required=True, help="Dir to save output")
    parser.add_argument("--jobs", type=int, required=False, default=2)
    parser.add_argument("--log_dir", type=str, required=False, help="For aml compatibility")
    parser.add_argument("--model_dir", type=str, required=False, help="For aml compatibility")
    parser.add_argument("--ckpt_path", type=str, required=False, default=".")
    args = parser.parse_args()

    mp.set_start_method('spawn', force=True)

    filelist_or_dir = args.filelist_or_dir
    text_path = args.text_path
    jobs = args.jobs
    ckpt_path = args.ckpt_path
    os.makedirs(text_path, exist_ok=True)

    if os.path.isfile(filelist_or_dir):
        filelist_name = filelist_or_dir.split('/')[-1].split('.')[0]
        generator = open(filelist_or_dir).read().splitlines()
        text_path = os.path.join(text_path, f"{filelist_name}_dnsmos.txt")
    else:
        filelist_name = "single"
        generator = glob.glob(f"{filelist_or_dir}/*.wav")
        text_path = os.path.join(text_path, "dnsmos.txt")

    os.system(f"rm {text_path}")

    gpu_num = torch.cuda.device_count()

    processes = []
    queue = mp.Queue()
    for thread_num in range(jobs):
        rank = thread_num % gpu_num
        p = mp.Process(target=inference, args=(rank, ckpt_path, text_path, queue))
        p.start()
        processes.append(p)

    accum = []
    tmp_file = []

    for filename in generator:
        accum.append(filename)
        if len(accum) == 1:
            queue.put(accum.copy())
            accum.clear()

    for _ in range(jobs):
        queue.put(None)

    last_batches = queue.qsize()
    bar = tqdm(total=last_batches, desc='dnsmos')
    queue_watcher = QueueWatcher(queue, bar)
    for p in processes:
        p.join()
    queue_watcher.set()
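Only the P.808 head is used above; the `sig_bak_ovr.onnx` branch and its polynomial calibration are commented out. Purely as an illustration of what `get_polyfit_val` computes, here is the non-personalized mapping applied to made-up raw scores (the input values are hypothetical, not measurements):

```python
# Illustrative only: the polynomial calibration from get_polyfit_val above,
# applied to invented raw DNSMOS outputs. The commit itself only keeps the P.808 head.
import numpy as np

p_ovr = np.poly1d([-0.06766283, 1.11546468, 0.04602535])
p_sig = np.poly1d([-0.08397278, 1.22083953, 0.0052439])
p_bak = np.poly1d([-0.13166888, 1.60915514, -0.39604546])

raw_sig, raw_bak, raw_ovr = 3.2, 3.8, 3.0  # hypothetical raw model outputs
print(p_sig(raw_sig), p_bak(raw_bak), p_ovr(raw_ovr))
```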
data_pipeline/quality/pyannote_mp.py ADDED
@@ -0,0 +1,152 @@
import sys
import os
import torch
import torch.multiprocessing as mp
import multiprocessing
import threading
import numpy as np
import glob
import argparse
from tqdm import tqdm
from collections import defaultdict
import traceback
from pyannote.audio import Pipeline

file_lock = multiprocessing.Lock()


def inference(rank, text_path, queue: mp.Queue):
    device = f"cuda:{rank}"
    pipeline = Pipeline.from_pretrained(
        "pyannote/speaker-diarization-3.1",
        use_auth_token="Your huggingface token")
    pipeline.to(torch.device(device))

    def write_to_file(data):
        with file_lock:
            with open(text_path, 'a') as f:
                f.write(data)

    buffer = ""

    with torch.no_grad():
        while True:
            #print(texts)
            filename = queue.get()
            if filename is None:
                write_to_file(buffer)
                break
            try:
                filename = filename[0]
                audio_path = filename

                spks = defaultdict(float)
                total_duration = 0.

                diarization = pipeline(audio_path)
                for turn, _, speaker in diarization.itertracks(yield_label=True):
                    duration = turn.end - turn.start
                    spks[speaker] += duration
                    total_duration += duration

                if len(spks) == 0:
                    percentage = 0.
                else:
                    sorted_spks = sorted(spks.items(), key=lambda s: s[1], reverse=True)
                    percentage = sorted_spks[0][1] / total_duration

                buffer += f"{filename}|{percentage:3}\n"
                if len(buffer) > 10000:
                    write_to_file(buffer)
                    buffer = ""
            except Exception as e:
                #print(sorted_spks)
                traceback.print_exc()


def setInterval(interval):
    def decorator(function):
        def wrapper(*args, **kwargs):
            stopped = threading.Event()

            def loop():  # executed in another thread
                while not stopped.wait(interval):  # until stopped
                    function(*args, **kwargs)

            t = threading.Thread(target=loop)
            t.daemon = True  # stop if the program exits
            t.start()
            return stopped

        return wrapper

    return decorator

last_batches = None

@setInterval(5)
def QueueWatcher(queue, bar):
    global last_batches
    curr_batches = queue.qsize()
    bar.update(last_batches - curr_batches)
    last_batches = curr_batches


if __name__ == "__main__":
    #audio_dir = sys.argv[1]
    parser = argparse.ArgumentParser()
    parser.add_argument("--filelist_or_dir", type=str, required=True)
    parser.add_argument("--text_path", type=str, required=True, help="Dir to save output")
    parser.add_argument("--jobs", type=int, required=False, default=2)
    parser.add_argument("--log_dir", type=str, required=False, help="For aml compatibility")
    parser.add_argument("--model_dir", type=str, required=False, help="For aml compatibility")
    args = parser.parse_args()

    mp.set_start_method('spawn', force=True)

    filelist_or_dir = args.filelist_or_dir
    text_path = args.text_path
    jobs = args.jobs
    os.makedirs(text_path, exist_ok=True)

    if os.path.isfile(filelist_or_dir):
        filelist_name = filelist_or_dir.split('/')[-1].split('.')[0]
        generator = open(filelist_or_dir).read().splitlines()
        text_path = os.path.join(text_path, f"{filelist_name}_spk.txt")
    else:
        filelist_name = "single"
        generator = glob.glob(f"{filelist_or_dir}/*.wav")
        text_path = os.path.join(text_path, "spk.txt")

    os.system(f"rm {text_path}")

    gpu_num = torch.cuda.device_count()

    processes = []
    queue = mp.Queue()
    for thread_num in range(jobs):
        rank = thread_num % gpu_num
        p = mp.Process(target=inference, args=(rank, text_path, queue))
        p.start()
        processes.append(p)

    accum = []
    tmp_file = []

    for filename in generator:
        accum.append(filename)
        if len(accum) == 1:
            queue.put(accum.copy())
            accum.clear()

    for _ in range(jobs):
        queue.put(None)

    last_batches = queue.qsize()
    bar = tqdm(total=last_batches, desc='pyannote')
    queue_watcher = QueueWatcher(queue, bar)
    for p in processes:
        p.join()
    queue_watcher.set()
data_pipeline/requirements.txt ADDED
@@ -0,0 +1,27 @@
torch
numpy
pandas
scipy
soundfile
ml_collections
tqdm
segmentation_models_pytorch==0.3.3
timm==0.9.2
audiomentations==0.24.0
pedalboard==0.8.1
omegaconf==2.2.3
beartype==0.14.1
rotary_embedding_torch==0.3.5
einops==0.6.1
librosa
demucs==4.0.0
transformers==4.35.0
torchmetrics==0.11.4
spafe==0.3.2
protobuf==3.20.3
torch_audiomentations
asteroid==0.7.0
auraloss
pyannote.audio
webrtcvad
faster-whisper==0.10.1
data_pipeline/seperation/README.md ADDED
@@ -0,0 +1,139 @@
# Music Source Separation Universal Training Code

This repository contains training code for music source separation models. It is based on [kuielab code](https://github.com/kuielab/sdx23/tree/mdx_AB/my_submission/src) for the [SDX23 challenge](https://github.com/kuielab/sdx23/tree/mdx_AB/my_submission/src). The main idea of this repository is to provide training code that is easy to modify for experiments. Brought to you by [MVSep.com](https://mvsep.com).

## Models

The model can be chosen with the `--model_type` arg.

Available models for training:
* MDX23C based on [KUIELab TFC TDF v3 architecture](https://github.com/kuielab/sdx23/). Key: `mdx23c`.
* Demucs4HT [[Paper](https://arxiv.org/abs/2211.08553)]. Key: `htdemucs`.
* VitLarge23 based on [Segmentation Models Pytorch](https://github.com/qubvel/segmentation_models.pytorch). Key: `segm_models`.
* Band Split RoFormer [[Paper](https://arxiv.org/abs/2309.02612), [Repository](https://github.com/lucidrains/BS-RoFormer)]. Key: `bs_roformer`.
* Mel-Band RoFormer [[Paper](https://arxiv.org/abs/2310.01809), [Repository](https://github.com/lucidrains/BS-RoFormer)]. Key: `mel_band_roformer`.
* Swin Upernet [[Paper](https://arxiv.org/abs/2103.14030)]. Key: `swin_upernet`.
* BandIt Plus [[Paper](https://arxiv.org/abs/2309.02539), [Repository](https://github.com/karnwatcharasupat/bandit)]. Key: `bandit`.
* SCNet [[Paper](https://arxiv.org/abs/2401.13276), [Official Repository](https://github.com/starrytong/SCNet), [Unofficial Repository](https://github.com/amanteur/SCNet-PyTorch)]. Key: `scnet`.

**Note 1**: For `segm_models` many different encoders are possible. [Look here](https://github.com/qubvel/segmentation_models.pytorch#encoders-).

**Note 2**: Thanks to [@lucidrains](https://github.com/lucidrains) for recreating the RoFormer models based on papers.

## How to train

To train a model you need to:

1) Choose the model type with key `--model_type`. Possible values: `mdx23c`, `htdemucs`, `segm_models`, `mel_band_roformer`, `bs_roformer`.
2) Choose the location of the model config with `--config_path` `<config path>`. You can find examples of configs in the [configs folder](configs/). Configs with the prefix `config_musdb18_` are examples for the [MUSDB18 dataset](https://sigsep.github.io/datasets/musdb.html).
3) If you have a checkpoint from the same or a similar model, you can use it with `--start_check_point` `<weights path>`.
4) Choose the path where to store the training results with `--results_path` `<results folder path>`.

#### Example
```bash
python train.py \
    --model_type mel_band_roformer \
    --config_path configs/config_mel_band_roformer_vocals.yaml \
    --start_check_point results/model.ckpt \
    --results_path results/ \
    --data_path 'datasets/dataset1' 'datasets/dataset2' \
    --valid_path datasets/musdb18hq/test \
    --num_workers 4 \
    --device_ids 0
```

You can find all available training parameters [here](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/train.py#L109).

## How to run inference

#### Example

```bash
python inference.py \
    --model_type mdx23c \
    --config_path configs/config_mdx23c_musdb18.yaml \
    --start_check_point results/last_mdx23c.ckpt \
    --input_folder input/wavs/ \
    --store_dir separation_results/
```

You can find all available inference parameters [here](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/inference.py#L54).

## Useful notes

* All batch sizes in the configs are adjusted for a single NVIDIA A6000 48GB. If you have less memory, please adjust `training.batch_size` and `training.gradient_accumulation_steps` in the model config correspondingly.
* It's usually better to start from old weights even if the shapes do not fully match. The code supports loading weights for models that are not exactly the same (but they must have the same architecture). Training will be much faster.

## Code description

* `configs/config_*.yaml` - configuration files for models
* `models/*` - set of available models for training and inference
* `dataset.py` - dataset which creates new samples for training
* `inference.py` - process folder with music files and separate them
* `train.py` - main training code
* `utils.py` - common functions used by train/valid
* `valid.py` - validation of model with metrics


## Pre-trained models

If you trained some good models, please share them. You can post config and model weights [in this issue](https://github.com/ZFTurbo/Music-Source-Separation-Training/issues/1).

### Vocal models
| Model Type | Instruments | Metrics (SDR) | Config | Checkpoint |
|:----------------------------------------------------------------:|:-------------:|:-----------------:|:-----:|:-----:|
| MDX23C | vocals / other | SDR vocals: 10.17 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/config_vocals_mdx23c.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/model_vocals_mdx23c_sdr_10.17.ckpt) |
| HTDemucs4 (MVSep finetuned) | vocals / other | SDR vocals: 8.78 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/config_vocals_htdemucs.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/model_vocals_htdemucs_sdr_8.78.ckpt) |
| Segm Models (VitLarge23) | vocals / other | SDR vocals: 9.77 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/config_vocals_segm_models.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/model_vocals_segm_models_sdr_9.77.ckpt) |
| Mel Band RoFormer | vocals (*) / other | SDR vocals: 8.42 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/config_vocals_mel_band_roformer.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/model_vocals_mel_band_roformer_sdr_8.42.ckpt) |
| Swin Upernet | vocals / other | SDR vocals: 7.57 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.2/config_vocals_swin_upernet.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.2/model_swin_upernet_ep_56_sdr_10.6703.ckpt) |
| BS Roformer ([viperx](https://github.com/playdasegunda) edition) | vocals / other | SDR vocals: 10.87 | [Config](https://raw.githubusercontent.com/ZFTurbo/Music-Source-Separation-Training/main/configs/viperx/model_bs_roformer_ep_317_sdr_12.9755.yaml) | [Weights](https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/model_bs_roformer_ep_317_sdr_12.9755.ckpt) |
| Mel Band Roformer ([viperx](https://github.com/playdasegunda) edition) | vocals / other | SDR vocals: 9.67 | [Config](https://raw.githubusercontent.com/ZFTurbo/Music-Source-Separation-Training/main/configs/viperx/model_mel_band_roformer_ep_3005_sdr_11.4360.yaml) | [Weights](https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/model_mel_band_roformer_ep_3005_sdr_11.4360.ckpt) |

**Note**: Metrics measured on [Multisong Dataset](https://mvsep.com/en/quality_checker).

### Single stem models
| Model Type | Instruments | Metrics (SDR) | Config | Checkpoint |
|:-----------------------------------------:|:-------------:|:----------------:|:-----:|:-----:|
| HTDemucs4 FT Drums | drums | SDR drums: 11.13 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/f7e0c4bc-ba3fe64a.th) |
| HTDemucs4 FT Bass | bass | SDR bass: 11.96 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/d12395a8-e57c48e6.th) |
| HTDemucs4 FT Other | other | SDR other: 5.85 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/92cfc3b6-ef3bcb9c.th) |
| HTDemucs4 FT Vocals (Official repository) | vocals | SDR vocals: 8.38 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/04573f0d-f3cf25b2.th) |
| BS Roformer ([viperx](https://github.com/playdasegunda) edition) | other | SDR other: 6.85 | [Config](https://raw.githubusercontent.com/ZFTurbo/Music-Source-Separation-Training/main/configs/viperx/model_bs_roformer_ep_937_sdr_10.5309.yaml) | [Weights](https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/model_bs_roformer_ep_937_sdr_10.5309.ckpt) |

**Note**: All models output 4 stems, but quality is best only on the target stem (all other stems are dummy).

### Multi-stem models

| Model Type | Instruments | Metrics (SDR) | Config | Checkpoint |
|:-------------------:|:-------------:|:----------------------------------------------------------------------------------------------------------------------------------------------:|:-----:|:-----:|
| MDX23C~~*~~ | bass / drums / vocals / other | MUSDB test avg: 7.15 (bass: 5.77, drums: 7.93 vocals: 9.23 other: 5.68) Multisong avg: 7.02 (bass: 8.40, drums: 7.73 vocals: 7.36 other: 4.57) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.1/config_musdb18_mdx23c.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.1/model_mdx23c_ep_168_sdr_7.0207.ckpt) |
| BandIt Plus | speech / music / effects | DnR test avg: 11.50 (speech: 15.64, music: 9.18 effects: 9.69) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v.1.0.3/config_dnr_bandit_bsrnn_multi_mus64.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v.1.0.3/model_bandit_plus_dnr_sdr_11.47.chpt) |
| HTDemucs4 | bass / drums / vocals / other | Multisong avg: 9.16 (bass: 11.76, drums: 10.88 vocals: 8.24 other: 5.74) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/955717e8-8726e21a.th) |
| HTDemucs4 (6 stems) | bass / drums / vocals / other / piano / guitar | Multisong (bass: 11.22, drums: 10.22 vocals: 8.05 other: --- piano: --- guitar: ---) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_htdemucs_6stems.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/5c90dfd2-34c22ccb.th) |
| Demucs3 mmi | bass / drums / vocals / other | Multisong avg: 8.88 (bass: 11.17, drums: 10.70 vocals: 8.22 other: 5.42) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_demucs3_mmi.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/75fc33f5-1941ce65.th) |

~~*~~ **Note**: Model was trained only on the MUSDB18HQ dataset (100 songs of train data)

## Dataset types

Look here: [Dataset types](docs/dataset_types.md)

## Augmentations

Look here: [Augmentations](docs/augmentations.md)

## Citation

* [arxiv paper](https://arxiv.org/abs/2305.07489)

```
@misc{solovyev2023benchmarks,
  title={Benchmarks and leaderboards for sound demixing tasks},
  author={Roman Solovyev and Alexander Stempkovskiy and Tatiana Habruseva},
  year={2023},
  eprint={2305.07489},
  archivePrefix={arXiv},
  primaryClass={cs.SD}
}
```
data_pipeline/seperation/configs/config_dnr_bandit_bsrnn_multi_mus64.yaml ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "MultiMaskMultiSourceBandSplitRNN"
2
+ audio:
3
+ chunk_size: 264600
4
+ num_channels: 2
5
+ sample_rate: 44100
6
+ min_mean_abs: 0.001
7
+
8
+ model:
9
+ in_channel: 1
10
+ stems: ['speech', 'music', 'effects']
11
+ band_specs: "musical"
12
+ n_bands: 64
13
+ fs: 44100
14
+ require_no_overlap: false
15
+ require_no_gap: true
16
+ normalize_channel_independently: false
17
+ treat_channel_as_feature: true
18
+ n_sqm_modules: 8
19
+ emb_dim: 128
20
+ rnn_dim: 256
21
+ bidirectional: true
22
+ rnn_type: "GRU"
23
+ mlp_dim: 512
24
+ hidden_activation: "Tanh"
25
+ hidden_activation_kwargs: null
26
+ complex_mask: true
27
+ n_fft: 2048
28
+ win_length: 2048
29
+ hop_length: 512
30
+ window_fn: "hann_window"
31
+ wkwargs: null
32
+ power: null
33
+ center: true
34
+ normalized: true
35
+ pad_mode: "constant"
36
+ onesided: true
37
+
38
+ training:
39
+ batch_size: 4
40
+ gradient_accumulation_steps: 4
41
+ grad_clip: 0
42
+ instruments:
43
+ - speech
44
+ - music
45
+ - effects
46
+ lr: 9.0e-05
47
+ patience: 2
48
+ reduce_factor: 0.95
49
+ target_instrument: null
50
+ num_epochs: 1000
51
+ num_steps: 1000
52
+ q: 0.95
53
+ coarse_loss_clip: true
54
+ ema_momentum: 0.999
55
+ optimizer: adam
56
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
57
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
58
+
59
+ augmentations:
60
+ enable: true # enable or disable all augmentations (to fast disable if needed)
61
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
62
+ loudness_min: 0.5
63
+ loudness_max: 1.5
64
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
65
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
66
+ - 0.2
67
+ - 0.02
68
+ mixup_loudness_min: 0.5
69
+ mixup_loudness_max: 1.5
70
+ all:
71
+ channel_shuffle: 0.5 # Set 0 or lower to disable
72
+ random_inverse: 0.1 # inverse track (better lower probability)
73
+ random_polarity: 0.5 # polarity change (multiply waveform to -1)
74
+
75
+ inference:
76
+ batch_size: 1
77
+ dim_t: 256
78
+ num_overlap: 4
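Several of these configs (including the one above) use `!!python/tuple` tags, which plain `yaml.safe_load` rejects. As a sketch only, not the loader this pipeline necessarily uses, one way to read them is to register a tuple constructor on a `SafeLoader` subclass:

```python
import yaml

class TupleLoader(yaml.SafeLoader):
    """SafeLoader that additionally understands the !!python/tuple tag used in these configs."""

def construct_python_tuple(loader, node):
    return tuple(loader.construct_sequence(node))

TupleLoader.add_constructor("tag:yaml.org,2002:python/tuple", construct_python_tuple)

with open("data_pipeline/seperation/configs/config_dnr_bandit_bsrnn_multi_mus64.yaml") as f:
    cfg = yaml.load(f, Loader=TupleLoader)

print(cfg["augmentations"]["mixup_probs"])  # (0.2, 0.02)
```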
data_pipeline/seperation/configs/config_htdemucs_6stems.yaml ADDED
@@ -0,0 +1,127 @@
1
+ audio:
2
+ chunk_size: 485100 # samplerate * segment
3
+ min_mean_abs: 0.001
4
+ hop_length: 1024
5
+
6
+ training:
7
+ batch_size: 8
8
+ gradient_accumulation_steps: 1
9
+ grad_clip: 0
10
+ segment: 11
11
+ shift: 1
12
+ samplerate: 44100
13
+ channels: 2
14
+ normalize: true
15
+ instruments: ['drums', 'bass', 'other', 'vocals', 'guitar', 'piano']
16
+ target_instrument: null
17
+ num_epochs: 1000
18
+ num_steps: 1000
19
+ optimizer: adam
20
+ lr: 9.0e-05
21
+ patience: 2
22
+ reduce_factor: 0.95
23
+ q: 0.95
24
+ coarse_loss_clip: true
25
+ ema_momentum: 0.999
26
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
27
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
28
+
29
+ augmentations:
30
+ enable: true # enable or disable all augmentations (to fast disable if needed)
31
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
32
+ loudness_min: 0.5
33
+ loudness_max: 1.5
34
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
35
+ mixup_probs: [0.2, 0.02]
36
+ mixup_loudness_min: 0.5
37
+ mixup_loudness_max: 1.5
38
+ all:
39
+ channel_shuffle: 0.5 # Set 0 or lower to disable
40
+ random_inverse: 0.1 # inverse track (better lower probability)
41
+ random_polarity: 0.5 # polarity change (multiply waveform to -1)
42
+
43
+ inference:
44
+ num_overlap: 4
45
+ batch_size: 8
46
+
47
+ model: htdemucs
48
+
49
+ htdemucs: # see demucs/htdemucs.py for a detailed description
50
+ # Channels
51
+ channels: 48
52
+ channels_time:
53
+ growth: 2
54
+ # STFT
55
+ num_subbands: 1
56
+ nfft: 4096
57
+ wiener_iters: 0
58
+ end_iters: 0
59
+ wiener_residual: false
60
+ cac: true
61
+ # Main structure
62
+ depth: 4
63
+ rewrite: true
64
+ # Frequency Branch
65
+ multi_freqs: []
66
+ multi_freqs_depth: 3
67
+ freq_emb: 0.2
68
+ emb_scale: 10
69
+ emb_smooth: true
70
+ # Convolutions
71
+ kernel_size: 8
72
+ stride: 4
73
+ time_stride: 2
74
+ context: 1
75
+ context_enc: 0
76
+ # normalization
77
+ norm_starts: 4
78
+ norm_groups: 4
79
+ # DConv residual branch
80
+ dconv_mode: 3
81
+ dconv_depth: 2
82
+ dconv_comp: 8
83
+ dconv_init: 1e-3
84
+ # Before the Transformer
85
+ bottom_channels: 0
86
+ # CrossTransformer
87
+ # ------ Common to all
88
+ # Regular parameters
89
+ t_layers: 5
90
+ t_hidden_scale: 4.0
91
+ t_heads: 8
92
+ t_dropout: 0.0
93
+ t_layer_scale: True
94
+ t_gelu: True
95
+ # ------------- Positional Embedding
96
+ t_emb: sin
97
+ t_max_positions: 10000 # for the scaled embedding
98
+ t_max_period: 10000.0
99
+ t_weight_pos_embed: 1.0
100
+ t_cape_mean_normalize: True
101
+ t_cape_augment: True
102
+ t_cape_glob_loc_scale: [5000.0, 1.0, 1.4]
103
+ t_sin_random_shift: 0
104
+ # ------------- norm before a transformer encoder
105
+ t_norm_in: True
106
+ t_norm_in_group: False
107
+ # ------------- norm inside the encoder
108
+ t_group_norm: False
109
+ t_norm_first: True
110
+ t_norm_out: True
111
+ # ------------- optim
112
+ t_weight_decay: 0.0
113
+ t_lr:
114
+ # ------------- sparsity
115
+ t_sparse_self_attn: False
116
+ t_sparse_cross_attn: False
117
+ t_mask_type: diag
118
+ t_mask_random_seed: 42
119
+ t_sparse_attn_window: 400
120
+ t_global_window: 100
121
+ t_sparsity: 0.95
122
+ t_auto_sparsity: False
123
+ # Cross Encoder First (False)
124
+ t_cross_first: False
125
+ # Weight init
126
+ rescale: 0.1
127
+
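The `chunk_size: 485100 # samplerate * segment` comment can be sanity-checked directly against `samplerate: 44100` and `segment: 11` in the training block above; a one-line check:

```python
samplerate, segment = 44100, 11
assert samplerate * segment == 485100  # matches audio.chunk_size in the config above
```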
data_pipeline/seperation/configs/config_musdb18_bs_roformer.yaml ADDED
@@ -0,0 +1,134 @@
1
+ audio:
2
+ chunk_size: 131584
3
+ dim_f: 1024
4
+ dim_t: 256
5
+ hop_length: 512
6
+ n_fft: 2048
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ dim: 192
13
+ depth: 6
14
+ stereo: true
15
+ num_stems: 1
16
+ time_transformer_depth: 1
17
+ freq_transformer_depth: 1
18
+ linear_transformer_depth: 0
19
+ freqs_per_bands: !!python/tuple
20
+ - 2
21
+ - 2
22
+ - 2
23
+ - 2
24
+ - 2
25
+ - 2
26
+ - 2
27
+ - 2
28
+ - 2
29
+ - 2
30
+ - 2
31
+ - 2
32
+ - 2
33
+ - 2
34
+ - 2
35
+ - 2
36
+ - 2
37
+ - 2
38
+ - 2
39
+ - 2
40
+ - 2
41
+ - 2
42
+ - 2
43
+ - 2
44
+ - 4
45
+ - 4
46
+ - 4
47
+ - 4
48
+ - 4
49
+ - 4
50
+ - 4
51
+ - 4
52
+ - 4
53
+ - 4
54
+ - 4
55
+ - 4
56
+ - 12
57
+ - 12
58
+ - 12
59
+ - 12
60
+ - 12
61
+ - 12
62
+ - 12
63
+ - 12
64
+ - 24
65
+ - 24
66
+ - 24
67
+ - 24
68
+ - 24
69
+ - 24
70
+ - 24
71
+ - 24
72
+ - 48
73
+ - 48
74
+ - 48
75
+ - 48
76
+ - 48
77
+ - 48
78
+ - 48
79
+ - 48
80
+ - 128
81
+ - 129
82
+ dim_head: 64
83
+ heads: 8
84
+ attn_dropout: 0.1
85
+ ff_dropout: 0.1
86
+ flash_attn: true
87
+ dim_freqs_in: 1025
88
+ stft_n_fft: 2048
89
+ stft_hop_length: 512
90
+ stft_win_length: 2048
91
+ stft_normalized: false
92
+ mask_estimator_depth: 2
93
+ multi_stft_resolution_loss_weight: 1.0
94
+ multi_stft_resolutions_window_sizes: !!python/tuple
95
+ - 4096
96
+ - 2048
97
+ - 1024
98
+ - 512
99
+ - 256
100
+ multi_stft_hop_size: 147
101
+ multi_stft_normalized: False
102
+
103
+ training:
104
+ batch_size: 10
105
+ gradient_accumulation_steps: 1
106
+ grad_clip: 0
107
+ instruments:
108
+ - vocals
109
+ - bass
110
+ - drums
111
+ - other
112
+ lr: 5.0e-05
113
+ patience: 2
114
+ reduce_factor: 0.95
115
+ target_instrument: vocals
116
+ num_epochs: 1000
117
+ num_steps: 1000
118
+ q: 0.95
119
+ coarse_loss_clip: true
120
+ ema_momentum: 0.999
121
+ optimizer: adam
122
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
123
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
124
+
125
+ augmentations:
126
+ enable: true # enable or disable all augmentations (to fast disable if needed)
127
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
128
+ loudness_min: 0.5
129
+ loudness_max: 1.5
130
+
131
+ inference:
132
+ batch_size: 1
133
+ dim_t: 256
134
+ num_overlap: 4
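When editing `freqs_per_bands` by hand it is easy to break the expectation that the band widths cover every STFT bin, i.e. that they sum to `stft_n_fft // 2 + 1` (the `dim_freqs_in` value). A quick check with the values from the config above:

```python
# Band widths copied from the freqs_per_bands list above.
freqs_per_bands = [2] * 24 + [4] * 12 + [12] * 8 + [24] * 8 + [48] * 8 + [128, 129]
stft_n_fft = 2048

assert sum(freqs_per_bands) == stft_n_fft // 2 + 1 == 1025  # == dim_freqs_in
```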
data_pipeline/seperation/configs/config_musdb18_demucs3_mmi.yaml ADDED
@@ -0,0 +1,72 @@
1
+ audio:
2
+ chunk_size: 485100 # samplerate * segment
3
+ min_mean_abs: 0.000
4
+ hop_length: 1024
5
+
6
+ training:
7
+ batch_size: 8
8
+ gradient_accumulation_steps: 1
9
+ grad_clip: 0
10
+ segment: 11
11
+ shift: 1
12
+ samplerate: 44100
13
+ channels: 2
14
+ normalize: true
15
+ instruments: ['drums', 'bass', 'other', 'vocals']
16
+ target_instrument: null
17
+ num_epochs: 1000
18
+ num_steps: 1000
19
+ optimizer: adam
20
+ lr: 9.0e-05
21
+ patience: 2
22
+ reduce_factor: 0.95
23
+ q: 0.95
24
+ coarse_loss_clip: true
25
+ ema_momentum: 0.999
26
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
27
+ use_amp: false # enable or disable usage of mixed precision (float16) - usually it must be true
28
+
29
+ augmentations:
30
+ enable: true # enable or disable all augmentations (to fast disable if needed)
31
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
32
+ loudness_min: 0.5
33
+ loudness_max: 1.5
34
+
35
+ inference:
36
+ num_overlap: 4
37
+ batch_size: 8
38
+
39
+ model: hdemucs
40
+
41
+ hdemucs: # see demucs/hdemucs.py for a detailed description
42
+ channels: 48
43
+ channels_time: null
44
+ growth: 2
45
+ nfft: 4096
46
+ wiener_iters: 0
47
+ end_iters: 0
48
+ wiener_residual: False
49
+ cac: True
50
+ depth: 6
51
+ rewrite: True
52
+ hybrid: True
53
+ hybrid_old: False
54
+ multi_freqs: []
55
+ multi_freqs_depth: 3
56
+ freq_emb: 0.2
57
+ emb_scale: 10
58
+ emb_smooth: True
59
+ kernel_size: 8
60
+ stride: 4
61
+ time_stride: 2
62
+ context: 1
63
+ context_enc: 0
64
+ norm_starts: 4
65
+ norm_groups: 4
66
+ dconv_mode: 1
67
+ dconv_depth: 2
68
+ dconv_comp: 4
69
+ dconv_attn: 4
70
+ dconv_lstm: 4
71
+ dconv_init: 0.001
72
+ rescale: 0.1
data_pipeline/seperation/configs/config_musdb18_htdemucs.yaml ADDED
@@ -0,0 +1,119 @@
1
+ audio:
2
+ chunk_size: 485100 # samplerate * segment
3
+ min_mean_abs: 0.001
4
+ hop_length: 1024
5
+
6
+ training:
7
+ batch_size: 8
8
+ gradient_accumulation_steps: 1
9
+ grad_clip: 0
10
+ segment: 11
11
+ shift: 1
12
+ samplerate: 44100
13
+ channels: 2
14
+ normalize: true
15
+ instruments: ['drums', 'bass', 'other', 'vocals']
16
+ target_instrument: null
17
+ num_epochs: 1000
18
+ num_steps: 1000
19
+ optimizer: adam
20
+ lr: 9.0e-05
21
+ patience: 2
22
+ reduce_factor: 0.95
23
+ q: 0.95
24
+ coarse_loss_clip: true
25
+ ema_momentum: 0.999
26
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
27
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
28
+
29
+ augmentations:
30
+ enable: true # enable or disable all augmentations (to fast disable if needed)
31
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
32
+ loudness_min: 0.5
33
+ loudness_max: 1.5
34
+
35
+ inference:
36
+ num_overlap: 4
37
+ batch_size: 8
38
+
39
+ model: htdemucs
40
+
41
+ htdemucs: # see demucs/htdemucs.py for a detailed description
42
+ # Channels
43
+ channels: 48
44
+ channels_time:
45
+ growth: 2
46
+ # STFT
47
+ num_subbands: 1
48
+ nfft: 4096
49
+ wiener_iters: 0
50
+ end_iters: 0
51
+ wiener_residual: false
52
+ cac: true
53
+ # Main structure
54
+ depth: 4
55
+ rewrite: true
56
+ # Frequency Branch
57
+ multi_freqs: []
58
+ multi_freqs_depth: 3
59
+ freq_emb: 0.2
60
+ emb_scale: 10
61
+ emb_smooth: true
62
+ # Convolutions
63
+ kernel_size: 8
64
+ stride: 4
65
+ time_stride: 2
66
+ context: 1
67
+ context_enc: 0
68
+ # normalization
69
+ norm_starts: 4
70
+ norm_groups: 4
71
+ # DConv residual branch
72
+ dconv_mode: 3
73
+ dconv_depth: 2
74
+ dconv_comp: 8
75
+ dconv_init: 1e-3
76
+ # Before the Transformer
77
+ bottom_channels: 512
78
+ # CrossTransformer
79
+ # ------ Common to all
80
+ # Regular parameters
81
+ t_layers: 5
82
+ t_hidden_scale: 4.0
83
+ t_heads: 8
84
+ t_dropout: 0.0
85
+ t_layer_scale: True
86
+ t_gelu: True
87
+ # ------------- Positional Embedding
88
+ t_emb: sin
89
+ t_max_positions: 10000 # for the scaled embedding
90
+ t_max_period: 10000.0
91
+ t_weight_pos_embed: 1.0
92
+ t_cape_mean_normalize: True
93
+ t_cape_augment: True
94
+ t_cape_glob_loc_scale: [5000.0, 1.0, 1.4]
95
+ t_sin_random_shift: 0
96
+ # ------------- norm before a transformer encoder
97
+ t_norm_in: True
98
+ t_norm_in_group: False
99
+ # ------------- norm inside the encoder
100
+ t_group_norm: False
101
+ t_norm_first: True
102
+ t_norm_out: True
103
+ # ------------- optim
104
+ t_weight_decay: 0.0
105
+ t_lr:
106
+ # ------------- sparsity
107
+ t_sparse_self_attn: False
108
+ t_sparse_cross_attn: False
109
+ t_mask_type: diag
110
+ t_mask_random_seed: 42
111
+ t_sparse_attn_window: 400
112
+ t_global_window: 100
113
+ t_sparsity: 0.95
114
+ t_auto_sparsity: False
115
+ # Cross Encoder First (False)
116
+ t_cross_first: False
117
+ # Weight init
118
+ rescale: 0.1
119
+
data_pipeline/seperation/configs/config_musdb18_mdx23c.yaml ADDED
@@ -0,0 +1,182 @@
1
+ audio:
2
+ chunk_size: 261120
3
+ dim_f: 4096
4
+ dim_t: 256
5
+ hop_length: 1024
6
+ n_fft: 8192
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ act: gelu
13
+ bottleneck_factor: 4
14
+ growth: 128
15
+ norm: InstanceNorm
16
+ num_blocks_per_scale: 2
17
+ num_channels: 128
18
+ num_scales: 5
19
+ num_subbands: 4
20
+ scale:
21
+ - 2
22
+ - 2
23
+
24
+ training:
25
+ batch_size: 6
26
+ gradient_accumulation_steps: 1
27
+ grad_clip: 0
28
+ instruments:
29
+ - vocals
30
+ - bass
31
+ - drums
32
+ - other
33
+ lr: 9.0e-05
34
+ patience: 2
35
+ reduce_factor: 0.95
36
+ target_instrument: null
37
+ num_epochs: 1000
38
+ num_steps: 1000
39
+ q: 0.95
40
+ coarse_loss_clip: true
41
+ ema_momentum: 0.999
42
+ optimizer: adam
43
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
44
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
45
+
46
+ augmentations:
47
+ enable: true # enable or disable all augmentations (to fast disable if needed)
48
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
49
+ loudness_min: 0.5
50
+ loudness_max: 1.5
51
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
52
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
53
+ - 0.2
54
+ - 0.02
55
+ mixup_loudness_min: 0.5
56
+ mixup_loudness_max: 1.5
57
+
58
+ # apply mp3 compression to mixture only (emulate downloading mp3 from internet)
59
+ mp3_compression_on_mixture: 0.01
60
+ mp3_compression_on_mixture_bitrate_min: 32
61
+ mp3_compression_on_mixture_bitrate_max: 320
62
+ mp3_compression_on_mixture_backend: "lameenc"
63
+
64
+ all:
65
+ channel_shuffle: 0.5 # Set 0 or lower to disable
66
+ random_inverse: 0.1 # inverse track (better lower probability)
67
+ random_polarity: 0.5 # polarity change (multiply waveform to -1)
68
+ mp3_compression: 0.01
69
+ mp3_compression_min_bitrate: 32
70
+ mp3_compression_max_bitrate: 320
71
+ mp3_compression_backend: "lameenc"
72
+
73
+ # pedalboard reverb block
74
+ pedalboard_reverb: 0.01
75
+ pedalboard_reverb_room_size_min: 0.1
76
+ pedalboard_reverb_room_size_max: 0.9
77
+ pedalboard_reverb_damping_min: 0.1
78
+ pedalboard_reverb_damping_max: 0.9
79
+ pedalboard_reverb_wet_level_min: 0.1
80
+ pedalboard_reverb_wet_level_max: 0.9
81
+ pedalboard_reverb_dry_level_min: 0.1
82
+ pedalboard_reverb_dry_level_max: 0.9
83
+ pedalboard_reverb_width_min: 0.9
84
+ pedalboard_reverb_width_max: 1.0
85
+
86
+ # pedalboard chorus block
87
+ pedalboard_chorus: 0.01
88
+ pedalboard_chorus_rate_hz_min: 1.0
89
+ pedalboard_chorus_rate_hz_max: 7.0
90
+ pedalboard_chorus_depth_min: 0.25
91
+ pedalboard_chorus_depth_max: 0.95
92
+ pedalboard_chorus_centre_delay_ms_min: 3
93
+ pedalboard_chorus_centre_delay_ms_max: 10
94
+ pedalboard_chorus_feedback_min: 0.0
95
+ pedalboard_chorus_feedback_max: 0.5
96
+ pedalboard_chorus_mix_min: 0.1
97
+ pedalboard_chorus_mix_max: 0.9
98
+
99
+ # pedalboard phazer block
100
+ pedalboard_phazer: 0.01
101
+ pedalboard_phazer_rate_hz_min: 1.0
102
+ pedalboard_phazer_rate_hz_max: 10.0
103
+ pedalboard_phazer_depth_min: 0.25
104
+ pedalboard_phazer_depth_max: 0.95
105
+ pedalboard_phazer_centre_frequency_hz_min: 200
106
+ pedalboard_phazer_centre_frequency_hz_max: 12000
107
+ pedalboard_phazer_feedback_min: 0.0
108
+ pedalboard_phazer_feedback_max: 0.5
109
+ pedalboard_phazer_mix_min: 0.1
110
+ pedalboard_phazer_mix_max: 0.9
111
+
112
+ # pedalboard distortion block
113
+ pedalboard_distortion: 0.01
114
+ pedalboard_distortion_drive_db_min: 1.0
115
+ pedalboard_distortion_drive_db_max: 25.0
116
+
117
+ # pedalboard pitch shift block
118
+ pedalboard_pitch_shift: 0.01
119
+ pedalboard_pitch_shift_semitones_min: -7
120
+ pedalboard_pitch_shift_semitones_max: 7
121
+
122
+ # pedalboard resample block
123
+ pedalboard_resample: 0.01
124
+ pedalboard_resample_target_sample_rate_min: 4000
125
+ pedalboard_resample_target_sample_rate_max: 44100
126
+
127
+ # pedalboard bitcrash block
128
+ pedalboard_bitcrash: 0.01
129
+ pedalboard_bitcrash_bit_depth_min: 4
130
+ pedalboard_bitcrash_bit_depth_max: 16
131
+
132
+ # pedalboard mp3 compressor block
133
+ pedalboard_mp3_compressor: 0.01
134
+ pedalboard_mp3_compressor_pedalboard_mp3_compressor_min: 0
135
+ pedalboard_mp3_compressor_pedalboard_mp3_compressor_max: 9.999
136
+
137
+ vocals:
138
+ pitch_shift: 0.1
139
+ pitch_shift_min_semitones: -5
140
+ pitch_shift_max_semitones: 5
141
+ seven_band_parametric_eq: 0.25
142
+ seven_band_parametric_eq_min_gain_db: -9
143
+ seven_band_parametric_eq_max_gain_db: 9
144
+ tanh_distortion: 0.1
145
+ tanh_distortion_min: 0.1
146
+ tanh_distortion_max: 0.7
147
+ bass:
148
+ pitch_shift: 0.1
149
+ pitch_shift_min_semitones: -2
150
+ pitch_shift_max_semitones: 2
151
+ seven_band_parametric_eq: 0.25
152
+ seven_band_parametric_eq_min_gain_db: -3
153
+ seven_band_parametric_eq_max_gain_db: 6
154
+ tanh_distortion: 0.2
155
+ tanh_distortion_min: 0.1
156
+ tanh_distortion_max: 0.5
157
+ drums:
158
+ pitch_shift: 0.33
159
+ pitch_shift_min_semitones: -5
160
+ pitch_shift_max_semitones: 5
161
+ seven_band_parametric_eq: 0.25
162
+ seven_band_parametric_eq_min_gain_db: -9
163
+ seven_band_parametric_eq_max_gain_db: 9
164
+ tanh_distortion: 0.33
165
+ tanh_distortion_min: 0.1
166
+ tanh_distortion_max: 0.6
167
+ other:
168
+ pitch_shift: 0.1
169
+ pitch_shift_min_semitones: -4
170
+ pitch_shift_max_semitones: 4
171
+ gaussian_noise: 0.1
172
+ gaussian_noise_min_amplitude: 0.001
173
+ gaussian_noise_max_amplitude: 0.015
174
+ time_stretch: 0.01
175
+ time_stretch_min_rate: 0.8
176
+ time_stretch_max_rate: 1.25
177
+
178
+
179
+ inference:
180
+ batch_size: 1
181
+ dim_t: 256
182
+ num_overlap: 4
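The per-stem blocks above (`vocals:`, `bass:`, `drums:`, `other:`) read like audiomentations transforms: each key is an application probability and the `*_min`/`*_max` pairs are the transform's range (dataset.py imports `audiomentations`). As a hedged illustration only, the `vocals:` block could be expressed roughly as follows; the actual wiring in this pipeline may differ:

```python
from audiomentations import Compose, PitchShift, SevenBandParametricEQ, TanhDistortion

# Probabilities and ranges copied from the vocals: block above; illustrative sketch only.
vocals_augment = Compose([
    PitchShift(min_semitones=-5, max_semitones=5, p=0.1),
    SevenBandParametricEQ(min_gain_db=-9, max_gain_db=9, p=0.25),
    TanhDistortion(min_distortion=0.1, max_distortion=0.7, p=0.1),
])
```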
data_pipeline/seperation/configs/config_musdb18_mel_band_roformer.yaml ADDED
@@ -0,0 +1,73 @@
1
+ audio:
2
+ chunk_size: 131584
3
+ dim_f: 1024
4
+ dim_t: 256
5
+ hop_length: 512
6
+ n_fft: 2048
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ dim: 192
13
+ depth: 8
14
+ stereo: true
15
+ num_stems: 1
16
+ time_transformer_depth: 1
17
+ freq_transformer_depth: 1
18
+ linear_transformer_depth: 0
19
+ num_bands: 60
20
+ dim_head: 64
21
+ heads: 8
22
+ attn_dropout: 0.1
23
+ ff_dropout: 0.1
24
+ flash_attn: True
25
+ dim_freqs_in: 1025
26
+ sample_rate: 44100 # needed for mel filter bank from librosa
27
+ stft_n_fft: 2048
28
+ stft_hop_length: 512
29
+ stft_win_length: 2048
30
+ stft_normalized: False
31
+ mask_estimator_depth: 2
32
+ multi_stft_resolution_loss_weight: 1.0
33
+ multi_stft_resolutions_window_sizes: !!python/tuple
34
+ - 4096
35
+ - 2048
36
+ - 1024
37
+ - 512
38
+ - 256
39
+ multi_stft_hop_size: 147
40
+ multi_stft_normalized: False
41
+
42
+ training:
43
+ batch_size: 7
44
+ gradient_accumulation_steps: 1
45
+ grad_clip: 0
46
+ instruments:
47
+ - vocals
48
+ - bass
49
+ - drums
50
+ - other
51
+ lr: 5.0e-05
52
+ patience: 2
53
+ reduce_factor: 0.95
54
+ target_instrument: vocals
55
+ num_epochs: 1000
56
+ num_steps: 1000
57
+ q: 0.95
58
+ coarse_loss_clip: true
59
+ ema_momentum: 0.999
60
+ optimizer: adam
61
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
62
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
63
+
64
+ augmentations:
65
+ enable: true # enable or disable all augmentations (to fast disable if needed)
66
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
67
+ loudness_min: 0.5
68
+ loudness_max: 1.5
69
+
70
+ inference:
71
+ batch_size: 1
72
+ dim_t: 256
73
+ num_overlap: 4
data_pipeline/seperation/configs/config_musdb18_scnet.yaml ADDED
@@ -0,0 +1,64 @@
1
+ audio:
2
+ chunk_size: 264600
3
+ num_channels: 2
4
+ sample_rate: 44100
5
+ min_mean_abs: 0.001
6
+
7
+ model:
8
+ dims: [4, 32, 64, 128]
9
+ bandsplit_ratios: [.175, .392, .433]
10
+ downsample_strides: [1, 4, 16]
11
+ n_conv_modules: [3, 2, 1]
12
+ n_rnn_layers: 6
13
+ rnn_hidden_dim: 128
14
+ n_sources: 4
15
+
16
+ n_fft: 4096
17
+ hop_length: 1024
18
+ win_length: 4096
19
+ stft_normalized: false
20
+
21
+ use_mamba: true
22
+ d_state: 16
23
+ d_conv: 4
24
+ d_expand: 2
25
+
26
+ training:
27
+ batch_size: 10
28
+ gradient_accumulation_steps: 1
29
+ grad_clip: 0
30
+ instruments:
31
+ - vocals
32
+ - bass
33
+ - drums
34
+ - other
35
+ lr: 5.0e-04
36
+ patience: 2
37
+ reduce_factor: 0.95
38
+ target_instrument: null
39
+ num_epochs: 1000
40
+ num_steps: 1000
41
+ q: 0.95
42
+ coarse_loss_clip: true
43
+ ema_momentum: 0.999
44
+ optimizer: adam
45
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
46
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
47
+
48
+ augmentations:
49
+ enable: true # enable or disable all augmentations (to fast disable if needed)
50
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
51
+ loudness_min: 0.5
52
+ loudness_max: 1.5
53
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
54
+ mixup_probs:
55
+ !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
56
+ - 0.2
57
+ - 0.02
58
+ mixup_loudness_min: 0.5
59
+ mixup_loudness_max: 1.5
60
+
61
+ inference:
62
+ batch_size: 1
63
+ dim_t: 256
64
+ num_overlap: 4
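The `bandsplit_ratios` describe how the spectrum is divided into low/mid/high groups, so they are expected to sum to 1, with one stride and one conv-module count per group. A quick check of the values above:

```python
bandsplit_ratios = [0.175, 0.392, 0.433]   # copied from the config above
downsample_strides = [1, 4, 16]
n_conv_modules = [3, 2, 1]

assert abs(sum(bandsplit_ratios) - 1.0) < 1e-9
assert len(bandsplit_ratios) == len(downsample_strides) == len(n_conv_modules) == 3
```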
data_pipeline/seperation/configs/config_musdb18_segm_models.yaml ADDED
@@ -0,0 +1,92 @@
1
+ audio:
2
+ chunk_size: 261632
3
+ dim_f: 4096
4
+ dim_t: 512
5
+ hop_length: 512
6
+ n_fft: 8192
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ encoder_name: tu-maxvit_large_tf_512 # look here for possibilities: https://github.com/qubvel/segmentation_models.pytorch#encoders-
13
+ decoder_type: unet # unet, fpn
14
+ act: gelu
15
+ num_channels: 128
16
+ num_subbands: 8
17
+
18
+ training:
19
+ batch_size: 7
20
+ gradient_accumulation_steps: 1
21
+ grad_clip: 0
22
+ instruments:
23
+ - vocals
24
+ - bass
25
+ - drums
26
+ - other
27
+ lr: 5.0e-05
28
+ patience: 2
29
+ reduce_factor: 0.95
30
+ target_instrument: null
31
+ num_epochs: 1000
32
+ num_steps: 2000
33
+ q: 0.95
34
+ coarse_loss_clip: true
35
+ ema_momentum: 0.999
36
+ optimizer: adamw
37
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
38
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
39
+
40
+ augmentations:
41
+ enable: true # enable or disable all augmentations (to fast disable if needed)
42
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
43
+ loudness_min: 0.5
44
+ loudness_max: 1.5
45
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
46
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
47
+ - 0.2
48
+ - 0.02
49
+ mixup_loudness_min: 0.5
50
+ mixup_loudness_max: 1.5
51
+
52
+ # apply mp3 compression to mixture only (emulate downloading mp3 from internet)
53
+ mp3_compression_on_mixture: 0.01
54
+ mp3_compression_on_mixture_bitrate_min: 32
55
+ mp3_compression_on_mixture_bitrate_max: 320
56
+ mp3_compression_on_mixture_backend: "lameenc"
57
+
58
+ all:
59
+ channel_shuffle: 0.5 # Set 0 or lower to disable
60
+ random_inverse: 0.1 # inverse track (better lower probability)
61
+ random_polarity: 0.5 # polarity change (multiply waveform to -1)
62
+ mp3_compression: 0.01
63
+ mp3_compression_min_bitrate: 32
64
+ mp3_compression_max_bitrate: 320
65
+ mp3_compression_backend: "lameenc"
66
+
67
+ vocals:
68
+ pitch_shift: 0.1
69
+ pitch_shift_min_semitones: -5
70
+ pitch_shift_max_semitones: 5
71
+ seven_band_parametric_eq: 0.25
72
+ seven_band_parametric_eq_min_gain_db: -9
73
+ seven_band_parametric_eq_max_gain_db: 9
74
+ tanh_distortion: 0.1
75
+ tanh_distortion_min: 0.1
76
+ tanh_distortion_max: 0.7
77
+ other:
78
+ pitch_shift: 0.1
79
+ pitch_shift_min_semitones: -4
80
+ pitch_shift_max_semitones: 4
81
+ gaussian_noise: 0.1
82
+ gaussian_noise_min_amplitude: 0.001
83
+ gaussian_noise_max_amplitude: 0.015
84
+ time_stretch: 0.01
85
+ time_stretch_min_rate: 0.8
86
+ time_stretch_max_rate: 1.25
87
+
88
+
89
+ inference:
90
+ batch_size: 1
91
+ dim_t: 512
92
+ num_overlap: 4
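`encoder_name` values with the `tu-` prefix are timm backbones exposed through segmentation_models_pytorch, and `decoder_type` switches between its Unet and FPN decoders. A purely illustrative sketch of instantiating such an encoder/decoder pair; the channel and class counts are made-up numbers, only the encoder name and decoder choice mirror the config above:

```python
import segmentation_models_pytorch as smp

# Hypothetical in_channels/classes; encoder_name copied from the config above.
model = smp.Unet(encoder_name="tu-maxvit_large_tf_512", encoder_weights=None,
                 in_channels=3, classes=8)
```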
data_pipeline/seperation/configs/config_vocals_bandit_bsrnn_multi_mus64.yaml ADDED
@@ -0,0 +1,73 @@
1
+ name: "MultiMaskMultiSourceBandSplitRNN"
2
+ audio:
3
+ chunk_size: 264600
4
+ num_channels: 2
5
+ sample_rate: 44100
6
+ min_mean_abs: 0.001
7
+
8
+ model:
9
+ in_channel: 1
10
+ stems: ['vocals', 'other']
11
+ band_specs: "musical"
12
+ n_bands: 64
13
+ fs: 44100
14
+ require_no_overlap: false
15
+ require_no_gap: true
16
+ normalize_channel_independently: false
17
+ treat_channel_as_feature: true
18
+ n_sqm_modules: 8
19
+ emb_dim: 128
20
+ rnn_dim: 256
21
+ bidirectional: true
22
+ rnn_type: "GRU"
23
+ mlp_dim: 512
24
+ hidden_activation: "Tanh"
25
+ hidden_activation_kwargs: null
26
+ complex_mask: true
27
+ n_fft: 2048
28
+ win_length: 2048
29
+ hop_length: 512
30
+ window_fn: "hann_window"
31
+ wkwargs: null
32
+ power: null
33
+ center: true
34
+ normalized: true
35
+ pad_mode: "constant"
36
+ onesided: true
37
+
38
+ training:
39
+ batch_size: 4
40
+ gradient_accumulation_steps: 4
41
+ grad_clip: 0
42
+ instruments:
43
+ - vocals
44
+ - other
45
+ lr: 9.0e-05
46
+ patience: 2
47
+ reduce_factor: 0.95
48
+ target_instrument: null
49
+ num_epochs: 1000
50
+ num_steps: 1000
51
+ q: 0.95
52
+ coarse_loss_clip: true
53
+ ema_momentum: 0.999
54
+ optimizer: adam
55
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
56
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
57
+
58
+ augmentations:
59
+ enable: true # enable or disable all augmentations (to fast disable if needed)
60
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
61
+ loudness_min: 0.5
62
+ loudness_max: 1.5
63
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
64
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
65
+ - 0.2
66
+ - 0.02
67
+ mixup_loudness_min: 0.5
68
+ mixup_loudness_max: 1.5
69
+
70
+ inference:
71
+ batch_size: 1
72
+ dim_t: 256
73
+ num_overlap: 4
data_pipeline/seperation/configs/config_vocals_bs_roformer.yaml ADDED
@@ -0,0 +1,138 @@
1
+ audio:
2
+ chunk_size: 131584
3
+ dim_f: 1024
4
+ dim_t: 256
5
+ hop_length: 512
6
+ n_fft: 2048
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ dim: 192
13
+ depth: 6
14
+ stereo: true
15
+ num_stems: 1
16
+ time_transformer_depth: 1
17
+ freq_transformer_depth: 1
18
+ linear_transformer_depth: 0
19
+ freqs_per_bands: !!python/tuple
20
+ - 2
21
+ - 2
22
+ - 2
23
+ - 2
24
+ - 2
25
+ - 2
26
+ - 2
27
+ - 2
28
+ - 2
29
+ - 2
30
+ - 2
31
+ - 2
32
+ - 2
33
+ - 2
34
+ - 2
35
+ - 2
36
+ - 2
37
+ - 2
38
+ - 2
39
+ - 2
40
+ - 2
41
+ - 2
42
+ - 2
43
+ - 2
44
+ - 4
45
+ - 4
46
+ - 4
47
+ - 4
48
+ - 4
49
+ - 4
50
+ - 4
51
+ - 4
52
+ - 4
53
+ - 4
54
+ - 4
55
+ - 4
56
+ - 12
57
+ - 12
58
+ - 12
59
+ - 12
60
+ - 12
61
+ - 12
62
+ - 12
63
+ - 12
64
+ - 24
65
+ - 24
66
+ - 24
67
+ - 24
68
+ - 24
69
+ - 24
70
+ - 24
71
+ - 24
72
+ - 48
73
+ - 48
74
+ - 48
75
+ - 48
76
+ - 48
77
+ - 48
78
+ - 48
79
+ - 48
80
+ - 128
81
+ - 129
82
+ dim_head: 64
83
+ heads: 8
84
+ attn_dropout: 0.1
85
+ ff_dropout: 0.1
86
+ flash_attn: true
87
+ dim_freqs_in: 1025
88
+ stft_n_fft: 2048
89
+ stft_hop_length: 512
90
+ stft_win_length: 2048
91
+ stft_normalized: false
92
+ mask_estimator_depth: 2
93
+ multi_stft_resolution_loss_weight: 1.0
94
+ multi_stft_resolutions_window_sizes: !!python/tuple
95
+ - 4096
96
+ - 2048
97
+ - 1024
98
+ - 512
99
+ - 256
100
+ multi_stft_hop_size: 147
101
+ multi_stft_normalized: False
102
+
103
+ training:
104
+ batch_size: 10
105
+ gradient_accumulation_steps: 1
106
+ grad_clip: 0
107
+ instruments:
108
+ - vocals
109
+ - other
110
+ lr: 5.0e-05
111
+ patience: 2
112
+ reduce_factor: 0.95
113
+ target_instrument: vocals
114
+ num_epochs: 1000
115
+ num_steps: 1000
116
+ q: 0.95
117
+ coarse_loss_clip: true
118
+ ema_momentum: 0.999
119
+ optimizer: adam
120
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
121
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
122
+
123
+ augmentations:
124
+ enable: true # enable or disable all augmentations (to fast disable if needed)
125
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
126
+ loudness_min: 0.5
127
+ loudness_max: 1.5
128
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
129
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
130
+ - 0.2
131
+ - 0.02
132
+ mixup_loudness_min: 0.5
133
+ mixup_loudness_max: 1.5
134
+
135
+ inference:
136
+ batch_size: 1
137
+ dim_t: 256
138
+ num_overlap: 4
data_pipeline/seperation/configs/config_vocals_htdemucs.yaml ADDED
@@ -0,0 +1,123 @@
1
+ audio:
2
+ chunk_size: 485100 # samplerate * segment
3
+ min_mean_abs: 0.001
4
+ hop_length: 1024
5
+
6
+ training:
7
+ batch_size: 10
8
+ gradient_accumulation_steps: 1
9
+ grad_clip: 0
10
+ segment: 11
11
+ shift: 1
12
+ samplerate: 44100
13
+ channels: 2
14
+ normalize: true
15
+ instruments: ['vocals', 'other']
16
+ target_instrument: null
17
+ num_epochs: 1000
18
+ num_steps: 1000
19
+ optimizer: adam
20
+ lr: 9.0e-05
21
+ patience: 2
22
+ reduce_factor: 0.95
23
+ q: 0.95
24
+ coarse_loss_clip: true
25
+ ema_momentum: 0.999
26
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
27
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
28
+
29
+ augmentations:
30
+ enable: true # enable or disable all augmentations (to fast disable if needed)
31
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
32
+ loudness_min: 0.5
33
+ loudness_max: 1.5
34
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
35
+ mixup_probs: [0.2, 0.02]
36
+ mixup_loudness_min: 0.5
37
+ mixup_loudness_max: 1.5
38
+
39
+ inference:
40
+ num_overlap: 2
41
+ batch_size: 8
42
+
43
+ model: htdemucs
44
+
45
+ htdemucs: # see demucs/htdemucs.py for a detailed description
46
+ # Channels
47
+ channels: 48
48
+ channels_time:
49
+ growth: 2
50
+ # STFT
51
+ num_subbands: 1
52
+ nfft: 4096
53
+ wiener_iters: 0
54
+ end_iters: 0
55
+ wiener_residual: false
56
+ cac: true
57
+ # Main structure
58
+ depth: 4
59
+ rewrite: true
60
+ # Frequency Branch
61
+ multi_freqs: []
62
+ multi_freqs_depth: 3
63
+ freq_emb: 0.2
64
+ emb_scale: 10
65
+ emb_smooth: true
66
+ # Convolutions
67
+ kernel_size: 8
68
+ stride: 4
69
+ time_stride: 2
70
+ context: 1
71
+ context_enc: 0
72
+ # normalization
73
+ norm_starts: 4
74
+ norm_groups: 4
75
+ # DConv residual branch
76
+ dconv_mode: 3
77
+ dconv_depth: 2
78
+ dconv_comp: 8
79
+ dconv_init: 1e-3
80
+ # Before the Transformer
81
+ bottom_channels: 512
82
+ # CrossTransformer
83
+ # ------ Common to all
84
+ # Regular parameters
85
+ t_layers: 5
86
+ t_hidden_scale: 4.0
87
+ t_heads: 8
88
+ t_dropout: 0.0
89
+ t_layer_scale: True
90
+ t_gelu: True
91
+ # ------------- Positional Embedding
92
+ t_emb: sin
93
+ t_max_positions: 10000 # for the scaled embedding
94
+ t_max_period: 10000.0
95
+ t_weight_pos_embed: 1.0
96
+ t_cape_mean_normalize: True
97
+ t_cape_augment: True
98
+ t_cape_glob_loc_scale: [5000.0, 1.0, 1.4]
99
+ t_sin_random_shift: 0
100
+ # ------------- norm before a transformer encoder
101
+ t_norm_in: True
102
+ t_norm_in_group: False
103
+ # ------------- norm inside the encoder
104
+ t_group_norm: False
105
+ t_norm_first: True
106
+ t_norm_out: True
107
+ # ------------- optim
108
+ t_weight_decay: 0.0
109
+ t_lr:
110
+ # ------------- sparsity
111
+ t_sparse_self_attn: False
112
+ t_sparse_cross_attn: False
113
+ t_mask_type: diag
114
+ t_mask_random_seed: 42
115
+ t_sparse_attn_window: 400
116
+ t_global_window: 100
117
+ t_sparsity: 0.95
118
+ t_auto_sparsity: False
119
+ # Cross Encoder First (False)
120
+ t_cross_first: False
121
+ # Weight init
122
+ rescale: 0.1
123
+
data_pipeline/seperation/configs/config_vocals_mdx23c.yaml ADDED
@@ -0,0 +1,95 @@
1
+ audio:
2
+ chunk_size: 261120
3
+ dim_f: 4096
4
+ dim_t: 256
5
+ hop_length: 1024
6
+ n_fft: 8192
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ act: gelu
13
+ bottleneck_factor: 4
14
+ growth: 128
15
+ norm: InstanceNorm
16
+ num_blocks_per_scale: 2
17
+ num_channels: 128
18
+ num_scales: 5
19
+ num_subbands: 4
20
+ scale:
21
+ - 2
22
+ - 2
23
+
24
+ training:
25
+ batch_size: 6
26
+ gradient_accumulation_steps: 1
27
+ grad_clip: 0
28
+ instruments:
29
+ - vocals
30
+ - other
31
+ lr: 9.0e-05
32
+ patience: 2
33
+ reduce_factor: 0.95
34
+ target_instrument: null
35
+ num_epochs: 1000
36
+ num_steps: 1000
37
+ q: 0.95
38
+ coarse_loss_clip: true
39
+ ema_momentum: 0.999
40
+ optimizer: adam
41
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
42
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
43
+
44
+ augmentations:
45
+ enable: true # enable or disable all augmentations (to fast disable if needed)
46
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
47
+ loudness_min: 0.5
48
+ loudness_max: 1.5
49
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
50
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
51
+ - 0.2
52
+ - 0.02
53
+ mixup_loudness_min: 0.5
54
+ mixup_loudness_max: 1.5
55
+
56
+ # apply mp3 compression to mixture only (emulate downloading mp3 from internet)
57
+ mp3_compression_on_mixture: 0.01
58
+ mp3_compression_on_mixture_bitrate_min: 32
59
+ mp3_compression_on_mixture_bitrate_max: 320
60
+ mp3_compression_on_mixture_backend: "lameenc"
61
+
62
+ all:
63
+ channel_shuffle: 0.5 # Set 0 or lower to disable
64
+ random_inverse: 0.1 # inverse track (better lower probability)
65
+ random_polarity: 0.5 # polarity change (multiply waveform to -1)
66
+ mp3_compression: 0.01
67
+ mp3_compression_min_bitrate: 32
68
+ mp3_compression_max_bitrate: 320
69
+ mp3_compression_backend: "lameenc"
70
+
71
+ vocals:
72
+ pitch_shift: 0.1
73
+ pitch_shift_min_semitones: -5
74
+ pitch_shift_max_semitones: 5
75
+ seven_band_parametric_eq: 0.25
76
+ seven_band_parametric_eq_min_gain_db: -9
77
+ seven_band_parametric_eq_max_gain_db: 9
78
+ tanh_distortion: 0.1
79
+ tanh_distortion_min: 0.1
80
+ tanh_distortion_max: 0.7
81
+ other:
82
+ pitch_shift: 0.1
83
+ pitch_shift_min_semitones: -4
84
+ pitch_shift_max_semitones: 4
85
+ gaussian_noise: 0.1
86
+ gaussian_noise_min_amplitude: 0.001
87
+ gaussian_noise_max_amplitude: 0.015
88
+ time_stretch: 0.01
89
+ time_stretch_min_rate: 0.8
90
+ time_stretch_max_rate: 1.25
91
+
92
+ inference:
93
+ batch_size: 1
94
+ dim_t: 256
95
+ num_overlap: 4
data_pipeline/seperation/configs/config_vocals_mel_band_roformer.yaml ADDED
@@ -0,0 +1,77 @@
1
+ audio:
2
+ chunk_size: 131584
3
+ dim_f: 1024
4
+ dim_t: 256
5
+ hop_length: 512
6
+ n_fft: 2048
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ dim: 192
13
+ depth: 8
14
+ stereo: true
15
+ num_stems: 1
16
+ time_transformer_depth: 1
17
+ freq_transformer_depth: 1
18
+ linear_transformer_depth: 0
19
+ num_bands: 60
20
+ dim_head: 64
21
+ heads: 8
22
+ attn_dropout: 0.1
23
+ ff_dropout: 0.1
24
+ flash_attn: True
25
+ dim_freqs_in: 1025
26
+ sample_rate: 44100 # needed for mel filter bank from librosa
27
+ stft_n_fft: 2048
28
+ stft_hop_length: 512
29
+ stft_win_length: 2048
30
+ stft_normalized: False
31
+ mask_estimator_depth: 2
32
+ multi_stft_resolution_loss_weight: 1.0
33
+ multi_stft_resolutions_window_sizes: !!python/tuple
34
+ - 4096
35
+ - 2048
36
+ - 1024
37
+ - 512
38
+ - 256
39
+ multi_stft_hop_size: 147
40
+ multi_stft_normalized: False
41
+
42
+ training:
43
+ batch_size: 7
44
+ gradient_accumulation_steps: 1
45
+ grad_clip: 0
46
+ instruments:
47
+ - vocals
48
+ - other
49
+ lr: 5.0e-05
50
+ patience: 2
51
+ reduce_factor: 0.95
52
+ target_instrument: vocals
53
+ num_epochs: 1000
54
+ num_steps: 1000
55
+ q: 0.95
56
+ coarse_loss_clip: true
57
+ ema_momentum: 0.999
58
+ optimizer: adam
59
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
60
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
61
+
62
+ augmentations:
63
+ enable: true # enable or disable all augmentations (to fast disable if needed)
64
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
65
+ loudness_min: 0.5
66
+ loudness_max: 1.5
67
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
68
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
69
+ - 0.2
70
+ - 0.02
71
+ mixup_loudness_min: 0.5
72
+ mixup_loudness_max: 1.5
73
+
74
+ inference:
75
+ batch_size: 1
76
+ dim_t: 256
77
+ num_overlap: 4
data_pipeline/seperation/configs/config_vocals_scnet.yaml ADDED
@@ -0,0 +1,71 @@
1
+ audio:
2
+ chunk_size: 264600
3
+ num_channels: 2
4
+ sample_rate: 44100
5
+ min_mean_abs: 0.000
6
+
7
+ model:
8
+ sources: ['vocals', 'other']
9
+ audio_channels: 2
10
+ # dims: [4, 32, 64, 128] # small version
11
+ dims: [4, 64, 128, 256]
12
+ nfft: 4096
13
+ hop_size: 1024
14
+ win_size: 4096
15
+ normalized: True
16
+ band_configs: {
17
+ 'low': { 'SR': .175, 'stride': 1, 'kernel': 3 },
18
+ 'mid': { 'SR': .392, 'stride': 4, 'kernel': 4 },
19
+ 'high': { 'SR': .433, 'stride': 16, 'kernel': 16 }
20
+ }
21
+ conv_depths: [3, 2, 1]
22
+ compress: 4
23
+ conv_kernel: 3
24
+ # Dual-path RNN
25
+ num_dplayer: 6
26
+ expand: 1
27
+ # mamba
28
+ use_mamba: False
29
+ mamba_config: {
30
+ 'd_stat': 16,
31
+ 'd_conv': 4,
32
+ 'd_expand': 2
33
+ }
34
+
35
+ training:
36
+ batch_size: 4
37
+ gradient_accumulation_steps: 2
38
+ grad_clip: 0
39
+ instruments:
40
+ - vocals
41
+ - other
42
+ lr: 5.0e-04
43
+ patience: 2
44
+ reduce_factor: 0.95
45
+ target_instrument: null
46
+ num_epochs: 1000
47
+ num_steps: 1000
48
+ q: 0.95
49
+ coarse_loss_clip: true
50
+ ema_momentum: 0.999
51
+ optimizer: adam
52
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
53
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
54
+
55
+ augmentations:
56
+ enable: true # enable or disable all augmentations (to fast disable if needed)
57
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
58
+ loudness_min: 0.5
59
+ loudness_max: 1.5
60
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
61
+ mixup_probs:
62
+ !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
63
+ - 0.2
64
+ - 0.02
65
+ mixup_loudness_min: 0.5
66
+ mixup_loudness_max: 1.5
67
+
68
+ inference:
69
+ batch_size: 8
70
+ dim_t: 256
71
+ num_overlap: 4
data_pipeline/seperation/configs/config_vocals_scnet_unofficial.yaml ADDED
@@ -0,0 +1,62 @@
1
+ audio:
2
+ chunk_size: 264600
3
+ num_channels: 2
4
+ sample_rate: 44100
5
+ min_mean_abs: 0.000
6
+
7
+ model:
8
+ dims: [4, 32, 64, 128]
9
+ bandsplit_ratios: [.175, .392, .433]
10
+ downsample_strides: [1, 4, 16]
11
+ n_conv_modules: [3, 2, 1]
12
+ n_rnn_layers: 6
13
+ rnn_hidden_dim: 128
14
+ n_sources: 2
15
+
16
+ n_fft: 4096
17
+ hop_length: 1024
18
+ win_length: 4096
19
+ stft_normalized: false
20
+
21
+ use_mamba: false
22
+ d_state: 16
23
+ d_conv: 4
24
+ d_expand: 2
25
+
26
+ training:
27
+ batch_size: 10
28
+ gradient_accumulation_steps: 2
29
+ grad_clip: 0
30
+ instruments:
31
+ - vocals
32
+ - other
33
+ lr: 5.0e-04
34
+ patience: 2
35
+ reduce_factor: 0.95
36
+ target_instrument: null
37
+ num_epochs: 1000
38
+ num_steps: 1000
39
+ q: 0.95
40
+ coarse_loss_clip: true
41
+ ema_momentum: 0.999
42
+ optimizer: adam
43
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
44
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
45
+
46
+ augmentations:
47
+ enable: true # enable or disable all augmentations (to fast disable if needed)
48
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
49
+ loudness_min: 0.5
50
+ loudness_max: 1.5
51
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
52
+ mixup_probs:
53
+ !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
54
+ - 0.2
55
+ - 0.02
56
+ mixup_loudness_min: 0.5
57
+ mixup_loudness_max: 1.5
58
+
59
+ inference:
60
+ batch_size: 8
61
+ dim_t: 256
62
+ num_overlap: 4
data_pipeline/seperation/configs/config_vocals_segm_models.yaml ADDED
@@ -0,0 +1,78 @@
1
+ audio:
2
+ chunk_size: 261632
3
+ dim_f: 4096
4
+ dim_t: 512
5
+ hop_length: 512
6
+ n_fft: 8192
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ encoder_name: tu-maxvit_large_tf_512 # look here for possibilities: https://github.com/qubvel/segmentation_models.pytorch#encoders-
13
+ decoder_type: unet # unet, fpn
14
+ act: gelu
15
+ num_channels: 128
16
+ num_subbands: 8
17
+
18
+ loss_multistft:
19
+ fft_sizes:
20
+ - 1024
21
+ - 2048
22
+ - 4096
23
+ hop_sizes:
24
+ - 512
25
+ - 1024
26
+ - 2048
27
+ win_lengths:
28
+ - 1024
29
+ - 2048
30
+ - 4096
31
+ window: "hann_window"
32
+ scale: "mel"
33
+ n_bins: 128
34
+ sample_rate: 44100
35
+ perceptual_weighting: true
36
+ w_sc: 1.0
37
+ w_log_mag: 1.0
38
+ w_lin_mag: 0.0
39
+ w_phs: 0.0
40
+ mag_distance: "L1"
41
+
42
+
43
+ training:
44
+ batch_size: 8
45
+ gradient_accumulation_steps: 1
46
+ grad_clip: 0
47
+ instruments:
48
+ - vocals
49
+ - other
50
+ lr: 5.0e-05
51
+ patience: 2
52
+ reduce_factor: 0.95
53
+ target_instrument: null
54
+ num_epochs: 1000
55
+ num_steps: 2000
56
+ q: 0.95
57
+ coarse_loss_clip: true
58
+ ema_momentum: 0.999
59
+ optimizer: adamw
60
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
61
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
62
+
63
+ augmentations:
64
+ enable: true # enable or disable all augmentations (to fast disable if needed)
65
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
66
+ loudness_min: 0.5
67
+ loudness_max: 1.5
68
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
69
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
70
+ - 0.2
71
+ - 0.02
72
+ mixup_loudness_min: 0.5
73
+ mixup_loudness_max: 1.5
74
+
75
+ inference:
76
+ batch_size: 1
77
+ dim_t: 512
78
+ num_overlap: 4
data_pipeline/seperation/configs/config_vocals_swin_upernet.yaml ADDED
@@ -0,0 +1,50 @@
1
+ audio:
2
+ chunk_size: 261632
3
+ dim_f: 4096
4
+ dim_t: 512
5
+ hop_length: 512
6
+ n_fft: 8192
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ act: gelu
13
+ num_channels: 16
14
+ num_subbands: 8
15
+
16
+ training:
17
+ batch_size: 14
18
+ gradient_accumulation_steps: 4
19
+ grad_clip: 0
20
+ instruments:
21
+ - vocals
22
+ - other
23
+ lr: 3.0e-05
24
+ patience: 2
25
+ reduce_factor: 0.95
26
+ target_instrument: null
27
+ num_epochs: 1000
28
+ num_steps: 1000
29
+ q: 0.95
30
+ coarse_loss_clip: true
31
+ ema_momentum: 0.999
32
+ optimizer: adamw
33
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
34
+
35
+ augmentations:
36
+ enable: true # enable or disable all augmentations (to fast disable if needed)
37
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
38
+ loudness_min: 0.5
39
+ loudness_max: 1.5
40
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
41
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
42
+ - 0.2
43
+ - 0.02
44
+ mixup_loudness_min: 0.5
45
+ mixup_loudness_max: 1.5
46
+
47
+ inference:
48
+ batch_size: 1
49
+ dim_t: 512
50
+ num_overlap: 4
data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_317_sdr_12.9755.yaml ADDED
@@ -0,0 +1,126 @@
1
+ audio:
2
+ chunk_size: 352800
3
+ dim_f: 1024
4
+ dim_t: 801 # don't work (use in model)
5
+ hop_length: 441 # don't work (use in model)
6
+ n_fft: 2048
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.000
10
+
11
+ model:
12
+ dim: 512
13
+ depth: 12
14
+ stereo: true
15
+ num_stems: 1
16
+ time_transformer_depth: 1
17
+ freq_transformer_depth: 1
18
+ linear_transformer_depth: 0
19
+ freqs_per_bands: !!python/tuple
20
+ - 2
21
+ - 2
22
+ - 2
23
+ - 2
24
+ - 2
25
+ - 2
26
+ - 2
27
+ - 2
28
+ - 2
29
+ - 2
30
+ - 2
31
+ - 2
32
+ - 2
33
+ - 2
34
+ - 2
35
+ - 2
36
+ - 2
37
+ - 2
38
+ - 2
39
+ - 2
40
+ - 2
41
+ - 2
42
+ - 2
43
+ - 2
44
+ - 4
45
+ - 4
46
+ - 4
47
+ - 4
48
+ - 4
49
+ - 4
50
+ - 4
51
+ - 4
52
+ - 4
53
+ - 4
54
+ - 4
55
+ - 4
56
+ - 12
57
+ - 12
58
+ - 12
59
+ - 12
60
+ - 12
61
+ - 12
62
+ - 12
63
+ - 12
64
+ - 24
65
+ - 24
66
+ - 24
67
+ - 24
68
+ - 24
69
+ - 24
70
+ - 24
71
+ - 24
72
+ - 48
73
+ - 48
74
+ - 48
75
+ - 48
76
+ - 48
77
+ - 48
78
+ - 48
79
+ - 48
80
+ - 128
81
+ - 129
82
+ dim_head: 64
83
+ heads: 8
84
+ attn_dropout: 0.1
85
+ ff_dropout: 0.1
86
+ flash_attn: true
87
+ dim_freqs_in: 1025
88
+ stft_n_fft: 2048
89
+ stft_hop_length: 441
90
+ stft_win_length: 2048
91
+ stft_normalized: false
92
+ mask_estimator_depth: 2
93
+ multi_stft_resolution_loss_weight: 1.0
94
+ multi_stft_resolutions_window_sizes: !!python/tuple
95
+ - 4096
96
+ - 2048
97
+ - 1024
98
+ - 512
99
+ - 256
100
+ multi_stft_hop_size: 147
101
+ multi_stft_normalized: False
102
+
103
+ training:
104
+ batch_size: 2
105
+ gradient_accumulation_steps: 1
106
+ grad_clip: 0
107
+ instruments:
108
+ - vocals
109
+ - other
110
+ lr: 1.0e-05
111
+ patience: 2
112
+ reduce_factor: 0.95
113
+ target_instrument: vocals
114
+ num_epochs: 1000
115
+ num_steps: 1000
116
+ q: 0.95
117
+ coarse_loss_clip: true
118
+ ema_momentum: 0.999
119
+ optimizer: adam
120
+ other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
121
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
122
+
123
+ inference:
124
+ batch_size: 4
125
+ dim_t: 801
126
+ num_overlap: 2
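The `# don't work (use in model)` comments mean the `dim_t`/`hop_length` entries under `audio:` are informational here, but they are still consistent with the model's STFT settings; a quick check:

```python
chunk_size, hop_length = 352800, 441
assert chunk_size == 8 * 44100                 # 8-second chunks at 44.1 kHz
assert chunk_size // hop_length + 1 == 801     # frames per chunk with a centered STFT -> dim_t
```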
data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_937_sdr_10.5309.yaml ADDED
@@ -0,0 +1,138 @@
1
+ audio:
2
+ chunk_size: 131584
3
+ dim_f: 1024
4
+ dim_t: 256
5
+ hop_length: 512
6
+ n_fft: 2048
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.001
10
+
11
+ model:
12
+ dim: 384
13
+ depth: 12
14
+ stereo: true
15
+ num_stems: 1
16
+ time_transformer_depth: 1
17
+ freq_transformer_depth: 1
18
+ linear_transformer_depth: 0
19
+ freqs_per_bands: !!python/tuple
20
+ - 2
21
+ - 2
22
+ - 2
23
+ - 2
24
+ - 2
25
+ - 2
26
+ - 2
27
+ - 2
28
+ - 2
29
+ - 2
30
+ - 2
31
+ - 2
32
+ - 2
33
+ - 2
34
+ - 2
35
+ - 2
36
+ - 2
37
+ - 2
38
+ - 2
39
+ - 2
40
+ - 2
41
+ - 2
42
+ - 2
43
+ - 2
44
+ - 4
45
+ - 4
46
+ - 4
47
+ - 4
48
+ - 4
49
+ - 4
50
+ - 4
51
+ - 4
52
+ - 4
53
+ - 4
54
+ - 4
55
+ - 4
56
+ - 12
57
+ - 12
58
+ - 12
59
+ - 12
60
+ - 12
61
+ - 12
62
+ - 12
63
+ - 12
64
+ - 24
65
+ - 24
66
+ - 24
67
+ - 24
68
+ - 24
69
+ - 24
70
+ - 24
71
+ - 24
72
+ - 48
73
+ - 48
74
+ - 48
75
+ - 48
76
+ - 48
77
+ - 48
78
+ - 48
79
+ - 48
80
+ - 128
81
+ - 129
82
+ dim_head: 64
83
+ heads: 8
84
+ attn_dropout: 0.1
85
+ ff_dropout: 0.1
86
+ flash_attn: true
87
+ dim_freqs_in: 1025
88
+ stft_n_fft: 2048
89
+ stft_hop_length: 512
90
+ stft_win_length: 2048
91
+ stft_normalized: false
92
+ mask_estimator_depth: 2
93
+ multi_stft_resolution_loss_weight: 1.0
94
+ multi_stft_resolutions_window_sizes: !!python/tuple
95
+ - 4096
96
+ - 2048
97
+ - 1024
98
+ - 512
99
+ - 256
100
+ multi_stft_hop_size: 147
101
+ multi_stft_normalized: False
102
+
103
+ training:
104
+ batch_size: 4
105
+ gradient_accumulation_steps: 1
106
+ grad_clip: 0
107
+ instruments:
108
+ - vocals
109
+ - other
110
+ lr: 5.0e-05
111
+ patience: 2
112
+ reduce_factor: 0.95
113
+ target_instrument: other
114
+ num_epochs: 1000
115
+ num_steps: 1000
116
+ q: 0.95
117
+ coarse_loss_clip: true
118
+ ema_momentum: 0.999
119
+ optimizer: adam
120
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
121
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
122
+
123
+ augmentations:
124
+ enable: true # enable or disable all augmentations (to fast disable if needed)
125
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
126
+ loudness_min: 0.5
127
+ loudness_max: 1.5
128
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
129
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
130
+ - 0.2
131
+ - 0.02
132
+ mixup_loudness_min: 0.5
133
+ mixup_loudness_max: 1.5
134
+
135
+ inference:
136
+ batch_size: 8
137
+ dim_t: 512
138
+ num_overlap: 2
data_pipeline/seperation/configs/viperx/model_mel_band_roformer_ep_3005_sdr_11.4360.yaml ADDED
@@ -0,0 +1,65 @@
1
+ audio:
2
+ chunk_size: 352800
3
+ dim_f: 1024
4
+ dim_t: 801 # don't work (use in model)
5
+ hop_length: 441 # don't work (use in model)
6
+ n_fft: 2048
7
+ num_channels: 2
8
+ sample_rate: 44100
9
+ min_mean_abs: 0.000
10
+
11
+ model:
12
+ dim: 384
13
+ depth: 12
14
+ stereo: true
15
+ num_stems: 1
16
+ time_transformer_depth: 1
17
+ freq_transformer_depth: 1
18
+ linear_transformer_depth: 0
19
+ num_bands: 60
20
+ dim_head: 64
21
+ heads: 8
22
+ attn_dropout: 0.1
23
+ ff_dropout: 0.1
24
+ flash_attn: True
25
+ dim_freqs_in: 1025
26
+ sample_rate: 44100 # needed for mel filter bank from librosa
27
+ stft_n_fft: 2048
28
+ stft_hop_length: 441
29
+ stft_win_length: 2048
30
+ stft_normalized: False
31
+ mask_estimator_depth: 2
32
+ multi_stft_resolution_loss_weight: 1.0
33
+ multi_stft_resolutions_window_sizes: !!python/tuple
34
+ - 4096
35
+ - 2048
36
+ - 1024
37
+ - 512
38
+ - 256
39
+ multi_stft_hop_size: 147
40
+ multi_stft_normalized: False
41
+
42
+ training:
43
+ batch_size: 1
44
+ gradient_accumulation_steps: 8
45
+ grad_clip: 0
46
+ instruments:
47
+ - vocals
48
+ - other
49
+ lr: 4.0e-05
50
+ patience: 2
51
+ reduce_factor: 0.95
52
+ target_instrument: vocals
53
+ num_epochs: 1000
54
+ num_steps: 1000
55
+ q: 0.95
56
+ coarse_loss_clip: true
57
+ ema_momentum: 0.999
58
+ optimizer: adam
59
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
60
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
61
+
62
+ inference:
63
+ batch_size: 4
64
+ dim_t: 801
65
+ num_overlap: 2
data_pipeline/seperation/dataset.py ADDED
@@ -0,0 +1,566 @@
1
+ # coding: utf-8
2
+ __author__ = 'Roman Solovyev (ZFTurbo): https://github.com/ZFTurbo/'
3
+
4
+
5
+ import os
6
+ import random
7
+ import numpy as np
8
+ import torch
9
+ import soundfile as sf
10
+ import pickle
11
+ import time
12
+ from tqdm import tqdm
13
+ from glob import glob
14
+ import audiomentations as AU
15
+ import pedalboard as PB
16
+ import warnings
17
+ warnings.filterwarnings("ignore")
18
+
19
+
20
+ def load_chunk(path, length, chunk_size, offset=None):
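+ # Read a chunk_size-frame window from an audio file: a random (or given) offset when the file is long enough, otherwise the whole file zero-padded up to chunk_size; returns an array of shape (channels, chunk_size).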
21
+ if chunk_size <= length:
22
+ if offset is None:
23
+ offset = np.random.randint(length - chunk_size + 1)
24
+ x = sf.read(path, dtype='float32', start=offset, frames=chunk_size)[0]
25
+ else:
26
+ x = sf.read(path, dtype='float32')[0]
27
+ pad = np.zeros([chunk_size - length, 2])
28
+ x = np.concatenate([x, pad])
29
+ return x.T
30
+
31
+
32
+ class MSSDataset(torch.utils.data.Dataset):
33
+ def __init__(self, config, data_path, metadata_path="metadata.pkl", dataset_type=1, batch_size=None):
34
+ self.config = config
35
+ self.dataset_type = dataset_type # 1, 2, 3 or 4
36
+ self.instruments = instruments = config.training.instruments
37
+ if batch_size is None:
38
+ batch_size = config.training.batch_size
39
+ self.batch_size = batch_size
40
+ self.file_types = ['wav', 'flac']
41
+
42
+ # Augmentation block
43
+ self.aug = False
44
+ if 'augmentations' in config:
45
+ if config['augmentations'].enable is True:
46
+ print('Use augmentation for training')
47
+ self.aug = True
48
+ else:
49
+ print('There is no augmentations block in config. Augmentations disabled for training...')
50
+
51
+ # metadata_path = data_path + '/metadata'
52
+ try:
53
+ metadata = pickle.load(open(metadata_path, 'rb'))
54
+ print('Loading songs data from cache: {}. If you updated dataset remove {} before training!'.format(metadata_path, os.path.basename(metadata_path)))
55
+ except Exception:
56
+ print('Collecting metadata for', str(data_path), 'Dataset type:', self.dataset_type)
57
+ if self.dataset_type in [1, 4]:
58
+ metadata = []
59
+ track_paths = []
60
+ if type(data_path) == list:
61
+ for tp in data_path:
62
+ track_paths += sorted(glob(tp + '/*'))
63
+ else:
64
+ track_paths += sorted(glob(data_path + '/*'))
65
+
66
+ track_paths = [path for path in track_paths if os.path.basename(path)[0] != '.' and os.path.isdir(path)]
67
+ for path in tqdm(track_paths):
68
+ # Check lengths of all instruments (it can be different in some cases)
69
+ lengths_arr = []
70
+ for instr in instruments:
71
+ length = -1
72
+ for extension in self.file_types:
73
+ path_to_audio_file = path + '/{}.{}'.format(instr, extension)
74
+ if os.path.isfile(path_to_audio_file):
75
+ length = len(sf.read(path_to_audio_file)[0])
76
+ break
77
+ if length == -1:
78
+ print('Cannot find file "{}" in folder {}'.format(instr, path))
79
+ continue
80
+ lengths_arr.append(length)
81
+ lengths_arr = np.array(lengths_arr)
82
+ if lengths_arr.min() != lengths_arr.max():
83
+ print('Warning: lengths of stems are different for path: {}. ({} != {})'.format(
84
+ path,
85
+ lengths_arr.min(),
86
+ lengths_arr.max())
87
+ )
88
+ # We use minimum to allow overflow for soundfile read in non-equal length cases
89
+ metadata.append((path, lengths_arr.min()))
90
+ elif self.dataset_type == 2:
91
+ metadata = dict()
92
+ for instr in self.instruments:
93
+ metadata[instr] = []
94
+ track_paths = []
95
+ if type(data_path) == list:
96
+ for tp in data_path:
97
+ track_paths += sorted(glob(tp + '/{}/*.wav'.format(instr)))
98
+ track_paths += sorted(glob(tp + '/{}/*.flac'.format(instr)))
99
+ else:
100
+ track_paths += sorted(glob(data_path + '/{}/*.wav'.format(instr)))
101
+ track_paths += sorted(glob(data_path + '/{}/*.flac'.format(instr)))
102
+
103
+ for path in tqdm(track_paths):
104
+ length = len(sf.read(path)[0])
105
+ metadata[instr].append((path, length))
106
+ elif self.dataset_type == 3:
107
+ import pandas as pd
108
+ if type(data_path) != list:
109
+ data_path = [data_path]
110
+
111
+ metadata = dict()
112
+ for i in range(len(data_path)):
113
+ print('Reading tracks from: {}'.format(data_path[i]))
114
+ df = pd.read_csv(data_path[i])
115
+
116
+ skipped = 0
117
+ for instr in self.instruments:
118
+ part = df[df['instrum'] == instr].copy()
119
+ print('Tracks found for {}: {}'.format(instr, len(part)))
120
+ for instr in self.instruments:
121
+ part = df[df['instrum'] == instr].copy()
122
+ metadata[instr] = []
123
+ track_paths = list(part['path'].values)
124
+ for path in tqdm(track_paths):
125
+ if not os.path.isfile(path):
126
+ print('Cannot find track: {}'.format(path))
127
+ skipped += 1
128
+ continue
129
+ # print(path)
130
+ try:
131
+ length = len(sf.read(path)[0])
132
+ except Exception:
133
+ print('Problem with path: {}'.format(path))
134
+ skipped += 1
135
+ continue
136
+ metadata[instr].append((path, length))
137
+ if skipped > 0:
138
+ print('Missing tracks: {} from {}'.format(skipped, len(df)))
139
+ else:
140
+ print('Unknown dataset type: {}. Must be 1, 2, 3 or 4'.format(self.dataset_type))
141
+ exit()
142
+
143
+ pickle.dump(metadata, open(metadata_path, 'wb'))
144
+
145
+ if self.dataset_type in [1, 4]:
146
+ if len(metadata) > 0:
147
+ print('Found tracks in dataset: {}'.format(len(metadata)))
148
+ else:
149
+ print('No tracks found for training. Check paths you provided!')
150
+ exit()
151
+ else:
152
+ for instr in self.instruments:
153
+ print('Found tracks for {} in dataset: {}'.format(instr, len(metadata[instr])))
154
+ self.metadata = metadata
155
+ self.chunk_size = config.audio.chunk_size
156
+ self.min_mean_abs = config.audio.min_mean_abs
157
+
158
+ def __len__(self):
159
+ return self.config.training.num_steps * self.batch_size
160
+
161
+ def load_source(self, metadata, instr):
162
+ while True:
163
+ if self.dataset_type in [1, 4]:
164
+ track_path, track_length = random.choice(metadata)
165
+ for extension in self.file_types:
166
+ path_to_audio_file = track_path + '/{}.{}'.format(instr, extension)
167
+ if os.path.isfile(path_to_audio_file):
168
+ try:
169
+ source = load_chunk(path_to_audio_file, track_length, self.chunk_size)
170
+ except Exception as e:
171
+ # Sometimes error during FLAC reading, catch it and use zero stem
172
+ print('Error: {} Path: {}'.format(e, path_to_audio_file))
173
+ source = np.zeros((2, self.chunk_size), dtype=np.float32)
174
+ break
175
+ else:
176
+ track_path, track_length = random.choice(metadata[instr])
177
+ try:
178
+ source = load_chunk(track_path, track_length, self.chunk_size)
179
+ except Exception as e:
180
+ # Sometimes error during FLAC reading, catch it and use zero stem
181
+ print('Error: {} Path: {}'.format(e, track_path))
182
+ source = np.zeros((2, self.chunk_size), dtype=np.float32)
183
+
184
+ if np.abs(source).mean() >= self.min_mean_abs: # remove quiet chunks
185
+ break
186
+ if self.aug:
187
+ source = self.augm_data(source, instr)
188
+ return torch.tensor(source, dtype=torch.float32)
189
+
190
+ def load_random_mix(self):
191
+ res = []
192
+ for instr in self.instruments:
193
+ s1 = self.load_source(self.metadata, instr)
194
+ # Mixup augmentation. Multiple mix of same type of stems
195
+ if self.aug:
196
+ if 'mixup' in self.config['augmentations']:
197
+ if self.config['augmentations'].mixup:
198
+ mixup = [s1]
199
+ for prob in self.config.augmentations.mixup_probs:
200
+ if random.uniform(0, 1) < prob:
201
+ s2 = self.load_source(self.metadata, instr)
202
+ mixup.append(s2)
203
+ mixup = torch.stack(mixup, dim=0)
204
+ loud_values = np.random.uniform(
205
+ low=self.config.augmentations.loudness_min,
206
+ high=self.config.augmentations.loudness_max,
207
+ size=(len(mixup),)
208
+ )
209
+ loud_values = torch.tensor(loud_values, dtype=torch.float32)
210
+ mixup *= loud_values[:, None, None]
211
+ s1 = mixup.mean(dim=0, dtype=torch.float32)
212
+ res.append(s1)
213
+ res = torch.stack(res)
214
+ return res
215
+
216
+ def load_aligned_data(self):
217
+ track_path, track_length = random.choice(self.metadata)
218
+ res = []
219
+ for i in self.instruments:
220
+ attempts = 10
221
+ while attempts:
222
+ for extension in self.file_types:
223
+ path_to_audio_file = track_path + '/{}.{}'.format(i, extension)
224
+ if os.path.isfile(path_to_audio_file):
225
+ try:
226
+ source = load_chunk(path_to_audio_file, track_length, self.chunk_size)
227
+ except Exception as e:
228
+ # Sometimes error during FLAC reading, catch it and use zero stem
229
+ print('Error: {} Path: {}'.format(e, path_to_audio_file))
230
+ source = np.zeros((2, self.chunk_size), dtype=np.float32)
231
+ break
232
+ if np.abs(source).mean() >= self.min_mean_abs: # remove quiet chunks
233
+ break
234
+ attempts -= 1
235
+ if attempts <= 0:
236
+ print('Attempts max!', track_path)
237
+ res.append(source)
238
+ res = np.stack(res, axis=0)
239
+ if self.aug:
240
+ for i, instr in enumerate(self.instruments):
241
+ res[i] = self.augm_data(res[i], instr)
242
+ return torch.tensor(res, dtype=torch.float32)
243
+
244
+ def augm_data(self, source, instr):
245
+ # source.shape = (2, 261120) - first channels, second length
246
+ source_shape = source.shape
247
+ applied_augs = []
248
+ if 'all' in self.config['augmentations']:
249
+ augs = self.config['augmentations']['all']
250
+ else:
251
+ augs = dict()
252
+
253
+ # We need to add to all augmentations specific augs for stem. And rewrite values if needed
254
+ if instr in self.config['augmentations']:
255
+ for el in self.config['augmentations'][instr]:
256
+ augs[el] = self.config['augmentations'][instr][el]
257
+
258
+ # Channel shuffle
259
+ if 'channel_shuffle' in augs:
260
+ if augs['channel_shuffle'] > 0:
261
+ if random.uniform(0, 1) < augs['channel_shuffle']:
262
+ source = source[::-1].copy()
263
+ applied_augs.append('channel_shuffle')
264
+ # Random inverse
265
+ if 'random_inverse' in augs:
266
+ if augs['random_inverse'] > 0:
267
+ if random.uniform(0, 1) < augs['random_inverse']:
268
+ source = source[:, ::-1].copy()
269
+ applied_augs.append('random_inverse')
270
+ # Random polarity (multiply -1)
271
+ if 'random_polarity' in augs:
272
+ if augs['random_polarity'] > 0:
273
+ if random.uniform(0, 1) < augs['random_polarity']:
274
+ source = -source.copy()
275
+ applied_augs.append('random_polarity')
276
+ # Random pitch shift
277
+ if 'pitch_shift' in augs:
278
+ if augs['pitch_shift'] > 0:
279
+ if random.uniform(0, 1) < augs['pitch_shift']:
280
+ apply_aug = AU.PitchShift(
281
+ min_semitones=augs['pitch_shift_min_semitones'],
282
+ max_semitones=augs['pitch_shift_max_semitones'],
283
+ p=1.0
284
+ )
285
+ source = apply_aug(samples=source, sample_rate=44100)
286
+ applied_augs.append('pitch_shift')
287
+ # Random seven band parametric eq
288
+ if 'seven_band_parametric_eq' in augs:
289
+ if augs['seven_band_parametric_eq'] > 0:
290
+ if random.uniform(0, 1) < augs['seven_band_parametric_eq']:
291
+ apply_aug = AU.SevenBandParametricEQ(
292
+ min_gain_db=augs['seven_band_parametric_eq_min_gain_db'],
293
+ max_gain_db=augs['seven_band_parametric_eq_max_gain_db'],
294
+ p=1.0
295
+ )
296
+ source = apply_aug(samples=source, sample_rate=44100)
297
+ applied_augs.append('seven_band_parametric_eq')
298
+ # Random tanh distortion
299
+ if 'tanh_distortion' in augs:
300
+ if augs['tanh_distortion'] > 0:
301
+ if random.uniform(0, 1) < augs['tanh_distortion']:
302
+ apply_aug = AU.TanhDistortion(
303
+ min_distortion=augs['tanh_distortion_min'],
304
+ max_distortion=augs['tanh_distortion_max'],
305
+ p=1.0
306
+ )
307
+ source = apply_aug(samples=source, sample_rate=44100)
308
+ applied_augs.append('tanh_distortion')
309
+ # Random MP3 Compression
310
+ if 'mp3_compression' in augs:
311
+ if augs['mp3_compression'] > 0:
312
+ if random.uniform(0, 1) < augs['mp3_compression']:
313
+ apply_aug = AU.Mp3Compression(
314
+ min_bitrate=augs['mp3_compression_min_bitrate'],
315
+ max_bitrate=augs['mp3_compression_max_bitrate'],
316
+ backend=augs['mp3_compression_backend'],
317
+ p=1.0
318
+ )
319
+ source = apply_aug(samples=source, sample_rate=44100)
320
+ applied_augs.append('mp3_compression')
321
+ # Random AddGaussianNoise
322
+ if 'gaussian_noise' in augs:
323
+ if augs['gaussian_noise'] > 0:
324
+ if random.uniform(0, 1) < augs['gaussian_noise']:
325
+ apply_aug = AU.AddGaussianNoise(
326
+ min_amplitude=augs['gaussian_noise_min_amplitude'],
327
+ max_amplitude=augs['gaussian_noise_max_amplitude'],
328
+ p=1.0
329
+ )
330
+ source = apply_aug(samples=source, sample_rate=44100)
331
+ applied_augs.append('gaussian_noise')
332
+ # Random TimeStretch
333
+ if 'time_stretch' in augs:
334
+ if augs['time_stretch'] > 0:
335
+ if random.uniform(0, 1) < augs['time_stretch']:
336
+ apply_aug = AU.TimeStretch(
337
+ min_rate=augs['time_stretch_min_rate'],
338
+ max_rate=augs['time_stretch_max_rate'],
339
+ leave_length_unchanged=True,
340
+ p=1.0
341
+ )
342
+ source = apply_aug(samples=source, sample_rate=44100)
343
+ applied_augs.append('time_stretch')
344
+
345
+ # Possible fix of shape
346
+ if source_shape != source.shape:
347
+ source = source[..., :source_shape[-1]]
348
+
349
+ # Random Reverb
350
+ if 'pedalboard_reverb' in augs:
351
+ if augs['pedalboard_reverb'] > 0:
352
+ if random.uniform(0, 1) < augs['pedalboard_reverb']:
353
+ room_size = random.uniform(
354
+ augs['pedalboard_reverb_room_size_min'],
355
+ augs['pedalboard_reverb_room_size_max'],
356
+ )
357
+ damping = random.uniform(
358
+ augs['pedalboard_reverb_damping_min'],
359
+ augs['pedalboard_reverb_damping_max'],
360
+ )
361
+ wet_level = random.uniform(
362
+ augs['pedalboard_reverb_wet_level_min'],
363
+ augs['pedalboard_reverb_wet_level_max'],
364
+ )
365
+ dry_level = random.uniform(
366
+ augs['pedalboard_reverb_dry_level_min'],
367
+ augs['pedalboard_reverb_dry_level_max'],
368
+ )
369
+ width = random.uniform(
370
+ augs['pedalboard_reverb_width_min'],
371
+ augs['pedalboard_reverb_width_max'],
372
+ )
373
+ board = PB.Pedalboard([PB.Reverb(
374
+ room_size=room_size, # 0.1 - 0.9
375
+ damping=damping, # 0.1 - 0.9
376
+ wet_level=wet_level, # 0.1 - 0.9
377
+ dry_level=dry_level, # 0.1 - 0.9
378
+ width=width, # 0.9 - 1.0
379
+ freeze_mode=0.0,
380
+ )])
381
+ source = board(source, 44100)
382
+ applied_augs.append('pedalboard_reverb')
383
+
384
+ # Random Chorus
385
+ if 'pedalboard_chorus' in augs:
386
+ if augs['pedalboard_chorus'] > 0:
387
+ if random.uniform(0, 1) < augs['pedalboard_chorus']:
388
+ rate_hz = random.uniform(
389
+ augs['pedalboard_chorus_rate_hz_min'],
390
+ augs['pedalboard_chorus_rate_hz_max'],
391
+ )
392
+ depth = random.uniform(
393
+ augs['pedalboard_chorus_depth_min'],
394
+ augs['pedalboard_chorus_depth_max'],
395
+ )
396
+ centre_delay_ms = random.uniform(
397
+ augs['pedalboard_chorus_centre_delay_ms_min'],
398
+ augs['pedalboard_chorus_centre_delay_ms_max'],
399
+ )
400
+ feedback = random.uniform(
401
+ augs['pedalboard_chorus_feedback_min'],
402
+ augs['pedalboard_chorus_feedback_max'],
403
+ )
404
+ mix = random.uniform(
405
+ augs['pedalboard_chorus_mix_min'],
406
+ augs['pedalboard_chorus_mix_max'],
407
+ )
408
+ board = PB.Pedalboard([PB.Chorus(
409
+ rate_hz=rate_hz,
410
+ depth=depth,
411
+ centre_delay_ms=centre_delay_ms,
412
+ feedback=feedback,
413
+ mix=mix,
414
+ )])
415
+ source = board(source, 44100)
416
+ applied_augs.append('pedalboard_chorus')
417
+
418
+ # Random Phazer
419
+ if 'pedalboard_phazer' in augs:
420
+ if augs['pedalboard_phazer'] > 0:
421
+ if random.uniform(0, 1) < augs['pedalboard_phazer']:
422
+ rate_hz = random.uniform(
423
+ augs['pedalboard_phazer_rate_hz_min'],
424
+ augs['pedalboard_phazer_rate_hz_max'],
425
+ )
426
+ depth = random.uniform(
427
+ augs['pedalboard_phazer_depth_min'],
428
+ augs['pedalboard_phazer_depth_max'],
429
+ )
430
+ centre_frequency_hz = random.uniform(
431
+ augs['pedalboard_phazer_centre_frequency_hz_min'],
432
+ augs['pedalboard_phazer_centre_frequency_hz_max'],
433
+ )
434
+ feedback = random.uniform(
435
+ augs['pedalboard_phazer_feedback_min'],
436
+ augs['pedalboard_phazer_feedback_max'],
437
+ )
438
+ mix = random.uniform(
439
+ augs['pedalboard_phazer_mix_min'],
440
+ augs['pedalboard_phazer_mix_max'],
441
+ )
442
+ board = PB.Pedalboard([PB.Phaser(
443
+ rate_hz=rate_hz,
444
+ depth=depth,
445
+ centre_frequency_hz=centre_frequency_hz,
446
+ feedback=feedback,
447
+ mix=mix,
448
+ )])
449
+ source = board(source, 44100)
450
+ applied_augs.append('pedalboard_phazer')
451
+
452
+ # Random Distortion
453
+ if 'pedalboard_distortion' in augs:
454
+ if augs['pedalboard_distortion'] > 0:
455
+ if random.uniform(0, 1) < augs['pedalboard_distortion']:
456
+ drive_db = random.uniform(
457
+ augs['pedalboard_distortion_drive_db_min'],
458
+ augs['pedalboard_distortion_drive_db_max'],
459
+ )
460
+ board = PB.Pedalboard([PB.Distortion(
461
+ drive_db=drive_db,
462
+ )])
463
+ source = board(source, 44100)
464
+ applied_augs.append('pedalboard_distortion')
465
+
466
+ # Random PitchShift
467
+ if 'pedalboard_pitch_shift' in augs:
468
+ if augs['pedalboard_pitch_shift'] > 0:
469
+ if random.uniform(0, 1) < augs['pedalboard_pitch_shift']:
470
+ semitones = random.uniform(
471
+ augs['pedalboard_pitch_shift_semitones_min'],
472
+ augs['pedalboard_pitch_shift_semitones_max'],
473
+ )
474
+ board = PB.Pedalboard([PB.PitchShift(
475
+ semitones=semitones
476
+ )])
477
+ source = board(source, 44100)
478
+ applied_augs.append('pedalboard_pitch_shift')
479
+
480
+ # Random Resample
481
+ if 'pedalboard_resample' in augs:
482
+ if augs['pedalboard_resample'] > 0:
483
+ if random.uniform(0, 1) < augs['pedalboard_resample']:
484
+ target_sample_rate = random.uniform(
485
+ augs['pedalboard_resample_target_sample_rate_min'],
486
+ augs['pedalboard_resample_target_sample_rate_max'],
487
+ )
488
+ board = PB.Pedalboard([PB.Resample(
489
+ target_sample_rate=target_sample_rate
490
+ )])
491
+ source = board(source, 44100)
492
+ applied_augs.append('pedalboard_resample')
493
+
494
+ # Random Bitcrash
495
+ if 'pedalboard_bitcrash' in augs:
496
+ if augs['pedalboard_bitcrash'] > 0:
497
+ if random.uniform(0, 1) < augs['pedalboard_bitcrash']:
498
+ bit_depth = random.uniform(
499
+ augs['pedalboard_bitcrash_bit_depth_min'],
500
+ augs['pedalboard_bitcrash_bit_depth_max'],
501
+ )
502
+ board = PB.Pedalboard([PB.Bitcrush(
503
+ bit_depth=bit_depth
504
+ )])
505
+ source = board(source, 44100)
506
+ applied_augs.append('pedalboard_bitcrash')
507
+
508
+ # Random MP3Compressor
509
+ if 'pedalboard_mp3_compressor' in augs:
510
+ if augs['pedalboard_mp3_compressor'] > 0:
511
+ if random.uniform(0, 1) < augs['pedalboard_mp3_compressor']:
512
+ vbr_quality = random.uniform(
513
+ augs['pedalboard_mp3_compressor_pedalboard_mp3_compressor_min'],
514
+ augs['pedalboard_mp3_compressor_pedalboard_mp3_compressor_max'],
515
+ )
516
+ board = PB.Pedalboard([PB.MP3Compressor(
517
+ vbr_quality=vbr_quality
518
+ )])
519
+ source = board(source, 44100)
520
+ applied_augs.append('pedalboard_mp3_compressor')
521
+
522
+ # print(applied_augs)
523
+ return source
524
+
525
+ def __getitem__(self, index):
526
+ if self.dataset_type in [1, 2, 3]:
527
+ res = self.load_random_mix()
528
+ else:
529
+ res = self.load_aligned_data()
530
+
531
+ # Randomly change loudness of each stem
532
+ if self.aug:
533
+ if 'loudness' in self.config['augmentations']:
534
+ if self.config['augmentations']['loudness']:
535
+ loud_values = np.random.uniform(
536
+ low=self.config['augmentations']['loudness_min'],
537
+ high=self.config['augmentations']['loudness_max'],
538
+ size=(len(res),)
539
+ )
540
+ loud_values = torch.tensor(loud_values, dtype=torch.float32)
541
+ res *= loud_values[:, None, None]
542
+
543
+ mix = res.sum(0)
544
+
545
+ if self.aug:
546
+ if 'mp3_compression_on_mixture' in self.config['augmentations']:
547
+ apply_aug = AU.Mp3Compression(
548
+ min_bitrate=self.config['augmentations']['mp3_compression_on_mixture_bitrate_min'],
549
+ max_bitrate=self.config['augmentations']['mp3_compression_on_mixture_bitrate_max'],
550
+ backend=self.config['augmentations']['mp3_compression_on_mixture_backend'],
551
+ p=self.config['augmentations']['mp3_compression_on_mixture']
552
+ )
553
+ mix_conv = mix.cpu().numpy().astype(np.float32)
554
+ required_shape = mix_conv.shape
555
+ mix = apply_aug(samples=mix_conv, sample_rate=44100)
556
+ # Sometimes it gives longer audio (so we cut)
557
+ if mix.shape != required_shape:
558
+ mix = mix[..., :required_shape[-1]]
559
+ mix = torch.tensor(mix, dtype=torch.float32)
560
+
561
+ # If we need only given stem (for roformers)
562
+ if self.config.training.target_instrument is not None:
563
+ index = self.config.training.instruments.index(self.config.training.target_instrument)
564
+ return res[index], mix
565
+
566
+ return res, mix
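Editorial usage sketch (not part of the commit): it assumes the repository's `get_model_from_config` helper from `utils.py` (the same helper imported by `inference.py` below) and placeholder paths for the config and a Type 1 dataset folder.

```python
# Hypothetical training-side usage of MSSDataset with a Type 1 (MUSDB-style) folder.
import torch
from utils import get_model_from_config
from dataset import MSSDataset

# get_model_from_config returns (model, config); only the config is needed here.
_, config = get_model_from_config('bs_roformer', 'ckpts/model_bs_roformer_ep_317_sdr_12.9755.yaml')

dataset = MSSDataset(
    config,
    data_path='/path/to/train',           # folder of song folders, each containing <stem>.wav / <stem>.flac
    metadata_path='metadata_train.pkl',   # cached stem lengths; delete it after changing the dataset
    dataset_type=1,
)
loader = torch.utils.data.DataLoader(dataset, batch_size=config.training.batch_size)
stems, mixture = next(iter(loader))       # (B, 2, chunk_size) each here, since target_instrument is set
```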
data_pipeline/seperation/docs/augmentations.md ADDED
@@ -0,0 +1,146 @@
1
+ ### Augmentations
2
+
3
+ Augmentations allow stems to be changed on the fly, increasing the effective size of the dataset by creating new samples from old ones.
4
+ Augmentations are controlled from the config file. Below you can find an example of a full config,
5
+ which includes all available augmentations:
6
+
7
+ ```config
8
+ augmentations:
9
+ enable: true # enable or disable all augmentations (to fast disable if needed)
10
+ loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
11
+ loudness_min: 0.5
12
+ loudness_max: 1.5
13
+ mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
14
+ mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
15
+ - 0.2
16
+ - 0.02
17
+ mixup_loudness_min: 0.5
18
+ mixup_loudness_max: 1.5
19
+
20
+ # apply mp3 compression to mixture only (emulate downloading mp3 from internet)
21
+ mp3_compression_on_mixture: 0.01
22
+ mp3_compression_on_mixture_bitrate_min: 32
23
+ mp3_compression_on_mixture_bitrate_max: 320
24
+ mp3_compression_on_mixture_backend: "lameenc"
25
+
26
+ all:
27
+ channel_shuffle: 0.5 # Set 0 or lower to disable
28
+ random_inverse: 0.1 # inverse track (better lower probability)
29
+ random_polarity: 0.5 # polarity change (multiply waveform to -1)
30
+ mp3_compression: 0.01
31
+ mp3_compression_min_bitrate: 32
32
+ mp3_compression_max_bitrate: 320
33
+ mp3_compression_backend: "lameenc"
34
+
35
+ # pedalboard reverb block
36
+ pedalboard_reverb: 0.01
37
+ pedalboard_reverb_room_size_min: 0.1
38
+ pedalboard_reverb_room_size_max: 0.9
39
+ pedalboard_reverb_damping_min: 0.1
40
+ pedalboard_reverb_damping_max: 0.9
41
+ pedalboard_reverb_wet_level_min: 0.1
42
+ pedalboard_reverb_wet_level_max: 0.9
43
+ pedalboard_reverb_dry_level_min: 0.1
44
+ pedalboard_reverb_dry_level_max: 0.9
45
+ pedalboard_reverb_width_min: 0.9
46
+ pedalboard_reverb_width_max: 1.0
47
+
48
+ # pedalboard chorus block
49
+ pedalboard_chorus: 0.01
50
+ pedalboard_chorus_rate_hz_min: 1.0
51
+ pedalboard_chorus_rate_hz_max: 7.0
52
+ pedalboard_chorus_depth_min: 0.25
53
+ pedalboard_chorus_depth_max: 0.95
54
+ pedalboard_chorus_centre_delay_ms_min: 3
55
+ pedalboard_chorus_centre_delay_ms_max: 10
56
+ pedalboard_chorus_feedback_min: 0.0
57
+ pedalboard_chorus_feedback_max: 0.5
58
+ pedalboard_chorus_mix_min: 0.1
59
+ pedalboard_chorus_mix_max: 0.9
60
+
61
+ # pedalboard phazer block
62
+ pedalboard_phazer: 0.01
63
+ pedalboard_phazer_rate_hz_min: 1.0
64
+ pedalboard_phazer_rate_hz_max: 10.0
65
+ pedalboard_phazer_depth_min: 0.25
66
+ pedalboard_phazer_depth_max: 0.95
67
+ pedalboard_phazer_centre_frequency_hz_min: 200
68
+ pedalboard_phazer_centre_frequency_hz_max: 12000
69
+ pedalboard_phazer_feedback_min: 0.0
70
+ pedalboard_phazer_feedback_max: 0.5
71
+ pedalboard_phazer_mix_min: 0.1
72
+ pedalboard_phazer_mix_max: 0.9
73
+
74
+ # pedalboard distortion block
75
+ pedalboard_distortion: 0.01
76
+ pedalboard_distortion_drive_db_min: 1.0
77
+ pedalboard_distortion_drive_db_max: 25.0
78
+
79
+ # pedalboard pitch shift block
80
+ pedalboard_pitch_shift: 0.01
81
+ pedalboard_pitch_shift_semitones_min: -7
82
+ pedalboard_pitch_shift_semitones_max: 7
83
+
84
+ # pedalboard resample block
85
+ pedalboard_resample: 0.01
86
+ pedalboard_resample_target_sample_rate_min: 4000
87
+ pedalboard_resample_target_sample_rate_max: 44100
88
+
89
+ # pedalboard bitcrash block
90
+ pedalboard_bitcrash: 0.01
91
+ pedalboard_bitcrash_bit_depth_min: 4
92
+ pedalboard_bitcrash_bit_depth_max: 16
93
+
94
+ # pedalboard mp3 compressor block
95
+ pedalboard_mp3_compressor: 0.01
96
+ pedalboard_mp3_compressor_pedalboard_mp3_compressor_min: 0
97
+ pedalboard_mp3_compressor_pedalboard_mp3_compressor_max: 9.999
98
+
99
+ vocals:
100
+ pitch_shift: 0.1
101
+ pitch_shift_min_semitones: -5
102
+ pitch_shift_max_semitones: 5
103
+ seven_band_parametric_eq: 0.25
104
+ seven_band_parametric_eq_min_gain_db: -9
105
+ seven_band_parametric_eq_max_gain_db: 9
106
+ tanh_distortion: 0.1
107
+ tanh_distortion_min: 0.1
108
+ tanh_distortion_max: 0.7
109
+ bass:
110
+ pitch_shift: 0.1
111
+ pitch_shift_min_semitones: -2
112
+ pitch_shift_max_semitones: 2
113
+ seven_band_parametric_eq: 0.25
114
+ seven_band_parametric_eq_min_gain_db: -3
115
+ seven_band_parametric_eq_max_gain_db: 6
116
+ tanh_distortion: 0.2
117
+ tanh_distortion_min: 0.1
118
+ tanh_distortion_max: 0.5
119
+ drums:
120
+ pitch_shift: 0.33
121
+ pitch_shift_min_semitones: -5
122
+ pitch_shift_max_semitones: 5
123
+ seven_band_parametric_eq: 0.25
124
+ seven_band_parametric_eq_min_gain_db: -9
125
+ seven_band_parametric_eq_max_gain_db: 9
126
+ tanh_distortion: 0.33
127
+ tanh_distortion_min: 0.1
128
+ tanh_distortion_max: 0.6
129
+ other:
130
+ pitch_shift: 0.1
131
+ pitch_shift_min_semitones: -4
132
+ pitch_shift_max_semitones: 4
133
+ gaussian_noise: 0.1
134
+ gaussian_noise_min_amplitude: 0.001
135
+ gaussian_noise_max_amplitude: 0.015
136
+ time_stretch: 0.01
137
+ time_stretch_min_rate: 0.8
138
+ time_stretch_max_rate: 1.25
139
+ ```
140
+
141
+ You can copy-paste it into your config to use augmentations.
142
+ Notes:
143
+ * To completely disable all augmentations you can either remove the `augmentations` section from the config or set `enable` to `false`.
144
+ * To disable an individual augmentation, set its probability to zero.
145
+ * Augmentations in the `all` subsection are applied to all stems.
146
+ * Augmentations in the `vocals`, `bass`, etc. subsections are applied only to the corresponding stems. You can create such subsections for any stem listed in `training.instruments`. A short sketch of how these per-stem probabilities are applied follows below.
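As referenced in the notes above, here is an editorial sketch (not from the repository) of how these values are interpreted at training time, mirroring `augm_data` in `dataset.py`: the per-stem subsection is merged on top of the `all` block, and each value is the probability that the corresponding transform is applied to a chunk. The helper name `apply_configured_augs` is hypothetical.

```python
import random
import audiomentations as AU

def apply_configured_augs(source, augs, sample_rate=44100):
    """source: array of shape (channels, samples); augs: `all` block with per-stem overrides merged in."""
    # Each key holds a probability in [0, 1]; 0 (or a missing key) disables that transform.
    if random.random() < augs.get('channel_shuffle', 0):
        source = source[::-1].copy()                      # swap left/right channels
    if random.random() < augs.get('pitch_shift', 0):
        source = AU.PitchShift(
            min_semitones=augs['pitch_shift_min_semitones'],
            max_semitones=augs['pitch_shift_max_semitones'],
            p=1.0,                                        # the gate above already decided, so always apply
        )(samples=source, sample_rate=sample_rate)
    return source

# Per-stem settings override the `all` block before the gates run, e.g. for vocals:
# augs = {**config['augmentations'].get('all', {}), **config['augmentations'].get('vocals', {})}
```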
data_pipeline/seperation/docs/bs_roformer_info.md ADDED
@@ -0,0 +1,145 @@
1
+ ### Batch sizes for BSRoformer
2
+
3
+ You can use the table below to choose the BS Roformer `batch_size` parameter for training based on your GPUs. Batch size values are given for a single GPU. If you have several GPUs, multiply the value by the number of GPUs (see the short example after the table).
4
+
5
+ | chunk_size | dim | depth | batch_size (A6000 48GB) | batch_size (3090/4090 24GB) | batch_size (16GB) |
6
+ |:----------:|:---:|:-----:|:-----------------------:|:---------------------------:|:-----------------:|
7
+ | 131584 | 128 | 6 | 10 | 5 | 3 |
8
+ | 131584 | 256 | 6 | 8 | 4 | 2 |
9
+ | 131584 | 384 | 6 | 7 | 3 | 2 |
10
+ | 131584 | 512 | 6 | 6 | 3 | 2 |
11
+ | 131584 | 256 | 8 | 6 | 3 | 2 |
12
+ | 131584 | 256 | 12 | 4 | 2 | 1 |
13
+ | 263168 | 128 | 6 | 4 | 2 | 1 |
14
+ | 263168 | 256 | 6 | 3 | 1 | 1 |
15
+ | 352800 | 128 | 6 | 2 | 1 | - |
16
+ | 352800 | 256 | 6 | 2 | 1 | - |
17
+ | 352800 | 384 | 12 | 1 | - | - |
18
+ | 352800 | 512 | 12 | - | - | - |
19
+
20
+
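A quick editorial example of how the per-GPU values above combine into an effective batch size (`gradient_accumulation_steps` is the field used in the training configs in this commit; the numbers are illustrative):

```python
# Effective number of samples per optimizer step when training on several GPUs.
batch_size_per_gpu = 2             # picked from the table, e.g. chunk 352800 / dim 128 / depth 6 on an A6000
num_gpus = 4
gradient_accumulation_steps = 1    # training.gradient_accumulation_steps in the config
effective_batch = batch_size_per_gpu * num_gpus * gradient_accumulation_steps
print(effective_batch)             # 8
```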
21
+ Values in the table were obtained with the following initial config:
22
+
23
+ ```
24
+ audio:
25
+ chunk_size: 131584
26
+ dim_f: 1024
27
+ dim_t: 515
28
+ hop_length: 512
29
+ n_fft: 2048
30
+ num_channels: 2
31
+ sample_rate: 44100
32
+ min_mean_abs: 0.000
33
+
34
+ model:
35
+ dim: 384
36
+ depth: 12
37
+ stereo: true
38
+ num_stems: 1
39
+ time_transformer_depth: 1
40
+ freq_transformer_depth: 1
41
+ linear_transformer_depth: 0
42
+ freqs_per_bands: !!python/tuple
43
+ - 2
44
+ - 2
45
+ - 2
46
+ - 2
47
+ - 2
48
+ - 2
49
+ - 2
50
+ - 2
51
+ - 2
52
+ - 2
53
+ - 2
54
+ - 2
55
+ - 2
56
+ - 2
57
+ - 2
58
+ - 2
59
+ - 2
60
+ - 2
61
+ - 2
62
+ - 2
63
+ - 2
64
+ - 2
65
+ - 2
66
+ - 2
67
+ - 4
68
+ - 4
69
+ - 4
70
+ - 4
71
+ - 4
72
+ - 4
73
+ - 4
74
+ - 4
75
+ - 4
76
+ - 4
77
+ - 4
78
+ - 4
79
+ - 12
80
+ - 12
81
+ - 12
82
+ - 12
83
+ - 12
84
+ - 12
85
+ - 12
86
+ - 12
87
+ - 24
88
+ - 24
89
+ - 24
90
+ - 24
91
+ - 24
92
+ - 24
93
+ - 24
94
+ - 24
95
+ - 48
96
+ - 48
97
+ - 48
98
+ - 48
99
+ - 48
100
+ - 48
101
+ - 48
102
+ - 48
103
+ - 128
104
+ - 129
105
+ dim_head: 64
106
+ heads: 8
107
+ attn_dropout: 0.1
108
+ ff_dropout: 0.1
109
+ flash_attn: false
110
+ dim_freqs_in: 1025
111
+ stft_n_fft: 2048
112
+ stft_hop_length: 512
113
+ stft_win_length: 2048
114
+ stft_normalized: false
115
+ mask_estimator_depth: 2
116
+ multi_stft_resolution_loss_weight: 1.0
117
+ multi_stft_resolutions_window_sizes: !!python/tuple
118
+ - 4096
119
+ - 2048
120
+ - 1024
121
+ - 512
122
+ - 256
123
+ multi_stft_hop_size: 147
124
+ multi_stft_normalized: False
125
+
126
+ training:
127
+ batch_size: 1
128
+ gradient_accumulation_steps: 1
129
+ grad_clip: 0
130
+ instruments:
131
+ - vocals
132
+ - other
133
+ lr: 3.0e-05
134
+ patience: 2
135
+ reduce_factor: 0.95
136
+ target_instrument: vocals
137
+ num_epochs: 1000
138
+ num_steps: 1000
139
+ q: 0.95
140
+ coarse_loss_clip: true
141
+ ema_momentum: 0.999
142
+ optimizer: adam
143
+ other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
144
+ use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true
145
+ ```
data_pipeline/seperation/docs/changes.md ADDED
@@ -0,0 +1,20 @@
1
+ ### Changes
2
+
3
+ #### v1.0.2
4
+
5
+ * Added multi-GPU validation (earlier, validation was performed on a single GPU)
6
+ * `training.batch_size` in the config must now be set per single GPU (if you use multiple GPUs it is automatically multiplied by the number of GPUs)
7
+
8
+ #### v1.0.3
9
+
10
+ * Added "spawn" fix for multiprocessing
11
+ * Function `get_model_from_config` now takes path of config as input.
12
+ * The latest version of PyTorch has problems with `torch.backends.cudnn.benchmark = True` (a big slowdown), so the version is pinned to 2.0.1 in requirements.txt
13
+ * The `--valid_path` parameter for train.py can now accept several validation folders instead of one. Added a warning if a validation folder is empty.
14
+ * Small fix for AMP usage in Demucs models taken from config
15
+ * Support for Demucs3 mmi model was added
16
+ * GPU memory consumption was reduced during inference and validation.
17
+ * Some changes to fix clicking artifacts at segment edges.
18
+ * Added support for training on FLAC files. Added some more error checks.
19
+ * viperx's Roformer weights and configs added
20
+ * `--extract_instrumental` argument added to inference.py
data_pipeline/seperation/docs/dataset_types.md ADDED
@@ -0,0 +1,75 @@
1
+ ### Dataset types for training
2
+
3
+ * **Type 1 (MUSDB)**: different folders. Each folder contains all needed stems in the format _<stem name>.wav_, the same as in the MUSDB18HQ dataset. In the latest code releases it's possible to use `flac` instead of `wav`.
4
+
5
+ Example:
6
+ ```
7
+ --- Song 1:
8
+ ------ vocals.wav
9
+ ------ bass.wav
10
+ ------ drums.wav
11
+ ------ other.wav
12
+ --- Song 2:
13
+ ------ vocals.wav
14
+ ------ bass.wav
15
+ ------ drums.wav
16
+ ------ other.wav
17
+ --- Song 3:
18
+ ...........
19
+ ```
20
+
21
+ * **Type 2 (Stems)**: each folder is a stem name. The folder contains wav files consisting only of the required stem.
22
+ ```
23
+ --- vocals:
24
+ ------ vocals_1.wav
25
+ ------ vocals_2.wav
26
+ ------ vocals_3.wav
27
+ ------ vocals_4.wav
28
+ ------ ...
29
+ --- bass:
30
+ ------ bass_1.wav
31
+ ------ bass_2.wav
32
+ ------ bass_3.wav
33
+ ------ bass_4.wav
34
+ ------ ...
35
+ ...........
36
+ ```
37
+
38
+ * **Type 3 (CSV file)**:
39
+
40
+ You can provide a CSV file (or a list of CSV files) with the following structure (a short generation sketch follows the example):
41
+ ```
42
+ instrum,path
43
+ vocals,/path/to/dataset/vocals_1.wav
44
+ vocals,/path/to/dataset2/vocals_v2.wav
45
+ vocals,/path/to/dataset3/vocals_some.wav
46
+ ...
47
+ drums,/path/to/dataset/drums_good.wav
48
+ ...
49
+ ```
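A minimal editorial sketch for generating such a CSV from per-stem folders (stem names, the folder layout, and the output filename are placeholders):

```python
import csv
from glob import glob

# Build a Type 3 CSV: one row per stem file, with columns `instrum` and `path`.
rows = []
for instrum in ['vocals', 'other']:                      # any stems listed in training.instruments
    for path in sorted(glob(f'/path/to/dataset/{instrum}/*.wav')):
        rows.append({'instrum': instrum, 'path': path})

with open('dataset.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['instrum', 'path'])
    writer.writeheader()
    writer.writerows(rows)
```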
50
+
51
+ * **Type 4 (MUSDB Aligned)**:
52
+
53
+ The same as Type 1, but during training all instruments are taken from the same position in the song.
54
+
55
+ ### Dataset for validation
56
+
57
+ * The validation dataset must have the same structure as a Type 1 dataset (regardless of which dataset type you use for training), but each song folder must also include `mixture.wav`, which is the sum of all stems for that song (a small folder-check sketch follows the example below).
58
+
59
+ Example:
60
+ ```
61
+ --- Song 1:
62
+ ------ vocals.wav
63
+ ------ bass.wav
64
+ ------ drums.wav
65
+ ------ other.wav
66
+ ------ mixture.wav
67
+ --- Song 2:
68
+ ------ vocals.wav
69
+ ------ bass.wav
70
+ ------ drums.wav
71
+ ------ other.wav
72
+ ------ mixture.wav
73
+ --- Song 3:
74
+ ...........
75
+ ```
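As mentioned in the note above, a small editorial sketch for sanity-checking a validation folder before training (the stem list is an assumption; match it to `training.instruments`):

```python
import os
from glob import glob

STEMS = ['vocals', 'bass', 'drums', 'other']
REQUIRED = STEMS + ['mixture']          # every song folder must also contain mixture.wav

for song_dir in sorted(glob('/path/to/valid/*')):
    if not os.path.isdir(song_dir):
        continue
    missing = [name for name in REQUIRED
               if not any(os.path.isfile(os.path.join(song_dir, f'{name}.{ext}'))
                          for ext in ('wav', 'flac'))]
    if missing:
        print(f'{song_dir}: missing stems {missing}')
```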
data_pipeline/seperation/inference.py ADDED
@@ -0,0 +1,116 @@
1
+ # coding: utf-8
2
+ __author__ = 'Roman Solovyev (ZFTurbo): https://github.com/ZFTurbo/'
3
+
4
+ import argparse
5
+ import time
6
+ import librosa
7
+ from tqdm import tqdm
8
+ import sys
9
+ import os
10
+ import glob
11
+ import torch
12
+ import numpy as np
13
+ import soundfile as sf
14
+ import torch.nn as nn
15
+ from utils import demix_track, demix_track_demucs, get_model_from_config
16
+
17
+ import warnings
18
+ warnings.filterwarnings("ignore")
19
+
20
+
21
+ def run_folder(model, args, config, device, verbose=False):
22
+ start_time = time.time()
23
+ model.eval()
24
+ all_mixtures_path = glob.glob(args.input_folder + '/*.*')
25
+ print('Total files found: {}'.format(len(all_mixtures_path)))
26
+
27
+ instruments = config.training.instruments
28
+ if config.training.target_instrument is not None:
29
+ instruments = [config.training.target_instrument]
30
+
31
+ if not os.path.isdir(args.store_dir):
32
+ os.mkdir(args.store_dir)
33
+
34
+ if not verbose:
35
+ all_mixtures_path = tqdm(all_mixtures_path)
36
+
37
+ for path in all_mixtures_path:
38
+ if not verbose:
39
+ all_mixtures_path.set_postfix({'track': os.path.basename(path)})
40
+ try:
41
+ # mix, sr = sf.read(path)
42
+ mix, sr = librosa.load(path, sr=44100, mono=False)
43
+ mix = mix.T
44
+ except Exception as e:
45
+ print('Cannot read track: {}'.format(path))
46
+ print('Error message: {}'.format(str(e)))
47
+ continue
48
+
49
+ # Convert mono to stereo if needed
50
+ if len(mix.shape) == 1:
51
+ mix = np.stack([mix, mix], axis=-1)
52
+
53
+ mixture = torch.tensor(mix.T, dtype=torch.float32)
54
+ if args.model_type == 'htdemucs':
55
+ res = demix_track_demucs(config, model, mixture, device)
56
+ else:
57
+ res = demix_track(config, model, mixture, device)
58
+ for instr in instruments:
59
+ sf.write("{}/{}_{}.wav".format(args.store_dir, os.path.basename(path)[:-4], instr), res[instr].T, sr, subtype='FLOAT')
60
+
61
+ if 'vocals' in instruments and args.extract_instrumental:
62
+ instrum_file_name = "{}/{}_{}.wav".format(args.store_dir, os.path.basename(path)[:-4], 'instrumental')
63
+ sf.write(instrum_file_name, mix - res['vocals'].T, sr, subtype='FLOAT')
64
+
65
+ time.sleep(1)
66
+ print("Elapsed time: {:.2f} sec".format(time.time() - start_time))
67
+
68
+
69
+ def proc_folder(args):
70
+ parser = argparse.ArgumentParser()
71
+ parser.add_argument("--model_type", type=str, default='mdx23c', help="One of mdx23c, htdemucs, segm_models, mel_band_roformer, bs_roformer, swin_upernet, bandit")
72
+ parser.add_argument("--config_path", type=str, help="path to config file")
73
+ parser.add_argument("--start_check_point", type=str, default='', help="Initial checkpoint to valid weights")
74
+ parser.add_argument("--input_folder", type=str, help="folder with mixtures to process")
75
+ parser.add_argument("--store_dir", default="", type=str, help="path to store results as wav file")
76
+ parser.add_argument("--model-dir", default="", type=str, help="path to store results as wav file")
77
+ parser.add_argument("--log-dir", default="", type=str, help="path to store results as wav file")
78
+ parser.add_argument("--device_ids", nargs='+', type=int, default=0, help='list of gpu ids')
79
+ parser.add_argument("--extract_instrumental", action='store_true', help="invert vocals to get instrumental if provided")
80
+ print(f"cuda{torch.cuda.is_available()}")
81
+ if args is None:
82
+ args = parser.parse_args()
83
+ else:
84
+ args = parser.parse_args(args)
85
+
86
+ torch.backends.cudnn.benchmark = True
87
+
88
+ model, config = get_model_from_config(args.model_type, args.config_path)
89
+ if args.start_check_point != '':
90
+ print('Start from checkpoint: {}'.format(args.start_check_point))
91
+ state_dict = torch.load(args.start_check_point, map_location='cpu')
92
+ if args.model_type == 'htdemucs':
93
+ # Fix for htdemucs pretrained models
94
+ if 'state' in state_dict:
95
+ state_dict = state_dict['state']
96
+ model.load_state_dict(state_dict)
97
+ print("Instruments: {}".format(config.training.instruments))
98
+
99
+ if torch.cuda.is_available():
100
+ device_ids = args.device_ids
101
+ if type(device_ids)==int:
102
+ device = torch.device(f'cuda:{device_ids}')
103
+ model = model.to(device)
104
+ else:
105
+ device = torch.device(f'cuda:{device_ids[0]}')
106
+ model = nn.DataParallel(model, device_ids=device_ids).to(device)
107
+ else:
108
+ device = 'cpu'
109
+ print('CUDA is not available. Running inference on CPU. It will be very slow...')
110
+ model = model.to(device)
111
+
112
+ run_folder(model, args, config, device, verbose=False)
113
+
114
+
115
+ if __name__ == "__main__":
116
+ proc_folder(None)
data_pipeline/seperation/inference.sh ADDED
@@ -0,0 +1,11 @@
1
+ input_dir=$1
2
+ output_dir=$2
3
+ ckpt_dir=$3
4
+
5
+ python3 inference.py \
6
+ --model_type bs_roformer \
7
+ --config_path ${ckpt_dir}/model_bs_roformer_ep_317_sdr_12.9755.yaml \
8
+ --start_check_point ${ckpt_dir}/model_bs_roformer_ep_317_sdr_12.9755.ckpt \
9
+ --input_folder ${input_dir} \
10
+ --store_dir ${output_dir} \
11
+ --extract_instrumental
data_pipeline/seperation/inference_mp.py ADDED
@@ -0,0 +1,154 @@
1
+ import torch
2
+ import torch.multiprocessing as mp
3
+ import os, sys
4
+ import threading
5
+ from tqdm import tqdm
6
+ import soundfile as sf
7
+ import threading
8
+ import librosa
9
+ import numpy as np
10
+ from utils import demix_track, demix_track_demucs, get_model_from_config
11
+ import traceback
12
+ import glob
13
+ import argparse
14
+
15
+ import warnings
16
+ warnings.filterwarnings("ignore")
17
+
18
+ def normalize_audio(y, target_dbfs=0):
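+ # Peak-normalize to target_dbfs; near-silent input (peak < 0.1) is returned unchanged so noise is not amplified.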
19
+ max_amplitude = np.max(np.abs(y))
20
+ if max_amplitude < 0.1:
21
+ return y
22
+
23
+ target_amplitude = 10.0**(target_dbfs / 20.0)
24
+ scale_factor = target_amplitude / max_amplitude
25
+
26
+ normalized_audio = y * scale_factor
27
+
28
+ return normalized_audio
29
+
30
+ def inference(rank, ckpt_root, out_dir, queue: mp.Queue):
31
+ #print(f"thread {rank} start")
32
+ device = f"cuda:{rank}"
33
+ config = f"{ckpt_root}/model_bs_roformer_ep_317_sdr_12.9755.yaml"
34
+ ckpt = f"{ckpt_root}/model_bs_roformer_ep_317_sdr_12.9755.ckpt"
35
+ model, config = get_model_from_config("bs_roformer", config)
36
+ state_dict = torch.load(ckpt, map_location='cpu')
37
+ model.load_state_dict(state_dict)
38
+ model = model.to(device)
39
+ model.eval()
40
+
41
+
42
+ with torch.no_grad():
43
+ while True:
44
+ #print(texts)
45
+ filename = queue.get()
46
+ if filename is None:
47
+ break
48
+ filepath = filename[0]
49
+ filename = filepath.split('/')[-1]
50
+ try:
51
+ mix, sr = librosa.load(filepath, sr=44100, mono=False)
52
+ #mix = normalize_audio(mix, -6)
53
+ mix = mix.T
54
+ if len(mix.shape) == 1:
55
+ mix = np.stack([mix, mix], axis=-1)
56
+
57
+ mixture = torch.tensor(mix.T, dtype=torch.float32)
58
+ res = demix_track(config, model, mixture, device)
59
+ sf.write("{}/{}".format(os.path.join(out_dir, "vocal"), filename), res['vocals'].T.mean(-1), sr, subtype='FLOAT')
60
+ sf.write("{}/{}".format(os.path.join(out_dir, "bgm"), filename), mix.mean(-1) - res['vocals'].T.mean(-1), sr, subtype='FLOAT')
61
+
62
+
63
+ except Exception as e:
64
+ traceback.print_exc()
65
+ continue
66
+
67
+
68
+
69
+ def setInterval(interval):
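+ # Decorator: run the wrapped function every `interval` seconds in a daemon thread; calling .set() on the returned Event stops the loop.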
70
+ def decorator(function):
71
+ def wrapper(*args, **kwargs):
72
+ stopped = threading.Event()
73
+
74
+ def loop(): # executed in another thread
75
+ while not stopped.wait(interval): # until stopped
76
+ function(*args, **kwargs)
77
+
78
+ t = threading.Thread(target=loop)
79
+ t.daemon = True # stop if the program exits
80
+ t.start()
81
+ return stopped
82
+
83
+ return wrapper
84
+
85
+ return decorator
86
+
87
+ last_batches = None
88
+
89
+ @setInterval(3)
90
+ def QueueWatcher(queue, bar):
91
+ global last_batches
92
+ curr_batches = queue.qsize()
93
+ bar.update(last_batches-curr_batches)
94
+ last_batches = curr_batches
95
+
96
+ if __name__ == "__main__":
97
+ parser = argparse.ArgumentParser()
98
+ parser.add_argument("--filelist_or_dir", type=str, required=True, help="Path to save checkpoints")
99
+ parser.add_argument("--out_dir", type=str, required=True, help="Path to save checkpoints")
100
+ parser.add_argument("--ckpt_path", type=str, required=True, help="Path to save checkpoints")
101
+ parser.add_argument("--jobs", type=int, required=False, default=2, help="Path to save checkpoints")
102
+ parser.add_argument("--log_dir", type=str, required=False, default="large-v3", help="Path to save checkpoints")
103
+ parser.add_argument("--model_dir", type=str, required=False, default="large-v3", help="Path to save checkpoints")
104
+ args = parser.parse_args()
105
+
106
+ filelist_or_dir = args.filelist_or_dir
107
+ out_dir = args.out_dir
108
+ ckpt_path = args.ckpt_path
109
+ jobs = args.jobs
110
+ vad_jobs = jobs * 2
111
+
112
+ if os.path.isfile(filelist_or_dir):
113
+ filelist_name = filelist_or_dir.split('/')[-1].split('.')[0]
114
+ generator = open(filelist_or_dir).read().splitlines()
115
+ else:
116
+ filelist_name = "single"
117
+ generator = glob.glob(f"{filelist_or_dir}/*.wav")
118
+
119
+ os.makedirs(os.path.join(out_dir, "vocal"), exist_ok=True)
120
+ os.makedirs(os.path.join(out_dir, "bgm"), exist_ok=True)
121
+
122
+
123
+ gpu_num = torch.cuda.device_count()
124
+
125
+ processes = []
126
+ vad_processes = []
127
+ queue = mp.Queue()
128
+ vad_queue = mp.Queue()
129
+ for thread_num in range(jobs):
130
+ rank = thread_num % gpu_num
131
+ p = mp.Process(target=inference, args=(rank, ckpt_path, out_dir, queue))
132
+ p.start()
133
+ processes.append(p)
134
+
135
+ accum = []
136
+
137
+ for filename in tqdm(generator):
138
+ accum.append(filename)
139
+ if len(accum) == 1:
140
+ queue.put(accum.copy())
141
+ accum.clear()
142
+
143
+ for _ in range(jobs):
144
+ queue.put(None)
145
+
146
+ last_batches = queue.qsize()
147
+ bar = tqdm(total=last_batches, desc="seperation")
148
+ queue_watcher = QueueWatcher(queue, bar)
149
+ for p in processes:
150
+ p.join()
151
+ queue_watcher.set()
152
+
153
+ for p in vad_processes:
154
+ p.join()
data_pipeline/seperation/inference_mp.sh ADDED
@@ -0,0 +1,7 @@
1
+ python3 inference_mp.py \
+     --filelist_or_dir /data/v-ziqianning/workspace/SingingTTS/data/youtube_testdata/wav \
+     --out_dir /data/v-ziqianning/workspace/SingingTTS/data/youtube_testdata \
+     --ckpt_path /data/v-ziqianning/workspace/SingingTTS/data_pipeline/seperation/Music-Source-Separation-Training/ckpts \
+     --jobs 4
data_pipeline/seperation/models/bandit/core/__init__.py ADDED
@@ -0,0 +1,744 @@
1
+ import os.path
2
+ from collections import defaultdict
3
+ from itertools import chain, combinations
4
+ from typing import (
5
+ Any,
6
+ Dict,
7
+ Iterator,
8
+ Mapping, Optional,
9
+ Tuple, Type,
10
+ TypedDict
11
+ )
12
+
13
+ import pytorch_lightning as pl
14
+ import torch
15
+ import torchaudio as ta
16
+ import torchmetrics as tm
17
+ from asteroid import losses as asteroid_losses
18
+ # from deepspeed.ops.adam import DeepSpeedCPUAdam
19
+ # from geoopt import optim as gooptim
20
+ from pytorch_lightning.utilities.types import STEP_OUTPUT
21
+ from torch import nn, optim
22
+ from torch.optim import lr_scheduler
23
+ from torch.optim.lr_scheduler import LRScheduler
24
+
25
+ from models.bandit.core import loss, metrics as metrics_, model
26
+ from models.bandit.core.data._types import BatchedDataDict
27
+ from models.bandit.core.data.augmentation import BaseAugmentor, StemAugmentor
28
+ from models.bandit.core.utils import audio as audio_
29
+ from models.bandit.core.utils.audio import BaseFader
30
+
31
+ # from pandas.io.json._normalize import nested_to_record
32
+
33
+ ConfigDict = TypedDict('ConfigDict', {'name': str, 'kwargs': Dict[str, Any]})
34
+
35
+
36
+ class SchedulerConfigDict(ConfigDict):
37
+ monitor: str
38
+
39
+
40
+ OptimizerSchedulerConfigDict = TypedDict(
41
+ 'OptimizerSchedulerConfigDict',
42
+ {"optimizer": ConfigDict, "scheduler": SchedulerConfigDict},
43
+ total=False
44
+ )
45
+
46
+
47
+ class LRSchedulerReturnDict(TypedDict, total=False):
48
+ scheduler: LRScheduler
49
+ monitor: str
50
+
51
+
52
+ class ConfigureOptimizerReturnDict(TypedDict, total=False):
53
+ optimizer: torch.optim.Optimizer
54
+ lr_scheduler: LRSchedulerReturnDict
55
+
56
+
57
+ OutputType = Dict[str, Any]
58
+ MetricsType = Dict[str, torch.Tensor]
59
+
60
+
61
+ def get_optimizer_class(name: str) -> Type[optim.Optimizer]:
62
+
63
+ if name == "DeepSpeedCPUAdam":
64
+ return DeepSpeedCPUAdam
65
+
66
+ for module in [optim]:  # note: the geoopt import (gooptim) above is commented out
67
+ if name in module.__dict__:
68
+ return module.__dict__[name]
69
+
70
+ raise NameError
71
+
72
+
73
+ def parse_optimizer_config(
74
+ config: OptimizerSchedulerConfigDict,
75
+ parameters: Iterator[nn.Parameter]
76
+ ) -> ConfigureOptimizerReturnDict:
77
+ optim_class = get_optimizer_class(config["optimizer"]["name"])
78
+ optimizer = optim_class(parameters, **config["optimizer"]["kwargs"])
79
+
80
+ optim_dict: ConfigureOptimizerReturnDict = {
81
+ "optimizer": optimizer,
82
+ }
83
+
84
+ if "scheduler" in config:
85
+
86
+ lr_scheduler_class_ = config["scheduler"]["name"]
87
+ lr_scheduler_class = lr_scheduler.__dict__[lr_scheduler_class_]
88
+ lr_scheduler_dict: LRSchedulerReturnDict = {
89
+ "scheduler": lr_scheduler_class(
90
+ optimizer,
91
+ **config["scheduler"]["kwargs"]
92
+ )
93
+ }
94
+
95
+ if lr_scheduler_class_ == "ReduceLROnPlateau":
96
+ lr_scheduler_dict["monitor"] = config["scheduler"]["monitor"]
97
+
98
+ optim_dict["lr_scheduler"] = lr_scheduler_dict
99
+
100
+ return optim_dict
101
+
102
+
103
+ def parse_model_config(config: ConfigDict) -> Any:
104
+ name = config["name"]
105
+
106
+ for module in [model]:
107
+ if name in module.__dict__:
108
+ return module.__dict__[name](**config["kwargs"])
109
+
110
+ raise NameError
111
+
112
+
113
+ _LEGACY_LOSS_NAMES = ["HybridL1Loss"]
114
+
115
+
116
+ def _parse_legacy_loss_config(config: ConfigDict) -> nn.Module:
117
+ name = config["name"]
118
+
119
+ if name == "HybridL1Loss":
120
+ return loss.TimeFreqL1Loss(**config["kwargs"])
121
+
122
+ raise NameError
123
+
124
+
125
+ def parse_loss_config(config: ConfigDict) -> nn.Module:
126
+ name = config["name"]
127
+
128
+ if name in _LEGACY_LOSS_NAMES:
129
+ return _parse_legacy_loss_config(config)
130
+
131
+ for module in [loss, nn.modules.loss, asteroid_losses]:
132
+ if name in module.__dict__:
133
+ # print(config["kwargs"])
134
+ return module.__dict__[name](**config["kwargs"])
135
+
136
+ raise NameError
137
+
138
+
139
+ def get_metric(config: ConfigDict) -> tm.Metric:
140
+ name = config["name"]
141
+
142
+ for module in [tm, metrics_]:
143
+ if name in module.__dict__:
144
+ return module.__dict__[name](**config["kwargs"])
145
+ raise NameError
146
+
147
+
148
+ def parse_metric_config(config: Dict[str, ConfigDict]) -> tm.MetricCollection:
149
+ metrics = {}
150
+
151
+ for metric in config:
152
+ metrics[metric] = get_metric(config[metric])
153
+
154
+ return tm.MetricCollection(metrics)
155
+
156
+
157
+ def parse_fader_config(config: ConfigDict) -> BaseFader:
158
+ name = config["name"]
159
+
160
+ for module in [audio_]:
161
+ if name in module.__dict__:
162
+ return module.__dict__[name](**config["kwargs"])
163
+
164
+ raise NameError
165
+
166
+
167
+ class LightningSystem(pl.LightningModule):
168
+ _VOX_STEMS = ["speech", "vocals"]
169
+ _BG_STEMS = ["background", "effects", "mne"]
170
+
171
+ def __init__(
172
+ self,
173
+ config: Dict,
174
+ loss_adjustment: float = 1.0,
175
+ attach_fader: bool = False
176
+ ) -> None:
177
+ super().__init__()
178
+ self.optimizer_config = config["optimizer"]
179
+ self.model = parse_model_config(config["model"])
180
+ self.loss = parse_loss_config(config["loss"])
181
+ self.metrics = nn.ModuleDict(
182
+ {
183
+ stem: parse_metric_config(config["metrics"]["dev"])
184
+ for stem in self.model.stems
185
+ }
186
+ )
187
+
188
+ self.metrics.disallow_fsdp = True
189
+
190
+ self.test_metrics = nn.ModuleDict(
191
+ {
192
+ stem: parse_metric_config(config["metrics"]["test"])
193
+ for stem in self.model.stems
194
+ }
195
+ )
196
+
197
+ self.test_metrics.disallow_fsdp = True
198
+
199
+ self.fs = config["model"]["kwargs"]["fs"]
200
+
201
+ self.fader_config = config["inference"]["fader"]
202
+ if attach_fader:
203
+ self.fader = parse_fader_config(config["inference"]["fader"])
204
+ else:
205
+ self.fader = None
206
+
207
+ self.augmentation: Optional[BaseAugmentor]
208
+ if config.get("augmentation", None) is not None:
209
+ self.augmentation = StemAugmentor(**config["augmentation"])
210
+ else:
211
+ self.augmentation = None
212
+
213
+ self.predict_output_path: Optional[str] = None
214
+ self.loss_adjustment = loss_adjustment
215
+
216
+ self.val_prefix = None
217
+ self.test_prefix = None
218
+
219
+
220
+ def configure_optimizers(self) -> Any:
221
+ return parse_optimizer_config(
222
+ self.optimizer_config,
223
+ self.trainer.model.parameters()
224
+ )
225
+
226
+ def compute_loss(self, batch: BatchedDataDict, output: OutputType) -> Dict[
227
+ str, torch.Tensor]:
228
+ return {"loss": self.loss(output, batch)}
229
+
230
+ def update_metrics(
231
+ self,
232
+ batch: BatchedDataDict,
233
+ output: OutputType,
234
+ mode: str
235
+ ) -> None:
236
+
237
+ if mode == "test":
238
+ metrics = self.test_metrics
239
+ else:
240
+ metrics = self.metrics
241
+
242
+ for stem, metric in metrics.items():
243
+
244
+ if stem == "mne:+":
245
+ stem = "mne"
246
+
247
+ # print(f"matching for {stem}")
248
+ if mode == "train":
249
+ metric.update(
250
+ output["audio"][stem],#.cpu(),
251
+ batch["audio"][stem],#.cpu()
252
+ )
253
+ else:
254
+ if stem not in batch["audio"]:
255
+ matched = False
256
+ if stem in self._VOX_STEMS:
257
+ for bstem in self._VOX_STEMS:
258
+ if bstem in batch["audio"]:
259
+ batch["audio"][stem] = batch["audio"][bstem]
260
+ matched = True
261
+ break
262
+ elif stem in self._BG_STEMS:
263
+ for bstem in self._BG_STEMS:
264
+ if bstem in batch["audio"]:
265
+ batch["audio"][stem] = batch["audio"][bstem]
266
+ matched = True
267
+ break
268
+ else:
269
+ matched = True
270
+
271
+ # print(batch["audio"].keys())
272
+
273
+ if matched:
274
+ # print(f"matched {stem}!")
275
+ if stem == "mne" and "mne" not in output["audio"]:
276
+ output["audio"]["mne"] = output["audio"]["music"] + output["audio"]["effects"]
277
+
278
+ metric.update(
279
+ output["audio"][stem],#.cpu(),
280
+ batch["audio"][stem],#.cpu(),
281
+ )
282
+
283
+ # print(metric.compute())
284
+ def compute_metrics(self, mode: str="dev") -> Dict[
285
+ str, torch.Tensor]:
286
+
287
+ if mode == "test":
288
+ metrics = self.test_metrics
289
+ else:
290
+ metrics = self.metrics
291
+
292
+ metric_dict = {}
293
+
294
+ for stem, metric in metrics.items():
295
+ md = metric.compute()
296
+ metric_dict.update(
297
+ {f"{stem}/{k}": v for k, v in md.items()}
298
+ )
299
+
300
+ self.log_dict(metric_dict, prog_bar=True, logger=False)
301
+
302
+ return metric_dict
303
+
304
+ def reset_metrics(self, test_mode: bool = False) -> None:
305
+
306
+ if test_mode:
307
+ metrics = self.test_metrics
308
+ else:
309
+ metrics = self.metrics
310
+
311
+ for _, metric in metrics.items():
312
+ metric.reset()
313
+
314
+
315
+ def forward(self, batch: BatchedDataDict) -> Any:
316
+ batch, output = self.model(batch)
317
+
318
+
319
+ return batch, output
320
+
321
+ def common_step(self, batch: BatchedDataDict, mode: str) -> Any:
322
+ batch, output = self.forward(batch)
323
+ # print(batch)
324
+ # print(output)
325
+ loss_dict = self.compute_loss(batch, output)
326
+
327
+ with torch.no_grad():
328
+ self.update_metrics(batch, output, mode=mode)
329
+
330
+ if mode == "train":
331
+ self.log("loss", loss_dict["loss"], prog_bar=True)
332
+
333
+ return output, loss_dict
334
+
335
+
336
+ def training_step(self, batch: BatchedDataDict) -> Dict[str, Any]:
337
+
338
+ if self.augmentation is not None:
339
+ with torch.no_grad():
340
+ batch = self.augmentation(batch)
341
+
342
+ _, loss_dict = self.common_step(batch, mode="train")
343
+
344
+ with torch.inference_mode():
345
+ self.log_dict_with_prefix(
346
+ loss_dict,
347
+ "train",
348
+ batch_size=batch["audio"]["mixture"].shape[0]
349
+ )
350
+
351
+ loss_dict["loss"] *= self.loss_adjustment
352
+
353
+ return loss_dict
354
+
355
+    def on_train_batch_end(
+            self, outputs: STEP_OUTPUT, batch: BatchedDataDict, batch_idx: int
+    ) -> None:
+
+        metric_dict = self.compute_metrics()
+        self.log_dict_with_prefix(metric_dict, "train")
+        self.reset_metrics()
+
+    def validation_step(
+            self,
+            batch: BatchedDataDict,
+            batch_idx: int,
+            dataloader_idx: int = 0
+    ) -> Dict[str, Any]:
+
+        with torch.inference_mode():
+            curr_val_prefix = f"val{dataloader_idx}" if dataloader_idx > 0 else "val"
+
+            if curr_val_prefix != self.val_prefix:
+                # print(f"Switching to validation dataloader {dataloader_idx}")
+                if self.val_prefix is not None:
+                    self._on_validation_epoch_end()
+                self.val_prefix = curr_val_prefix
+            _, loss_dict = self.common_step(batch, mode="val")
+
+            self.log_dict_with_prefix(
+                loss_dict,
+                self.val_prefix,
+                batch_size=batch["audio"]["mixture"].shape[0],
+                prog_bar=True,
+                add_dataloader_idx=False
+            )
+
+        return loss_dict
+
+    def on_validation_epoch_end(self) -> None:
+        self._on_validation_epoch_end()
+
+    def _on_validation_epoch_end(self) -> None:
+        metric_dict = self.compute_metrics()
+        self.log_dict_with_prefix(metric_dict, self.val_prefix, prog_bar=True,
+                                  add_dataloader_idx=False)
+        # self.logger.save()
+        # print(self.val_prefix, "Validation metrics:", metric_dict)
+        self.reset_metrics()
+
+
+    def old_predtest_step(
+            self,
+            batch: BatchedDataDict,
+            batch_idx: int,
+            dataloader_idx: int = 0
+    ) -> Tuple[BatchedDataDict, OutputType]:
+
+        audio_batch = batch["audio"]["mixture"]
+        track_batch = batch.get("track", ["" for _ in range(len(audio_batch))])
+
+        output_list_of_dicts = [
+            self.fader(
+                audio[None, ...],
+                lambda a: self.test_forward(a, track)
+            )
+            for audio, track in zip(audio_batch, track_batch)
+        ]
+
+        output_dict_of_lists = defaultdict(list)
+
+        for output_dict in output_list_of_dicts:
+            for stem, audio in output_dict.items():
+                output_dict_of_lists[stem].append(audio)
+
+        output = {
+            "audio": {
+                stem: torch.concat(output_list, dim=0)
+                for stem, output_list in output_dict_of_lists.items()
+            }
+        }
+
+        return batch, output
+
+    def predtest_step(
+            self,
+            batch: BatchedDataDict,
+            batch_idx: int = -1,
+            dataloader_idx: int = 0
+    ) -> Tuple[BatchedDataDict, OutputType]:
+
+        if getattr(self.model, "bypass_fader", False):
+            batch, output = self.model(batch)
+        else:
+            audio_batch = batch["audio"]["mixture"]
+            output = self.fader(
+                audio_batch,
+                lambda a: self.test_forward(a, "", batch=batch)
+            )
+
+        return batch, output
+
+    def test_forward(
+            self,
+            audio: torch.Tensor,
+            track: str = "",
+            batch: BatchedDataDict = None
+    ) -> torch.Tensor:
+
+        if self.fader is None:
+            self.attach_fader()
+
+        cond = batch.get("condition", None)
+
+        if cond is not None and cond.shape[0] == 1:
+            cond = cond.repeat(audio.shape[0], 1)
+
+        _, output = self.forward(
+            {"audio": {"mixture": audio},
+             "track": track,
+             "condition": cond,
+             }
+        )  # TODO: support track properly
+
+        return output["audio"]
+
+    def on_test_epoch_start(self) -> None:
+        self.attach_fader(force_reattach=True)
+
+    def test_step(
+            self,
+            batch: BatchedDataDict,
+            batch_idx: int,
+            dataloader_idx: int = 0
+    ) -> Any:
+        curr_test_prefix = f"test{dataloader_idx}"
+
+        # print(batch["audio"].keys())
+
+        if curr_test_prefix != self.test_prefix:
+            # print(f"Switching to test dataloader {dataloader_idx}")
+            if self.test_prefix is not None:
+                self._on_test_epoch_end()
+            self.test_prefix = curr_test_prefix
+
+        with torch.inference_mode():
+            _, output = self.predtest_step(batch, batch_idx, dataloader_idx)
+            # print(output)
+            self.update_metrics(batch, output, mode="test")
+
+        return output
+
+    def on_test_epoch_end(self) -> None:
+        self._on_test_epoch_end()
+
+    def _on_test_epoch_end(self) -> None:
+        metric_dict = self.compute_metrics(mode="test")
+        self.log_dict_with_prefix(metric_dict, self.test_prefix, prog_bar=True,
+                                  add_dataloader_idx=False)
+        # self.logger.save()
+        # print(self.test_prefix, "Test metrics:", metric_dict)
+        self.reset_metrics()
+
+    def predict_step(
+            self,
+            batch: BatchedDataDict,
+            batch_idx: int = 0,
+            dataloader_idx: int = 0,
+            include_track_name: Optional[bool] = None,
+            get_no_vox_combinations: bool = True,
+            get_residual: bool = False,
+            treat_batch_as_channels: bool = False,
+            fs: Optional[int] = None,
+    ) -> Any:
+        assert self.predict_output_path is not None
+
+        batch_size = batch["audio"]["mixture"].shape[0]
+
+        if include_track_name is None:
+            include_track_name = batch_size > 1
+
+        with torch.inference_mode():
+            batch, output = self.predtest_step(batch, batch_idx, dataloader_idx)
+            print('Pred test finished...')
+            torch.cuda.empty_cache()
+            metric_dict = {}
+
+            if get_residual:
+                mixture = batch["audio"]["mixture"]
+                extracted = sum([output["audio"][stem] for stem in output["audio"]])
+                residual = mixture - extracted
+                print(extracted.shape, mixture.shape, residual.shape)
+
+                output["audio"]["residual"] = residual
+
+            if get_no_vox_combinations:
+                no_vox_stems = [
+                    stem for stem in output["audio"] if
+                    stem not in self._VOX_STEMS
+                ]
+                no_vox_combinations = chain.from_iterable(
+                    combinations(no_vox_stems, r) for r in
+                    range(2, len(no_vox_stems) + 1)
+                )
+
+                for combination in no_vox_combinations:
+                    combination_ = list(combination)
+                    output["audio"]["+".join(combination_)] = sum(
+                        [output["audio"][stem] for stem in combination_]
+                    )
+
+            if treat_batch_as_channels:
+                for stem in output["audio"]:
+                    output["audio"][stem] = output["audio"][stem].reshape(
+                        1, -1, output["audio"][stem].shape[-1]
+                    )
+                batch_size = 1
+
+            for b in range(batch_size):
+                print("!!", b)
+                for stem in output["audio"]:
+                    print(f"Saving audio for {stem} to {self.predict_output_path}")
+                    track_name = batch["track"][b].split("/")[-1]
+
+                    if batch.get("audio", {}).get(stem, None) is not None:
+                        self.test_metrics[stem].reset()
+                        metrics = self.test_metrics[stem](
+                            batch["audio"][stem][[b], ...],
+                            output["audio"][stem][[b], ...]
+                        )
+                        snr = metrics["snr"]
+                        sisnr = metrics["sisnr"]
+                        sdr = metrics["sdr"]
+                        metric_dict[stem] = metrics
+                        print(
+                            track_name,
+                            f"snr={snr:2.2f} dB",
+                            f"sisnr={sisnr:2.2f}",
+                            f"sdr={sdr:2.2f} dB",
+                        )
+                        filename = f"{stem} - snr={snr:2.2f}dB - sdr={sdr:2.2f}dB.wav"
+                    else:
+                        filename = f"{stem}.wav"
+
+                    if include_track_name:
+                        output_dir = os.path.join(
+                            self.predict_output_path,
+                            track_name
+                        )
+                    else:
+                        output_dir = self.predict_output_path
+
+                    os.makedirs(output_dir, exist_ok=True)
+
+                    if fs is None:
+                        fs = self.fs
+
+                    ta.save(
+                        os.path.join(output_dir, filename),
+                        output["audio"][stem][b, ...].cpu(),
+                        fs,
+                    )
+
+        return metric_dict
+
+    def get_stems(
+            self,
+            batch: BatchedDataDict,
+            batch_idx: int = 0,
+            dataloader_idx: int = 0,
+            include_track_name: Optional[bool] = None,
+            get_no_vox_combinations: bool = True,
+            get_residual: bool = False,
+            treat_batch_as_channels: bool = False,
+            fs: Optional[int] = None,
+    ) -> Any:
+        assert self.predict_output_path is not None
+
+        batch_size = batch["audio"]["mixture"].shape[0]
+
+        if include_track_name is None:
+            include_track_name = batch_size > 1
+
+        with torch.inference_mode():
+            batch, output = self.predtest_step(batch, batch_idx, dataloader_idx)
+            torch.cuda.empty_cache()
+            metric_dict = {}
+
+            if get_residual:
+                mixture = batch["audio"]["mixture"]
+                extracted = sum([output["audio"][stem] for stem in output["audio"]])
+                residual = mixture - extracted
+                # print(extracted.shape, mixture.shape, residual.shape)
+
+                output["audio"]["residual"] = residual
+
+            if get_no_vox_combinations:
+                no_vox_stems = [
+                    stem for stem in output["audio"] if
+                    stem not in self._VOX_STEMS
+                ]
+                no_vox_combinations = chain.from_iterable(
+                    combinations(no_vox_stems, r) for r in
+                    range(2, len(no_vox_stems) + 1)
+                )
+
+                for combination in no_vox_combinations:
+                    combination_ = list(combination)
+                    output["audio"]["+".join(combination_)] = sum(
+                        [output["audio"][stem] for stem in combination_]
+                    )
+
+            if treat_batch_as_channels:
+                for stem in output["audio"]:
+                    output["audio"][stem] = output["audio"][stem].reshape(
+                        1, -1, output["audio"][stem].shape[-1]
+                    )
+                batch_size = 1
+
+            result = {}
+            for b in range(batch_size):
+                for stem in output["audio"]:
+                    track_name = batch["track"][b].split("/")[-1]
+
+                    if batch.get("audio", {}).get(stem, None) is not None:
+                        self.test_metrics[stem].reset()
+                        metrics = self.test_metrics[stem](
+                            batch["audio"][stem][[b], ...],
+                            output["audio"][stem][[b], ...]
+                        )
+                        snr = metrics["snr"]
+                        sisnr = metrics["sisnr"]
+                        sdr = metrics["sdr"]
+                        metric_dict[stem] = metrics
+                        print(
+                            track_name,
+                            f"snr={snr:2.2f} dB",
+                            f"sisnr={sisnr:2.2f}",
+                            f"sdr={sdr:2.2f} dB",
+                        )
+                        filename = f"{stem} - snr={snr:2.2f}dB - sdr={sdr:2.2f}dB.wav"
+                    else:
+                        filename = f"{stem}.wav"
+
+                    if include_track_name:
+                        output_dir = os.path.join(
+                            self.predict_output_path,
+                            track_name
+                        )
+                    else:
+                        output_dir = self.predict_output_path
+
+                    os.makedirs(output_dir, exist_ok=True)
+
+                    if fs is None:
+                        fs = self.fs
+
+                    result[stem] = output["audio"][stem][b, ...].cpu().numpy()
+
+        return result
+
+    def load_state_dict(
+            self, state_dict: Mapping[str, Any], strict: bool = False
+    ) -> Any:
+
+        return super().load_state_dict(state_dict, strict=False)
+
+
+    def set_predict_output_path(self, path: str) -> None:
+        self.predict_output_path = path
+        os.makedirs(self.predict_output_path, exist_ok=True)
+
+        self.attach_fader()
+
+    def attach_fader(self, force_reattach=False) -> None:
+        if self.fader is None or force_reattach:
+            self.fader = parse_fader_config(self.fader_config)
+            self.fader.to(self.device)
+
+
+    def log_dict_with_prefix(
+            self,
+            dict_: Dict[str, torch.Tensor],
+            prefix: str,
+            batch_size: Optional[int] = None,
+            **kwargs: Any
+    ) -> None:
+        self.log_dict(
+            {f"{prefix}/{k}": v for k, v in dict_.items()},
+            batch_size=batch_size,
+            logger=True,
+            sync_dist=True,
+            **kwargs,
+        )
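For readers skimming the diff: predict_step and get_stems above emit extra output stems whose names are the "+"-joined members of every non-vocal stem combination, plus an optional "residual" stem (mixture minus the sum of all predicted stems) when get_residual is set. A minimal standalone sketch of that naming logic, with illustrative stem names that are not taken from this commit's configs:

# Sketch of the no-vocal combination naming used by predict_step/get_stems.
# "speech", "music", "effects" stand in for output["audio"] keys; vox_stems
# stands in for self._VOX_STEMS.
from itertools import chain, combinations

output_stems = ["speech", "music", "effects"]
vox_stems = {"speech"}
no_vox_stems = [s for s in output_stems if s not in vox_stems]

no_vox_combinations = chain.from_iterable(
    combinations(no_vox_stems, r) for r in range(2, len(no_vox_stems) + 1)
)
print(["+".join(c) for c in no_vox_combinations])  # ['music+effects']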
data_pipeline/seperation/models/bandit/core/data/__init__.py ADDED
@@ -0,0 +1,2 @@
+from .dnr.datamodule import DivideAndRemasterDataModule
+from .musdb.datamodule import MUSDB18DataModule
data_pipeline/seperation/models/bandit/core/data/_types.py ADDED
@@ -0,0 +1,18 @@
+from typing import Dict, Sequence, TypedDict
+
+import torch
+
+AudioDict = Dict[str, torch.Tensor]
+
+DataDict = TypedDict('DataDict', {'audio': AudioDict, 'track': str})
+
+BatchedDataDict = TypedDict(
+    'BatchedDataDict',
+    {'audio': AudioDict, 'track': Sequence[str]}
+)
+
+
+class DataDictWithLanguage(TypedDict):
+    audio: AudioDict
+    track: str
+    language: str
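A DataDict carries one track's stem tensors keyed by stem name, and a BatchedDataDict carries the collated batch together with a sequence of track names. A minimal construction sketch (channel counts, sample counts, and track names are assumed for illustration; the import path follows the models.bandit... convention used elsewhere in this package):

import torch

from models.bandit.core.data._types import BatchedDataDict, DataDict

# One track: stem tensors of shape (channels, samples), keyed by stem name.
item: DataDict = {
    "audio": {"mixture": torch.zeros(2, 44100), "speech": torch.zeros(2, 44100)},
    "track": "demo/track0001",
}

# A collated batch: (batch, channels, samples) tensors and one track name per item.
batch: BatchedDataDict = {
    "audio": {"mixture": torch.zeros(4, 2, 44100)},
    "track": ["demo/track0001"] * 4,
}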
data_pipeline/seperation/models/bandit/core/data/augmentation.py ADDED
@@ -0,0 +1,107 @@
+from abc import ABC
+from typing import Any, Dict, Union
+
+import torch
+import torch_audiomentations as tam
+from torch import nn
+
+from models.bandit.core.data._types import BatchedDataDict, DataDict
+
+
+class BaseAugmentor(nn.Module, ABC):
+    def forward(self, item: Union[DataDict, BatchedDataDict]) -> Union[
+        DataDict, BatchedDataDict]:
+        raise NotImplementedError
+
+
+class StemAugmentor(BaseAugmentor):
+    def __init__(
+            self,
+            audiomentations: Dict[str, Dict[str, Any]],
+            fix_clipping: bool = True,
+            scaler_margin: float = 0.5,
+            apply_both_default_and_common: bool = False,
+    ) -> None:
+        super().__init__()
+
+        augmentations = {}
+
+        self.has_default = "[default]" in audiomentations
+        self.has_common = "[common]" in audiomentations
+        self.apply_both_default_and_common = apply_both_default_and_common
+
+        for stem in audiomentations:
+            if audiomentations[stem]["name"] == "Compose":
+                augmentations[stem] = getattr(
+                    tam,
+                    audiomentations[stem]["name"]
+                )(
+                    [
+                        getattr(tam, aug["name"])(**aug["kwargs"])
+                        for aug in
+                        audiomentations[stem]["kwargs"]["transforms"]
+                    ],
+                    **audiomentations[stem]["kwargs"]["kwargs"],
+                )
+            else:
+                augmentations[stem] = getattr(
+                    tam,
+                    audiomentations[stem]["name"]
+                )(
+                    **audiomentations[stem]["kwargs"]
+                )
+
+        self.augmentations = nn.ModuleDict(augmentations)
+        self.fix_clipping = fix_clipping
+        self.scaler_margin = scaler_margin
+
+    def check_and_fix_clipping(
+            self, item: Union[DataDict, BatchedDataDict]
+    ) -> Union[DataDict, BatchedDataDict]:
+        max_abs = []
+
+        for stem in item["audio"]:
+            max_abs.append(item["audio"][stem].abs().max().item())
+
+        if max(max_abs) > 1.0:
+            scaler = 1.0 / (max(max_abs) + torch.rand(
+                (1,),
+                device=item["audio"]["mixture"].device
+            ) * self.scaler_margin)
+
+            for stem in item["audio"]:
+                item["audio"][stem] *= scaler
+
+        return item
+
+    def forward(self, item: Union[DataDict, BatchedDataDict]) -> Union[
+        DataDict, BatchedDataDict]:
+
+        for stem in item["audio"]:
+            if stem == "mixture":
+                continue
+
+            if self.has_common:
+                item["audio"][stem] = self.augmentations["[common]"](
+                    item["audio"][stem]
+                ).samples
+
+            if stem in self.augmentations:
+                item["audio"][stem] = self.augmentations[stem](
+                    item["audio"][stem]
+                ).samples
+            elif self.has_default:
+                if not self.has_common or self.apply_both_default_and_common:
+                    item["audio"][stem] = self.augmentations["[default]"](
+                        item["audio"][stem]
+                    ).samples
+
+        item["audio"]["mixture"] = sum(
+            [item["audio"][stem] for stem in item["audio"]
+             if stem != "mixture"]
+        )  # type: ignore[call-overload, assignment]
+
+        if self.fix_clipping:
+            item = self.check_and_fix_clipping(item)
+
+        return item
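StemAugmentor expects a nested dict mapping stem names, plus the special "[default]" and "[common]" keys, to torch_audiomentations transform specs; a "Compose" entry lists its sub-transforms under kwargs["transforms"] and the Compose kwargs under kwargs["kwargs"]. A hedged sketch of such a config follows (stem names, transform choices, and parameters are illustrative and not taken from this repo's training configs; the output_type value is an assumption about the installed torch_audiomentations version, chosen so that the transforms return object-dict results, since forward() reads .samples):

import torch

from models.bandit.core.data.augmentation import StemAugmentor

audiomentations = {
    # Fallback transform for any stem without its own entry.
    "[default]": {
        "name": "Gain",
        "kwargs": {"min_gain_in_db": -6.0, "max_gain_in_db": 6.0, "p": 0.5,
                   "output_type": "dict"},
    },
    # Stem-specific chain built through the "Compose" branch above.
    "speech": {
        "name": "Compose",
        "kwargs": {
            "transforms": [
                {"name": "Gain",
                 "kwargs": {"min_gain_in_db": -3.0, "max_gain_in_db": 3.0, "p": 0.5}},
                {"name": "PolarityInversion", "kwargs": {"p": 0.5}},
            ],
            "kwargs": {"output_type": "dict"},
        },
    },
}

augmentor = StemAugmentor(audiomentations)

item = {
    "audio": {
        "speech": 0.1 * torch.randn(1, 2, 44100),  # (batch, channels, samples)
        "music": 0.1 * torch.randn(1, 2, 44100),
        "mixture": torch.zeros(1, 2, 44100),       # re-summed from the augmented stems
    },
    "track": "demo",
}
item = augmentor(item)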
data_pipeline/seperation/models/bandit/core/data/augmented.py ADDED
@@ -0,0 +1,35 @@
+import warnings
+from typing import Dict, Optional, Union
+
+import torch
+from torch import nn
+from torch.utils import data
+
+
+class AugmentedDataset(data.Dataset):
+    def __init__(
+            self,
+            dataset: data.Dataset,
+            augmentation: nn.Module = nn.Identity(),
+            target_length: Optional[int] = None,
+    ) -> None:
+        warnings.warn(
+            "This class is no longer used. Attach augmentation to "
+            "the LightningSystem instead.",
+            DeprecationWarning,
+        )
+
+        self.dataset = dataset
+        self.augmentation = augmentation
+
+        self.ds_length: int = len(dataset)  # type: ignore[arg-type]
+        self.length = target_length if target_length is not None else self.ds_length
+
+    def __getitem__(self, index: int) -> Dict[str, Union[str, Dict[str,
+    torch.Tensor]]]:
+        item = self.dataset[index % self.ds_length]
+        item = self.augmentation(item)
+        return item
+
+    def __len__(self) -> int:
+        return self.length
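A minimal usage sketch of AugmentedDataset (the wrapped dataset and lengths here are made up); because __getitem__ indexes modulo the wrapped dataset's length, target_length may be larger than the underlying dataset to oversample it:

import torch
from torch import nn
from torch.utils import data

from models.bandit.core.data.augmented import AugmentedDataset


class ToyDataset(data.Dataset):
    # Hypothetical stand-in for a real stem dataset.
    def __len__(self) -> int:
        return 4

    def __getitem__(self, index: int):
        return {"audio": {"mixture": torch.zeros(2, 16000)}, "track": f"toy/{index}"}


ds = AugmentedDataset(ToyDataset(), augmentation=nn.Identity(), target_length=8)
print(len(ds))  # 8: indices wrap around the 4-item dataset via index % ds_length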