diff --git a/.gitattributes b/.gitattributes index 55cab133643a2a73e083373d2106533678d0edd5..dc9033a8db54d2c5439707e21246a55e40d632a9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -56,3 +56,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +data_pipeline/ssl/km_xlsr_1024_18l filter=lfs diff=lfs merge=lfs -text +data_pipeline/ssl/km_xlsr_512_18l filter=lfs diff=lfs merge=lfs -text +rap_songs.csv filter=lfs diff=lfs merge=lfs -text diff --git a/data_pipeline/asr/faster_whisper_mp.py b/data_pipeline/asr/faster_whisper_mp.py new file mode 100644 index 0000000000000000000000000000000000000000..f4ad36731b2782c0feb544b3e3fc9666865853f4 --- /dev/null +++ b/data_pipeline/asr/faster_whisper_mp.py @@ -0,0 +1,179 @@ +import sys +import os +#from tqdm import tqdm +import torch +import torch.multiprocessing as mp +import threading +#import librosa +#import numpy as np +from faster_whisper import WhisperModel +#import whisper +import glob +import fcntl +import argparse +import traceback +from tqdm import tqdm +import numpy as np +import librosa +import soxr +import multiprocessing + +def normalize_audio(y, target_dbfs=0): + max_amplitude = np.max(np.abs(y)) + if max_amplitude < 0.1: + return y + + target_amplitude = 10.0**(target_dbfs / 20.0) + scale_factor = target_amplitude / max_amplitude + + normalized_audio = y * scale_factor + + return normalized_audio +file_lock = multiprocessing.Lock() + +def inference(rank, ckpt_path, text_path, queue: mp.Queue): + device = f"cuda" + model = WhisperModel(ckpt_path, device=device, device_index=rank, compute_type="float16") + puncs = list(",.?!") + buffer = "" + def write_to_file(data): + with file_lock: + with open(text_path, 'a') as f: + f.write(data) + + + with torch.no_grad(): + while True: + #print(texts) + filename = queue.get() + if filename is None: + write_to_file(buffer) + break + filename = filename[0] + + try: + audio_path = filename + audio, sr = librosa.load(audio_path, sr=None) + audio = normalize_audio(audio, -6) + audio = soxr.resample( + audio, + sr, + 16000 + ) + segments, info = model.transcribe(audio, beam_size=3, vad_filter=True, condition_on_previous_text=False) + text = "" + + for segment in segments: + text_segment = segment.text + text_segment.strip() + if len(text_segment) == 0: + continue + if not text_segment[-1] in puncs: + text_segment += "," + text = text + " " + text_segment + text = text.replace(" ", " ") + text = text.strip() + if len(text) == 0: + continue + if text[-1] == ",": + text = text[:-1] + "." 
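+                    # Accumulate one "path|text|language|language_probability" record per clip
+                    # and flush to disk in roughly 10 kB batches; write_to_file appends under
+                    # file_lock so several worker processes can share one output file safely.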
+ + buffer += f"{filename}|{text}|{info.language}|{info.language_probability}\n" + if len(buffer) > 10000: + write_to_file(buffer) + buffer = "" + + except Exception as e: + print(filename) + traceback.print_exc() + + +def setInterval(interval): + def decorator(function): + def wrapper(*args, **kwargs): + stopped = threading.Event() + + def loop(): # executed in another thread + while not stopped.wait(interval): # until stopped + function(*args, **kwargs) + + t = threading.Thread(target=loop) + t.daemon = True # stop if the program exits + t.start() + return stopped + + return wrapper + + return decorator + +last_batches = None + +@setInterval(5) +def QueueWatcher(queue, bar): + global last_batches + curr_batches = queue.qsize() + bar.update(last_batches-curr_batches) + last_batches = curr_batches + +if __name__ == "__main__": + #audio_dir = sys.argv[1] + parser = argparse.ArgumentParser() + parser.add_argument("--filelist_or_dir", type=str, required=True) + parser.add_argument("--text_path", type=str, required=True, help="Dir to save output") + parser.add_argument("--jobs", type=int, required=False, default=2, help="Path to save checkpoints") + parser.add_argument("--ckpt_path", type=str, required=False, default="large-v3") + parser.add_argument("--log_dir", type=str, required=False, default="large-v3", help="For aml compability") + parser.add_argument("--model_dir", type=str, required=False, default="large-v3", help="For aml compability") + args = parser.parse_args() + + mp.set_start_method('spawn',force=True) + + filelist_or_dir = args.filelist_or_dir + text_path = args.text_path + jobs = args.jobs + ckpt_path = args.ckpt_path + os.makedirs(text_path, exist_ok=True) + model = WhisperModel(ckpt_path, device='cpu') # download model in one thread + del(model) + + if os.path.isfile(filelist_or_dir): + filelist_name = filelist_or_dir.split('/')[-1].split('.')[0] + generator = open(filelist_or_dir).read().splitlines() + text_path = os.path.join(text_path, f"{filelist_name}_text.txt") + else: + filelist_name = "single" + generator = glob.glob(f"{filelist_or_dir}/*.wav") + text_path = os.path.join(text_path, "text.txt") + + os.system(f"rm {text_path}") + + gpu_num = torch.cuda.device_count() + + processes = [] + queue = mp.Queue() + for thread_num in range(jobs): + + rank = thread_num % gpu_num + p = mp.Process(target=inference, args=(rank, ckpt_path, text_path, queue)) + p.start() + processes.append(p) + + accum = [] + tmp_file = [] + + for filename in generator: + accum.append(filename) + if len(accum) == 1: + queue.put(accum.copy()) + accum.clear() + + + for _ in range(jobs): + queue.put(None) + + last_batches = queue.qsize() + bar = tqdm(total=last_batches, desc='whisper') + queue_watcher = QueueWatcher(queue, bar) + for p in processes: + p.join() + queue_watcher.set() \ No newline at end of file diff --git a/data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.ckpt b/data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..1d4b892da79c875b3b3028f9f4d2504ebafe72e1 --- /dev/null +++ b/data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b84f37e8d444c8cb30c79d77f613a41c05868ff9c9ac6c7049c00aefae115aa +size 639331213 diff --git a/data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.yaml b/data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.yaml new file mode 100644 
index 0000000000000000000000000000000000000000..135a051897dee27285ac46ee350afe1e1ec02011 --- /dev/null +++ b/data_pipeline/ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.yaml @@ -0,0 +1,126 @@ +audio: + chunk_size: 352800 + dim_f: 1024 + dim_t: 801 # don't work (use in model) + hop_length: 441 # don't work (use in model) + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.000 + +model: + dim: 512 + depth: 12 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + freqs_per_bands: !!python/tuple + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 128 + - 129 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: true + dim_freqs_in: 1025 + stft_n_fft: 2048 + stft_hop_length: 441 + stft_win_length: 2048 + stft_normalized: false + mask_estimator_depth: 2 + multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 2 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - other + lr: 1.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: vocals + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +inference: + batch_size: 4 + dim_t: 801 + num_overlap: 2 \ No newline at end of file diff --git a/data_pipeline/ckpts/dnsmos_p808.onnx b/data_pipeline/ckpts/dnsmos_p808.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0e04b14824c4dfc6af9d62040c92c09da56f21e7 --- /dev/null +++ b/data_pipeline/ckpts/dnsmos_p808.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9246480c58567bc6affd4200938e77eef49468c8bc7ed3776d109c07456f6e91 +size 224860 diff --git a/data_pipeline/ckpts/wav2vec2_xlsr_300m.pth b/data_pipeline/ckpts/wav2vec2_xlsr_300m.pth new file mode 100644 index 0000000000000000000000000000000000000000..77075b93f40a50d1315104689a5606f54ccaf7fc --- /dev/null +++ b/data_pipeline/ckpts/wav2vec2_xlsr_300m.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ad6f8ff9711c7acc72fbc6ffcdba6bc2582fba92c6b056d71de8a4ed77b6b22 +size 1261921380 diff --git a/data_pipeline/duration/duration_mutagen.py b/data_pipeline/duration/duration_mutagen.py new file mode 100644 index 0000000000000000000000000000000000000000..db6da63e4c3fedee025f9333a588899a7972825d --- /dev/null +++ b/data_pipeline/duration/duration_mutagen.py @@ -0,0 +1,39 @@ +import os +import glob +import sys +from tqdm import tqdm +import matplotlib.pyplot as plt +import numpy as np +from mutagen.wave import WAVE # mutagen for reading wav metadata + +filelist_or_dir = sys.argv[1] # filelist including absolute path or data root path + +total_duration = 0. 
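+# Durations come from the WAV header via mutagen (WAVE(...).info.length), so no audio
+# samples are decoded and the scan stays fast even on large corpora.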
+durations = [] + + +def get_wav_duration(file_path): + try: + duration = WAVE(file_path).info.length + return duration + except Exception as e: + print('Error occurred:', e) + return None + +if os.path.isdir(filelist_or_dir): + filelist = [os.path.join(filelist_or_dir, filename) for filename in glob.glob(os.path.join(filelist_or_dir, '**/*.wav'), recursive=True)] +else: + filelist = open(filelist_or_dir, 'r').read().splitlines() +for wav_path in tqdm(filelist): + try: + duration = get_wav_duration(wav_path) + total_duration += duration + durations.append(duration) + except Exception as e: + print(e) + +print(f"total_duration: {total_duration}, avg_duration: {total_duration / len(durations)}") + +#plt.hist(durations, bins=50, range=(0, 50)) +#plt.savefig(os.path.join(os.path.dirname(data_root), "durations.png")) +#np.save(os.path.join(os.path.dirname(data_root), "1.npy"), np.array(durations)) \ No newline at end of file diff --git a/data_pipeline/g2p_en.py b/data_pipeline/g2p_en.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d514ab716d40e0185facf49b2e1ef8ec471728 --- /dev/null +++ b/data_pipeline/g2p_en.py @@ -0,0 +1,138 @@ +""" from https://github.com/keithito/tacotron """ + +''' +Cleaners are transformations that run over the input text at both training and eval time. + +Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" +hyperparameter. Some cleaners are English-specific. You'll typically want to use: + 1. "english_cleaners" for English text + 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using + the Unidecode library (https://pypi.python.org/pypi/Unidecode) + 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update + the symbols in symbols.py to match your data). +''' + +import re +from unidecode import unidecode +from phonemizer import phonemize +from phonemizer.backend import EspeakBackend +import matplotlib.pyplot as plt +import traceback +import sys +import os +from tqdm import tqdm +import numpy as np + + +# Regular expression matching whitespace: +_whitespace_re = re.compile(r'\s+') + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ + ('mrs', 'misess'), + ('mr', 'mister'), + ('dr', 'doctor'), + ('st', 'saint'), + ('co', 'company'), + ('jr', 'junior'), + ('maj', 'major'), + ('gen', 'general'), + ('drs', 'doctors'), + ('rev', 'reverend'), + ('lt', 'lieutenant'), + ('hon', 'honorable'), + ('sgt', 'sergeant'), + ('capt', 'captain'), + ('esq', 'esquire'), + ('ltd', 'limited'), + ('col', 'colonel'), + ('ft', 'fort'), +]] + + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + + +def expand_numbers(text): + return normalize_numbers(text) + + +def lowercase(text): + return text.lower() + + +def collapse_whitespace(text): + return re.sub(_whitespace_re, ' ', text) + + +def convert_to_ascii(text): + return unidecode(text) + + +def basic_cleaners(text): + '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' + text = lowercase(text) + text = collapse_whitespace(text) + return text + + +def transliteration_cleaners(text): + '''Pipeline for non-English text that transliterates to ASCII.''' + text = convert_to_ascii(text) + text = lowercase(text) + text = collapse_whitespace(text) + return text + + +def english_cleaners(text): + '''Pipeline for English text, including abbreviation expansion.''' + text = convert_to_ascii(text) + text = lowercase(text) + text = expand_abbreviations(text) + phonemes = phonemize(text, language='en-us', backend='espeak', strip=True) + phonemes = collapse_whitespace(phonemes) + return phonemes + + +def english_cleaners2(text): + '''Pipeline for English text, including abbreviation expansion. + punctuation + stress''' + + +if __name__ == '__main__': + text_file = sys.argv[1] + phoneme_file = sys.argv[2] + + backend = EspeakBackend('en-us', preserve_punctuation=True, with_stress=True) + + buffer = "" + + out_file = open(phoneme_file, 'w') + for line in tqdm(open(text_file, errors='ignore').read().splitlines()): + try: + filepath, text, language, confidence = line.split('|') + confidence = float(confidence) + filename = os.path.basename(filepath).split('.')[0] + duration = float(filename.split('_')[-1]) / 1000 + + if language == "en": + phone = convert_to_ascii(text) + phone = lowercase(phone) + phone = expand_abbreviations(phone) + + phone = backend.phonemize([phone], strip=True)[0] + phone = collapse_whitespace(phone) + ratio = len(phone) / duration + else: + phone = "[blank]" + ratio = 0 + buffer += f"{filepath}|{text}|{phone}|{language}|{confidence:.3f}|{ratio:.3f}\n" + if len(buffer) > 100000: + out_file.write(buffer) + buffer = "" + except Exception as e: + print(filename, line, e) + continue + out_file.write(buffer) \ No newline at end of file diff --git a/data_pipeline/g2p_es.py b/data_pipeline/g2p_es.py new file mode 100644 index 0000000000000000000000000000000000000000..3514c142edf2388bfb8c5fdf568dcb4b56e5dea5 --- /dev/null +++ b/data_pipeline/g2p_es.py @@ -0,0 +1,143 @@ +import re +from unidecode import unidecode +from transformers import T5ForConditionalGeneration, AutoTokenizer +import matplotlib.pyplot as plt +import traceback +import sys +import os +from tqdm import tqdm +import numpy as np + + +# Regular expression matching whitespace: +_whitespace_re = re.compile(r'\s+') + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ + ('mrs', 'misess'), + ('mr', 'mister'), + ('dr', 'doctor'), + ('st', 'saint'), + ('co', 'company'), + ('jr', 'junior'), + ('maj', 'major'), + ('gen', 'general'), + ('drs', 'doctors'), + ('rev', 'reverend'), + ('lt', 'lieutenant'), + ('hon', 'honorable'), + ('sgt', 'sergeant'), + ('capt', 'captain'), + ('esq', 'esquire'), + ('ltd', 'limited'), + ('col', 'colonel'), + ('ft', 'fort'), +]] + + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + + +def expand_numbers(text): + return normalize_numbers(text) + + +def lowercase(text): + return text.lower() + + +def collapse_whitespace(text): + return re.sub(_whitespace_re, ' ', text) + + +def convert_to_ascii(text): + return unidecode(text) + +puncs_to_remove = ["♪", "#", "¿", "¡", "-", "*"] +puncs_to_remove = "".join(puncs_to_remove) +def normalize(text): + text = text.translate(str.maketrans('', '', puncs_to_remove)) + text = text.strip() + return text + + +def basic_cleaners(text): + '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' + text = lowercase(text) + text = collapse_whitespace(text) + return text + + +def transliteration_cleaners(text): + '''Pipeline for non-English text that transliterates to ASCII.''' + text = convert_to_ascii(text) + text = lowercase(text) + text = collapse_whitespace(text) + return text + + +def english_cleaners(text): + '''Pipeline for English text, including abbreviation expansion.''' + text = convert_to_ascii(text) + text = lowercase(text) + text = expand_abbreviations(text) + phonemes = phonemize(text, language='en-us', backend='espeak', strip=True) + phonemes = collapse_whitespace(phonemes) + return phonemes + + +def english_cleaners2(text): + '''Pipeline for English text, including abbreviation expansion. + punctuation + stress''' + + +if __name__ == '__main__': + text_file = sys.argv[1] + phoneme_file = sys.argv[2] + + + model = T5ForConditionalGeneration.from_pretrained('charsiu/g2p_multilingual_byT5_tiny_16_layers_100') + #model.cuda() + tokenizer = AutoTokenizer.from_pretrained('google/byt5-small') + + buffer = "" + + out_file = open(phoneme_file, 'w') + for line in tqdm(open(text_file, errors='ignore').read().splitlines()): + try: + filepath, text, language, confidence = line.split('|') + confidence = float(confidence) + filename = os.path.basename(filepath).split('.')[0] + duration = float(filename.split('_')[-1]) / 1000 + + if language == "es": + #text = convert_to_ascii(text) + text = normalize(text) + text = lowercase(text) + print(text) + + words = text.split(' ') + words = [': '+i for i in words] + out = tokenizer(words,padding=True,add_special_tokens=False,return_tensors='pt') + + preds = model.generate(**out,num_beams=1,max_length=50) # We do not find beam search helpful. Greedy decoding is enough. 
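+                # ByT5 works directly on UTF-8 bytes: each word was generated as its own
+                # sequence above, so decode per word and re-join the phoneme strings with spaces.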
+ phone = tokenizer.batch_decode(preds.tolist(),skip_special_tokens=True) + phone = " ".join(phone) + print(phone) + + phone = collapse_whitespace(phone) + ratio = len(phone) / duration + else: + phone = "[blank]" + ratio = 0 + buffer += f"{filepath}|{text}|{phone}|{language}|{confidence:.3f}|{ratio:.3f}\n" + if len(buffer) > 100000: + out_file.write(buffer) + buffer = "" + #break + except Exception as e: + print(filename, line, e) + continue + out_file.write(buffer) \ No newline at end of file diff --git a/data_pipeline/merge_metrics.py b/data_pipeline/merge_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..dbbd0b8780b3972378a0897a01aea1eb252cce71 --- /dev/null +++ b/data_pipeline/merge_metrics.py @@ -0,0 +1,64 @@ + +import re +from unidecode import unidecode +from phonemizer import phonemize +from phonemizer.backend import EspeakBackend +import matplotlib.pyplot as plt +import traceback +import argparse +import os +from tqdm import tqdm +import numpy as np + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--phone", type=str, required=True) + parser.add_argument("--mos", type=str, required=True) + parser.add_argument("--spk", type=str, required=True) + parser.add_argument("--output", type=str, required=True) + args = parser.parse_args() + + ratios = [] + + mos_file = open(args.mos, 'r').read().splitlines() + mos = {} + for line in mos_file: + try: + file_path, mos_score = line.split('|') + filename = os.path.basename(file_path).split('.')[0] + mos[filename] = float(mos_score) + except: + print(line) + + spk_file = open(args.spk).read().splitlines() + spk = {} + for line in spk_file: + try: + file_path, score = line.split('|') + filename = os.path.basename(file_path).split('.')[0] + spk[filename] = float(score) + except: + print(line) + + buffer = "" + out_file = open(args.output, 'w') + for line in tqdm(open(args.phone, errors='ignore').read().splitlines()): + try: + filepath, text, phone, language, confidence, ratio = line.split('|') + confidence = float(confidence) + ratio = float(ratio) + filename = os.path.basename(filepath).split('.')[0] + mos_score = mos[filename] + spk_score = spk[filename] + + buffer += f"{filepath}|{text}|{phone}|{mos_score:.3f}|{language}|{confidence:.3f}|{spk_score:.3f}|{ratio:.3f}\n" + if len(buffer) > 100000: + out_file.write(buffer) + buffer = "" + ratios.append(ratio) + except Exception as e: + print(e, line) + traceback.print_exc() + continue + out_file.write(buffer) diff --git a/data_pipeline/pipeline.sh b/data_pipeline/pipeline.sh new file mode 100644 index 0000000000000000000000000000000000000000..8195d88383203622517195e2c7a9b7fbc478c38f --- /dev/null +++ b/data_pipeline/pipeline.sh @@ -0,0 +1,54 @@ +#input_dir=$PWD/$1 +input_dir=$1 +output_root=$2 +stage=${3:-0} +stop_stage=${4:-2} + +echo "from ${input_dir} to ${output_root}" +python3 --version + +set -euo pipefail + +# seperation & segmentation +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then +echo "Seperating..." +cd seperation +python3 inference_mp.py --filelist_or_dir $output_root/wav --out_dir $output_root --jobs 2 --ckpt_path /data/v-ziqianning/SingingTTS/data_pipeline/ckpts/bs_roformer +cd - +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then +echo "Segmenting..." +cd vad +python3 vad_webrtcvad.py --filelist_or_dir ${input_dir}/vocal --out_dir ${output_root}/ --jobs 16 +cd - +fi + +# ssl +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then +echo "Extracting SSL..." 
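+# Both extractors read the segmented clips in $output_root/vocal_cut and write features
+# under $output_root; the trailing "# vocal" / "# bgm" comments mark which stream each
+# feature set feeds downstream.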
+cd ssl +python3 extract_xlsr.py $output_root/vocal_cut $output_root 2 # vocal +python3 extract_xlsr_6l.py $output_root/vocal_cut $output_root 2 # bgm +cd - +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then +echo "Quality Metrics..." +cd quality +python3 dnsmos_mp.py --filelist_or_dir $output_root/vocal_cut --text_path $output_root --jobs 8 --ckpt_path /data/v-ziqianning/SingingTTS/data_pipeline/ckpts +python3 pyannote_mp.py --filelist_or_dir $output_root/vocal_cut --text_path $output_root --jobs 8 +cd - +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then +echo "Extracting lyrics..." +cd asr +python3 faster_whisper_mp.py --filelist_or_dir $output_root/vocal_cut --text_path $output_root --jobs 2 +cd - +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then +python3 g2p_en.py $output_root/text.txt $output_root/phoneme.txt +python3 merge_metrics.py --phone $output_root/phoneme.txt --mos $output_root/dnsmos.txt --spk $output_root/spk.txt --output $output_root/data.txt +fi \ No newline at end of file diff --git a/data_pipeline/quality/dnsmos_mp.py b/data_pipeline/quality/dnsmos_mp.py new file mode 100644 index 0000000000000000000000000000000000000000..caecbaf1602a9ed60255c3639fa74009e5841a70 --- /dev/null +++ b/data_pipeline/quality/dnsmos_mp.py @@ -0,0 +1,266 @@ +import sys +import os +import torch +import torch.multiprocessing as mp +import threading +import numpy as np +import glob +import argparse +import librosa +import soxr +from tqdm import tqdm +import traceback +import multiprocessing +#from speechmos import dnsmos +import onnxruntime as ort +os.environ["OMP_NUM_THREADS"] = "1" +#os.environ["MKL_NUM_THREADS"] = "1" + +file_lock = multiprocessing.Lock() + +SR = 16000 +INPUT_LENGTH = 9.01 +dnsmos = None + + +class DNSMOS: + def __init__(self, primary_model_path, p808_model_path, rank) -> None: + self.primary_model_path = primary_model_path + sess_opt = ort.SessionOptions() + sess_opt.intra_op_num_threads = 1 + sess_opt.inter_op_num_threads = 1 + sess_opt.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL + #providers = [("CUDAExecutionProvider", {"device_id": torch.cuda.current_device(),})] + #providers = ["CUDAExecutionProvider"] + #providers = ["CPUExecutionProvider"] + providers = [ + ('CUDAExecutionProvider', { + 'device_id': rank, + }), + 'CPUExecutionProvider', + ] + #self.onnx_sess = ort.InferenceSession(self.primary_model_path, sess_opt, providers=providers) + self.p808_onnx_sess = ort.InferenceSession(p808_model_path, sess_opt, providers=providers) + #print(self.p808_onnx_sess.get_providers()) + + def audio_melspec(self, audio, n_mels=120, frame_size=320, hop_length=160, sr=16000, to_db=True): + mel_spec = librosa.feature.melspectrogram( + y=audio, sr=sr, n_fft=frame_size + 1, hop_length=hop_length, n_mels=n_mels) + if to_db: + mel_spec = (librosa.power_to_db(mel_spec, ref=np.max) + 40) / 40 + return mel_spec.T + + def get_polyfit_val(self, sig, bak, ovr, is_personalized_MOS): + if is_personalized_MOS: + p_ovr = np.poly1d( + [-0.00533021, 0.005101, 1.18058466, -0.11236046]) + p_sig = np.poly1d( + [-0.01019296, 0.02751166, 1.19576786, -0.24348726]) + p_bak = np.poly1d( + [-0.04976499, 0.44276479, -0.1644611, 0.96883132]) + else: + p_ovr = np.poly1d([-0.06766283, 1.11546468, 0.04602535]) + p_sig = np.poly1d([-0.08397278, 1.22083953, 0.0052439]) + p_bak = np.poly1d([-0.13166888, 1.60915514, -0.39604546]) + + sig_poly = p_sig(sig) + bak_poly = p_bak(bak) + ovr_poly = p_ovr(ovr) + + return sig_poly, bak_poly, ovr_poly + + def __call__(self, sample, fs, 
is_personalized_MOS): + clip_dict = {} + if isinstance(sample, np.ndarray): + audio = sample + if not ((audio >= -1).all() and (audio <= 1).all()): + raise ValueError("np.ndarray values must be between -1 and 1.") + elif isinstance(sample, str) and os.path.isfile(sample): + audio, _ = librosa.load(sample, sr=fs) + clip_dict['filename'] = sample + else: + raise ValueError( + f"Input must be a numpy array or a path to an audio file.") + + len_samples = int(INPUT_LENGTH * fs) + while len(audio) < len_samples: + audio = np.append(audio, audio) + + num_hops = int(np.floor(len(audio) / fs) - INPUT_LENGTH) + 1 + hop_len_samples = fs + predicted_mos_sig_seg = [] + predicted_mos_bak_seg = [] + predicted_mos_ovr_seg = [] + predicted_p808_mos = [] + + for idx in range(num_hops): + audio_seg = audio[int(idx * hop_len_samples): int((idx + INPUT_LENGTH) * hop_len_samples)] + if len(audio_seg) < len_samples: + continue + + input_features = np.array(audio_seg).astype( + 'float32')[np.newaxis, :] + p808_input_features = np.array(self.audio_melspec( + audio=audio_seg[:-160])).astype('float32')[np.newaxis, :, :] + oi = {'input_1': input_features} + p808_oi = {'input_1': p808_input_features} + p808_mos = self.p808_onnx_sess.run(None, p808_oi)[0][0][0] + #mos_sig_raw, mos_bak_raw, mos_ovr_raw = self.onnx_sess.run(None, oi)[ + # 0][0] + #mos_sig, mos_bak, mos_ovr = self.get_polyfit_val( + # mos_sig_raw, mos_bak_raw, mos_ovr_raw, is_personalized_MOS) + #predicted_mos_sig_seg.append(mos_sig) + #predicted_mos_bak_seg.append(mos_bak) + #predicted_mos_ovr_seg.append(mos_ovr) + predicted_p808_mos.append(p808_mos) + + #clip_dict['ovrl_mos'] = np.mean(predicted_mos_ovr_seg) + #clip_dict['sig_mos'] = np.mean(predicted_mos_sig_seg) + #clip_dict['bak_mos'] = np.mean(predicted_mos_bak_seg) + clip_dict['p808_mos'] = np.mean(predicted_p808_mos) + return clip_dict + +def normalize_audio(y, target_dbfs=0): + max_amplitude = np.max(np.abs(y)) + if max_amplitude < 0.1: + return y + + target_amplitude = 10.0**(target_dbfs / 20.0) + scale_factor = target_amplitude / max_amplitude + #print(max_amplitude, target_amplitude, scale_factor) + + normalized_audio = y * scale_factor + + return normalized_audio + + +def inference(rank, ckpt_dir, text_path, queue: mp.Queue): + p808_model_path = os.path.join(ckpt_dir, 'dnsmos_p808.onnx') + primary_model_path = os.path.join(ckpt_dir, 'sig_bak_ovr.onnx') + dnsmos = DNSMOS(primary_model_path, p808_model_path, rank) + + def write_to_file(data): + with file_lock: + with open(text_path, 'a') as f: + f.write(data) + + buffer = "" + + with torch.no_grad(): + while True: + #print(texts) + filename = queue.get() + if filename is None: + write_to_file(buffer) + break + try: + filename = filename[0] + audio_path = filename + wav, sr = librosa.load(audio_path, sr=None) + wav = normalize_audio(wav, -6) + wav = soxr.resample( + wav, # 1D(mono) or 2D(frames, channels) array input + sr, # input samplerate + 16000 # target samplerate + ) + if wav.min() < -1 or wav.min() > 1: + print(audio_path) + mos_dict = dnsmos(wav, 16000, False) + p808_mos = mos_dict['p808_mos'] + buffer += f"{filename}|{p808_mos:3}\n" + if len(buffer) > 10000: + write_to_file(buffer) + buffer = "" + except Exception as e: + print(audio_path) + traceback.print_exc() + + +def setInterval(interval): + def decorator(function): + def wrapper(*args, **kwargs): + stopped = threading.Event() + + def loop(): # executed in another thread + while not stopped.wait(interval): # until stopped + function(*args, **kwargs) + + t = 
threading.Thread(target=loop) + t.daemon = True # stop if the program exits + t.start() + return stopped + + return wrapper + + return decorator + +last_batches = None + +@setInterval(5) +def QueueWatcher(queue, bar): + global last_batches + curr_batches = queue.qsize() + bar.update(last_batches-curr_batches) + last_batches = curr_batches + + +if __name__ == "__main__": + #audio_dir = sys.argv[1] + parser = argparse.ArgumentParser() + parser.add_argument("--filelist_or_dir", type=str, required=True) + parser.add_argument("--text_path", type=str, required=True, help="Dir to save output") + parser.add_argument("--jobs", type=int, required=False, default=2) + parser.add_argument("--log_dir", type=str, required=False, help="For aml compatibility") + parser.add_argument("--model_dir", type=str, required=False, help="For aml compatibility") + parser.add_argument("--ckpt_path", type=str, required=False, default=".") + args = parser.parse_args() + + mp.set_start_method('spawn',force=True) + + filelist_or_dir = args.filelist_or_dir + text_path = args.text_path + jobs = args.jobs + ckpt_path = args.ckpt_path + os.makedirs(text_path, exist_ok=True) + + if os.path.isfile(filelist_or_dir): + filelist_name = filelist_or_dir.split('/')[-1].split('.')[0] + generator = open(filelist_or_dir).read().splitlines() + text_path = os.path.join(text_path, f"{filelist_name}_dnsmos.txt") + else: + filelist_name = "single" + generator = glob.glob(f"{filelist_or_dir}/*.wav") + text_path = os.path.join(text_path, "dnsmos.txt") + + os.system(f"rm {text_path}") + + gpu_num = torch.cuda.device_count() + + processes = [] + queue = mp.Queue() + for thread_num in range(jobs): + + rank = thread_num % gpu_num + p = mp.Process(target=inference, args=(rank, ckpt_path, text_path, queue)) + p.start() + processes.append(p) + + accum = [] + tmp_file = [] + + for filename in generator: + accum.append(filename) + if len(accum) == 1: + queue.put(accum.copy()) + accum.clear() + + + for _ in range(jobs): + queue.put(None) + + last_batches = queue.qsize() + bar = tqdm(total=last_batches, desc='dnsmos') + queue_watcher = QueueWatcher(queue, bar) + for p in processes: + p.join() + queue_watcher.set() \ No newline at end of file diff --git a/data_pipeline/quality/pyannote_mp.py b/data_pipeline/quality/pyannote_mp.py new file mode 100644 index 0000000000000000000000000000000000000000..fa96f40c6a27bf30721c4e4c1714f6ae57bfed99 --- /dev/null +++ b/data_pipeline/quality/pyannote_mp.py @@ -0,0 +1,152 @@ +import sys +import os +import torch +import torch.multiprocessing as mp +import multiprocessing +import threading +import numpy as np +import glob +import argparse +from tqdm import tqdm +from collections import defaultdict +import traceback +from pyannote.audio import Pipeline + +file_lock = multiprocessing.Lock() + + +def inference(rank, text_path, queue: mp.Queue): + device=f"cuda:{rank}" + pipeline = Pipeline.from_pretrained( + "pyannote/speaker-diarization-3.1", + use_auth_token="Your huggingface token") + pipeline.to(torch.device(device)) + + def write_to_file(data): + with file_lock: + with open(text_path, 'a') as f: + f.write(data) + + buffer = "" + + with torch.no_grad(): + while True: + #print(texts) + filename = queue.get() + if filename is None: + write_to_file(buffer) + break + try: + filename = filename[0] + audio_path = filename + + spks = defaultdict(float) + total_duration = 0. 
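+                    # Diarize the clip, sum speaking time per speaker label, and report the
+                    # dominant speaker's share of total speaking time (values near 1.0 mean
+                    # the clip is essentially single-speaker).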
+ + diarization = pipeline(audio_path) + for turn, _, speaker in diarization.itertracks(yield_label=True): + duration = turn.end - turn.start + spks[speaker] += duration + total_duration += duration + + if len(spks) == 0: + percentage = 0. + else: + sorted_spks = sorted(spks.items(), key=lambda s:s[1], reverse=True) + percentage = sorted_spks[0][1] / total_duration + + buffer += f"{filename}|{percentage:3}\n" + if len(buffer) > 10000: + write_to_file(buffer) + buffer = "" + except Exception as e: + #print(sorted_spks) + traceback.print_exc() + + +def setInterval(interval): + def decorator(function): + def wrapper(*args, **kwargs): + stopped = threading.Event() + + def loop(): # executed in another thread + while not stopped.wait(interval): # until stopped + function(*args, **kwargs) + + t = threading.Thread(target=loop) + t.daemon = True # stop if the program exits + t.start() + return stopped + + return wrapper + + return decorator + +last_batches = None + +@setInterval(5) +def QueueWatcher(queue, bar): + global last_batches + curr_batches = queue.qsize() + bar.update(last_batches-curr_batches) + last_batches = curr_batches + + +if __name__ == "__main__": + #audio_dir = sys.argv[1] + parser = argparse.ArgumentParser() + parser.add_argument("--filelist_or_dir", type=str, required=True) + parser.add_argument("--text_path", type=str, required=True, help="Dir to save output") + parser.add_argument("--jobs", type=int, required=False, default=2) + parser.add_argument("--log_dir", type=str, required=False, help="For aml compatibility") + parser.add_argument("--model_dir", type=str, required=False, help="For aml compatibility") + args = parser.parse_args() + + mp.set_start_method('spawn',force=True) + + filelist_or_dir = args.filelist_or_dir + text_path = args.text_path + jobs = args.jobs + os.makedirs(text_path, exist_ok=True) + + if os.path.isfile(filelist_or_dir): + filelist_name = filelist_or_dir.split('/')[-1].split('.')[0] + generator = open(filelist_or_dir).read().splitlines() + text_path = os.path.join(text_path, f"{filelist_name}_spk.txt") + else: + filelist_name = "single" + generator = glob.glob(f"{filelist_or_dir}/*.wav") + text_path = os.path.join(text_path, "spk.txt") + + os.system(f"rm {text_path}") + + gpu_num = torch.cuda.device_count() + + processes = [] + queue = mp.Queue() + for thread_num in range(jobs): + + rank = thread_num % gpu_num + p = mp.Process(target=inference, args=(rank, text_path, queue)) + p.start() + processes.append(p) + + accum = [] + tmp_file = [] + + for filename in generator: + accum.append(filename) + if len(accum) == 1: + queue.put(accum.copy()) + accum.clear() + + + for _ in range(jobs): + queue.put(None) + + last_batches = queue.qsize() + bar = tqdm(total=last_batches, desc='pyannote') + queue_watcher = QueueWatcher(queue, bar) + for p in processes: + p.join() + queue_watcher.set() \ No newline at end of file diff --git a/data_pipeline/requirements.txt b/data_pipeline/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8bcb8a318d50eac64320cdd6e31ee41d5dc50d6a --- /dev/null +++ b/data_pipeline/requirements.txt @@ -0,0 +1,27 @@ +torch +numpy +pandas +scipy +soundfile +ml_collections +tqdm +segmentation_models_pytorch==0.3.3 +timm==0.9.2 +audiomentations==0.24.0 +pedalboard==0.8.1 +omegaconf==2.2.3 +beartype==0.14.1 +rotary_embedding_torch==0.3.5 +einops==0.6.1 +librosa +demucs==4.0.0 +transformers==4.35.0 +torchmetrics==0.11.4 +spafe==0.3.2 +protobuf==3.20.3 +torch_audiomentations +asteroid==0.7.0 +auraloss 
+pyannote.audio +webrtcvad +faster-whisper==0.10.1 diff --git a/data_pipeline/seperation/README.md b/data_pipeline/seperation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d544b1443b91ccdecdd604a9dd79578a5247b377 --- /dev/null +++ b/data_pipeline/seperation/README.md @@ -0,0 +1,139 @@ +# Music Source Separation Universal Training Code + +Repository for training models for music source separation. Repository is based on [kuielab code](https://github.com/kuielab/sdx23/tree/mdx_AB/my_submission/src) for [SDX23 challenge](https://github.com/kuielab/sdx23/tree/mdx_AB/my_submission/src). The main idea of this repository is to create training code, which is easy to modify for experiments. Brought to you by [MVSep.com](https://mvsep.com). + +## Models + +Model can be chosen with `--model_type` arg. + +Available models for training: +* MDX23C based on [KUIELab TFC TDF v3 architecture](https://github.com/kuielab/sdx23/). Key: `mdx23c`. +* Demucs4HT [[Paper](https://arxiv.org/abs/2211.08553)]. Key: `htdemucs`. +* VitLarge23 based on [Segmentation Models Pytorch](https://github.com/qubvel/segmentation_models.pytorch). Key: `segm_models`. +* Band Split RoFormer [[Paper](https://arxiv.org/abs/2309.02612), [Repository](https://github.com/lucidrains/BS-RoFormer)] . Key: `bs_roformer`. +* Mel-Band RoFormer [[Paper](https://arxiv.org/abs/2310.01809), [Repository](https://github.com/lucidrains/BS-RoFormer)]. Key: `mel_band_roformer`. +* Swin Upernet [[Paper](https://arxiv.org/abs/2103.14030)] Key: `swin_upernet`. +* BandIt Plus [[Paper](https://arxiv.org/abs/2309.02539), [Repository](https://github.com/karnwatcharasupat/bandit)] Key: `bandit`. +* SCNet [[Paper](https://arxiv.org/abs/2401.13276), [Official Repository](https://github.com/starrytong/SCNet), [Unofficial Repository](https://github.com/amanteur/SCNet-PyTorch)] Key: `scnet`. + + **Note 1**: For `segm_models` there are many different encoders is possible. [Look here](https://github.com/qubvel/segmentation_models.pytorch#encoders-). + + **Note 2**: Thanks to [@lucidrains](https://github.com/lucidrains) for recreating the RoFormer models based on papers. + +## How to train + +To train model you need to: + +1) Choose model type with key `--model_type`. Possible values: `mdx23c`, `htdemucs`, `segm_models`, `mel_band_roformer`, `bs_roformer`. +2) Choose location of config for model `--config_path` ``. You can find examples of configs in [configs folder](configs/). Prefixes `config_musdb18_` are examples for [MUSDB18 dataset](https://sigsep.github.io/datasets/musdb.html). +3) If you have some check-point from the same model or from the similar model you can use it with: `--start_check_point` `` +4) Choose path where to store results of training `--results_path` `` + +#### Example +```bash +python train.py \ + --model_type mel_band_roformer \ + --config_path configs/config_mel_band_roformer_vocals.yaml \ + --start_check_point results/model.ckpt \ + --results_path results/ \ + --data_path 'datasets/dataset1' 'datasets/dataset2' \ + --valid_path datasets/musdb18hq/test \ + --num_workers 4 \ + --device_ids 0 +``` + +All available training parameters you can find [here](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/train.py#L109). 
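+
+As a further, illustrative example, the BS Roformer vocal checkpoint and config bundled in this repository under `../ckpts/bs_roformer/` can serve as a starting point for fine-tuning on your own data (the dataset paths below are placeholders):
+
+```bash
+python train.py \
+    --model_type bs_roformer \
+    --config_path ../ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.yaml \
+    --start_check_point ../ckpts/bs_roformer/model_bs_roformer_ep_317_sdr_12.9755.ckpt \
+    --results_path results/ \
+    --data_path 'datasets/dataset1' 'datasets/dataset2' \
+    --valid_path datasets/musdb18hq/test \
+    --num_workers 4 \
+    --device_ids 0
+```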
+ +## How to inference + +#### Example + +```bash +python inference.py \ + --model_type mdx23c \ + --config_path configs/config_mdx23c_musdb18.yaml \ + --start_check_point results/last_mdx23c.ckpt \ + --input_folder input/wavs/ \ + --store_dir separation_results/ +``` + +All available inference parameters you can find [here](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/inference.py#L54). + +## Useful notes + +* All batch sizes in config are adjusted to use with single NVIDIA A6000 48GB. If you have less memory please adjust correspodningly in model config `training.batch_size` and `training.gradient_accumulation_steps`. +* It's usually always better to start with old weights even if shapes not fully match. Code supports loading weights for not fully same models (but it must have the same architecture). Training will be much faster. + +## Code description + +* `configs/config_*.yaml` - configuration files for models +* `models/*` - set of available models for training and inference +* `dataset.py` - dataset which creates new samples for training +* `inference.py` - process folder with music files and separate them +* `train.py` - main training code +* `utils.py` - common functions used by train/valid +* `valid.py` - validation of model with metrics + + +## Pre-trained models + +If you trained some good models, please, share them. You can post config and model weights [in this issue](https://github.com/ZFTurbo/Music-Source-Separation-Training/issues/1). + +### Vocal models +| Model Type | Instruments | Metrics (SDR) | Config | Checkpoint | +|:----------------------------------------------------------------:|:-------------:|:-----------------:|:-----:|:-----:| +| MDX23C | vocals / other | SDR vocals: 10.17 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/config_vocals_mdx23c.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/model_vocals_mdx23c_sdr_10.17.ckpt) | +| HTDemucs4 (MVSep finetuned) | vocals / other | SDR vocals: 8.78 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/config_vocals_htdemucs.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/model_vocals_htdemucs_sdr_8.78.ckpt) | +| Segm Models (VitLarge23) | vocals / other | SDR vocals: 9.77 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/config_vocals_segm_models.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/model_vocals_segm_models_sdr_9.77.ckpt) | +| Mel Band RoFormer | vocals (*) / other | SDR vocals: 8.42 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/config_vocals_mel_band_roformer.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.0/model_vocals_mel_band_roformer_sdr_8.42.ckpt) | +| Swin Upernet | vocals / other | SDR vocals: 7.57 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.2/config_vocals_swin_upernet.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.2/model_swin_upernet_ep_56_sdr_10.6703.ckpt) | +| BS Roformer ([viperx](https://github.com/playdasegunda) edition) | vocals / other | SDR vocals: 10.87 | 
[Config](https://raw.githubusercontent.com/ZFTurbo/Music-Source-Separation-Training/main/configs/viperx/model_bs_roformer_ep_317_sdr_12.9755.yaml) | [Weights](https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/model_bs_roformer_ep_317_sdr_12.9755.ckpt) | +| Mel Band Roformer ([viperx](https://github.com/playdasegunda) edition) | vocals / other | SDR vocals: 9.67 | [Config](https://raw.githubusercontent.com/ZFTurbo/Music-Source-Separation-Training/main/configs/viperx/model_mel_band_roformer_ep_3005_sdr_11.4360.yaml) | [Weights](https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/model_mel_band_roformer_ep_3005_sdr_11.4360.ckpt) | + +**Note**: Metrics measured on [Multisong Dataset](https://mvsep.com/en/quality_checker). + +### Single stem models +| Model Type | Instruments | Metrics (SDR) | Config | Checkpoint | +|:-----------------------------------------:|:-------------:|:----------------:|:-----:|:-----:| +| HTDemucs4 FT Drums | drums | SDR drums: 11.13 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/f7e0c4bc-ba3fe64a.th) | +| HTDemucs4 FT Bass | bass | SDR bass: 11.96 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/d12395a8-e57c48e6.th) | +| HTDemucs4 FT Other | other | SDR other: 5.85 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/92cfc3b6-ef3bcb9c.th) | +| HTDemucs4 FT Vocals (Official repository) | vocals | SDR vocals: 8.38 | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/04573f0d-f3cf25b2.th) | +| BS Roformer ([viperx](https://github.com/playdasegunda) edition) | other | SDR other: 6.85 | [Config](https://raw.githubusercontent.com/ZFTurbo/Music-Source-Separation-Training/main/configs/viperx/model_bs_roformer_ep_937_sdr_10.5309.yaml) | [Weights](https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/model_bs_roformer_ep_937_sdr_10.5309.ckpt) | + +**Note**: All models output 4 stems, but quality is best only on target stem (all other stems are dummy). 
+ +### Multi-stem models + +| Model Type | Instruments | Metrics (SDR) | Config | Checkpoint | +|:-------------------:|:-------------:|:----------------------------------------------------------------------------------------------------------------------------------------------:|:-----:|:-----:| +| MDX23C~~*~~ | bass / drums / vocals / other | MUSDB test avg: 7.15 (bass: 5.77, drums: 7.93 vocals: 9.23 other: 5.68) Multisong avg: 7.02 (bass: 8.40, drums: 7.73 vocals: 7.36 other: 4.57) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.1/config_musdb18_mdx23c.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v1.0.1/model_mdx23c_ep_168_sdr_7.0207.ckpt) | +| BandIt Plus | speech / music / effects | DnR test avg: 11.50 (speech: 15.64, music: 9.18 effects: 9.69) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v.1.0.3/config_dnr_bandit_bsrnn_multi_mus64.yaml) | [Weights](https://github.com/ZFTurbo/Music-Source-Separation-Training/releases/download/v.1.0.3/model_bandit_plus_dnr_sdr_11.47.chpt) | +| HTDemucs4 | bass / drums / vocals / other | Multisong avg: 9.16 (bass: 11.76, drums: 10.88 vocals: 8.24 other: 5.74) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_htdemucs.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/955717e8-8726e21a.th) | +| HTDemucs4 (6 stems) | bass / drums / vocals / other / piano / guitar | Multisong (bass: 11.22, drums: 10.22 vocals: 8.05 other: --- piano: --- guitar: ---) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_htdemucs_6stems.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/5c90dfd2-34c22ccb.th) | +| Demucs3 mmi | bass / drums / vocals / other | Multisong avg: 8.88 (bass: 11.17, drums: 10.70 vocals: 8.22 other: 5.42) | [Config](https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/configs/config_musdb18_demucs3_mmi.yaml) | [Weights](https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/75fc33f5-1941ce65.th) | + +~~*~~ **Note**: Model was trained only on MUSDB18HQ dataset (100 songs train data) + +## Dataset types + +Look here: [Dataset types](docs/dataset_types.md) + +## Augmentations + +Look here: [Augmentations](docs/augmentations.md) + +## Citation + +* [arxiv paper](https://arxiv.org/abs/2305.07489) + +``` +@misc{solovyev2023benchmarks, + title={Benchmarks and leaderboards for sound demixing tasks}, + author={Roman Solovyev and Alexander Stempkovskiy and Tatiana Habruseva}, + year={2023}, + eprint={2305.07489}, + archivePrefix={arXiv}, + primaryClass={cs.SD} +} +``` \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_dnr_bandit_bsrnn_multi_mus64.yaml b/data_pipeline/seperation/configs/config_dnr_bandit_bsrnn_multi_mus64.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2392ca496e498e57a99d70f1f28f73fe3dd7c432 --- /dev/null +++ b/data_pipeline/seperation/configs/config_dnr_bandit_bsrnn_multi_mus64.yaml @@ -0,0 +1,78 @@ +name: "MultiMaskMultiSourceBandSplitRNN" +audio: + chunk_size: 264600 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + in_channel: 1 + stems: ['speech', 'music', 'effects'] + band_specs: "musical" + n_bands: 64 + fs: 44100 + require_no_overlap: false + require_no_gap: true + normalize_channel_independently: false + treat_channel_as_feature: true + n_sqm_modules: 8 + 
emb_dim: 128 + rnn_dim: 256 + bidirectional: true + rnn_type: "GRU" + mlp_dim: 512 + hidden_activation: "Tanh" + hidden_activation_kwargs: null + complex_mask: true + n_fft: 2048 + win_length: 2048 + hop_length: 512 + window_fn: "hann_window" + wkwargs: null + power: null + center: true + normalized: true + pad_mode: "constant" + onesided: true + +training: + batch_size: 4 + gradient_accumulation_steps: 4 + grad_clip: 0 + instruments: + - speech + - music + - effects + lr: 9.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + all: + channel_shuffle: 0.5 # Set 0 or lower to disable + random_inverse: 0.1 # inverse track (better lower probability) + random_polarity: 0.5 # polarity change (multiply waveform to -1) + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_htdemucs_6stems.yaml b/data_pipeline/seperation/configs/config_htdemucs_6stems.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d06a489ec66794414dedd4c143f6e937b26ce666 --- /dev/null +++ b/data_pipeline/seperation/configs/config_htdemucs_6stems.yaml @@ -0,0 +1,127 @@ +audio: + chunk_size: 485100 # samplerate * segment + min_mean_abs: 0.001 + hop_length: 1024 + +training: + batch_size: 8 + gradient_accumulation_steps: 1 + grad_clip: 0 + segment: 11 + shift: 1 + samplerate: 44100 + channels: 2 + normalize: true + instruments: ['drums', 'bass', 'other', 'vocals', 'guitar', 'piano'] + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + optimizer: adam + lr: 9.0e-05 + patience: 2 + reduce_factor: 0.95 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: [0.2, 0.02] + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + all: + channel_shuffle: 0.5 # Set 0 or lower to disable + random_inverse: 0.1 # inverse track (better lower probability) + random_polarity: 0.5 # polarity change (multiply waveform to -1) + +inference: + num_overlap: 4 + batch_size: 8 + +model: htdemucs + +htdemucs: # see demucs/htdemucs.py for a detailed description + # Channels + channels: 48 + channels_time: + growth: 2 + # STFT + num_subbands: 1 + nfft: 4096 + 
wiener_iters: 0 + end_iters: 0 + wiener_residual: false + cac: true + # Main structure + depth: 4 + rewrite: true + # Frequency Branch + multi_freqs: [] + multi_freqs_depth: 3 + freq_emb: 0.2 + emb_scale: 10 + emb_smooth: true + # Convolutions + kernel_size: 8 + stride: 4 + time_stride: 2 + context: 1 + context_enc: 0 + # normalization + norm_starts: 4 + norm_groups: 4 + # DConv residual branch + dconv_mode: 3 + dconv_depth: 2 + dconv_comp: 8 + dconv_init: 1e-3 + # Before the Transformer + bottom_channels: 0 + # CrossTransformer + # ------ Common to all + # Regular parameters + t_layers: 5 + t_hidden_scale: 4.0 + t_heads: 8 + t_dropout: 0.0 + t_layer_scale: True + t_gelu: True + # ------------- Positional Embedding + t_emb: sin + t_max_positions: 10000 # for the scaled embedding + t_max_period: 10000.0 + t_weight_pos_embed: 1.0 + t_cape_mean_normalize: True + t_cape_augment: True + t_cape_glob_loc_scale: [5000.0, 1.0, 1.4] + t_sin_random_shift: 0 + # ------------- norm before a transformer encoder + t_norm_in: True + t_norm_in_group: False + # ------------- norm inside the encoder + t_group_norm: False + t_norm_first: True + t_norm_out: True + # ------------- optim + t_weight_decay: 0.0 + t_lr: + # ------------- sparsity + t_sparse_self_attn: False + t_sparse_cross_attn: False + t_mask_type: diag + t_mask_random_seed: 42 + t_sparse_attn_window: 400 + t_global_window: 100 + t_sparsity: 0.95 + t_auto_sparsity: False + # Cross Encoder First (False) + t_cross_first: False + # Weight init + rescale: 0.1 + diff --git a/data_pipeline/seperation/configs/config_musdb18_bs_roformer.yaml b/data_pipeline/seperation/configs/config_musdb18_bs_roformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aba1e8aa1fb1d998eadd79e5010831e7229053c2 --- /dev/null +++ b/data_pipeline/seperation/configs/config_musdb18_bs_roformer.yaml @@ -0,0 +1,134 @@ +audio: + chunk_size: 131584 + dim_f: 1024 + dim_t: 256 + hop_length: 512 + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + dim: 192 + depth: 6 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + freqs_per_bands: !!python/tuple + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 128 + - 129 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: true + dim_freqs_in: 1025 + stft_n_fft: 2048 + stft_hop_length: 512 + stft_win_length: 2048 + stft_normalized: false + mask_estimator_depth: 2 + multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 10 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - bass + - drums + - other + lr: 5.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: vocals + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + 
+augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_musdb18_demucs3_mmi.yaml b/data_pipeline/seperation/configs/config_musdb18_demucs3_mmi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08c25c50f8f747d0e4af7acae68b1e47a01f3d0c --- /dev/null +++ b/data_pipeline/seperation/configs/config_musdb18_demucs3_mmi.yaml @@ -0,0 +1,72 @@ +audio: + chunk_size: 485100 # samplerate * segment + min_mean_abs: 0.000 + hop_length: 1024 + +training: + batch_size: 8 + gradient_accumulation_steps: 1 + grad_clip: 0 + segment: 11 + shift: 1 + samplerate: 44100 + channels: 2 + normalize: true + instruments: ['drums', 'bass', 'other', 'vocals'] + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + optimizer: adam + lr: 9.0e-05 + patience: 2 + reduce_factor: 0.95 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: false # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + +inference: + num_overlap: 4 + batch_size: 8 + +model: hdemucs + +hdemucs: # see demucs/hdemucs.py for a detailed description + channels: 48 + channels_time: null + growth: 2 + nfft: 4096 + wiener_iters: 0 + end_iters: 0 + wiener_residual: False + cac: True + depth: 6 + rewrite: True + hybrid: True + hybrid_old: False + multi_freqs: [] + multi_freqs_depth: 3 + freq_emb: 0.2 + emb_scale: 10 + emb_smooth: True + kernel_size: 8 + stride: 4 + time_stride: 2 + context: 1 + context_enc: 0 + norm_starts: 4 + norm_groups: 4 + dconv_mode: 1 + dconv_depth: 2 + dconv_comp: 4 + dconv_attn: 4 + dconv_lstm: 4 + dconv_init: 0.001 + rescale: 0.1 diff --git a/data_pipeline/seperation/configs/config_musdb18_htdemucs.yaml b/data_pipeline/seperation/configs/config_musdb18_htdemucs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba635367baca0b58a977fa4bb38a1cec99579ca9 --- /dev/null +++ b/data_pipeline/seperation/configs/config_musdb18_htdemucs.yaml @@ -0,0 +1,119 @@ +audio: + chunk_size: 485100 # samplerate * segment + min_mean_abs: 0.001 + hop_length: 1024 + +training: + batch_size: 8 + gradient_accumulation_steps: 1 + grad_clip: 0 + segment: 11 + shift: 1 + samplerate: 44100 + channels: 2 + normalize: true + instruments: ['drums', 'bass', 'other', 'vocals'] + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + optimizer: adam + lr: 9.0e-05 + patience: 2 + reduce_factor: 0.95 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + +inference: + num_overlap: 4 + 
batch_size: 8 + +model: htdemucs + +htdemucs: # see demucs/htdemucs.py for a detailed description + # Channels + channels: 48 + channels_time: + growth: 2 + # STFT + num_subbands: 1 + nfft: 4096 + wiener_iters: 0 + end_iters: 0 + wiener_residual: false + cac: true + # Main structure + depth: 4 + rewrite: true + # Frequency Branch + multi_freqs: [] + multi_freqs_depth: 3 + freq_emb: 0.2 + emb_scale: 10 + emb_smooth: true + # Convolutions + kernel_size: 8 + stride: 4 + time_stride: 2 + context: 1 + context_enc: 0 + # normalization + norm_starts: 4 + norm_groups: 4 + # DConv residual branch + dconv_mode: 3 + dconv_depth: 2 + dconv_comp: 8 + dconv_init: 1e-3 + # Before the Transformer + bottom_channels: 512 + # CrossTransformer + # ------ Common to all + # Regular parameters + t_layers: 5 + t_hidden_scale: 4.0 + t_heads: 8 + t_dropout: 0.0 + t_layer_scale: True + t_gelu: True + # ------------- Positional Embedding + t_emb: sin + t_max_positions: 10000 # for the scaled embedding + t_max_period: 10000.0 + t_weight_pos_embed: 1.0 + t_cape_mean_normalize: True + t_cape_augment: True + t_cape_glob_loc_scale: [5000.0, 1.0, 1.4] + t_sin_random_shift: 0 + # ------------- norm before a transformer encoder + t_norm_in: True + t_norm_in_group: False + # ------------- norm inside the encoder + t_group_norm: False + t_norm_first: True + t_norm_out: True + # ------------- optim + t_weight_decay: 0.0 + t_lr: + # ------------- sparsity + t_sparse_self_attn: False + t_sparse_cross_attn: False + t_mask_type: diag + t_mask_random_seed: 42 + t_sparse_attn_window: 400 + t_global_window: 100 + t_sparsity: 0.95 + t_auto_sparsity: False + # Cross Encoder First (False) + t_cross_first: False + # Weight init + rescale: 0.1 + diff --git a/data_pipeline/seperation/configs/config_musdb18_mdx23c.yaml b/data_pipeline/seperation/configs/config_musdb18_mdx23c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73631f7293c8db94c55c7e1db9fdfc79c712d6e0 --- /dev/null +++ b/data_pipeline/seperation/configs/config_musdb18_mdx23c.yaml @@ -0,0 +1,182 @@ +audio: + chunk_size: 261120 + dim_f: 4096 + dim_t: 256 + hop_length: 1024 + n_fft: 8192 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + act: gelu + bottleneck_factor: 4 + growth: 128 + norm: InstanceNorm + num_blocks_per_scale: 2 + num_channels: 128 + num_scales: 5 + num_subbands: 4 + scale: + - 2 + - 2 + +training: + batch_size: 6 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - bass + - drums + - other + lr: 9.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + + # apply mp3 compression to mixture only (emulate downloading mp3 from internet) + mp3_compression_on_mixture: 
0.01 + mp3_compression_on_mixture_bitrate_min: 32 + mp3_compression_on_mixture_bitrate_max: 320 + mp3_compression_on_mixture_backend: "lameenc" + + all: + channel_shuffle: 0.5 # Set 0 or lower to disable + random_inverse: 0.1 # inverse track (better lower probability) + random_polarity: 0.5 # polarity change (multiply waveform to -1) + mp3_compression: 0.01 + mp3_compression_min_bitrate: 32 + mp3_compression_max_bitrate: 320 + mp3_compression_backend: "lameenc" + + # pedalboard reverb block + pedalboard_reverb: 0.01 + pedalboard_reverb_room_size_min: 0.1 + pedalboard_reverb_room_size_max: 0.9 + pedalboard_reverb_damping_min: 0.1 + pedalboard_reverb_damping_max: 0.9 + pedalboard_reverb_wet_level_min: 0.1 + pedalboard_reverb_wet_level_max: 0.9 + pedalboard_reverb_dry_level_min: 0.1 + pedalboard_reverb_dry_level_max: 0.9 + pedalboard_reverb_width_min: 0.9 + pedalboard_reverb_width_max: 1.0 + + # pedalboard chorus block + pedalboard_chorus: 0.01 + pedalboard_chorus_rate_hz_min: 1.0 + pedalboard_chorus_rate_hz_max: 7.0 + pedalboard_chorus_depth_min: 0.25 + pedalboard_chorus_depth_max: 0.95 + pedalboard_chorus_centre_delay_ms_min: 3 + pedalboard_chorus_centre_delay_ms_max: 10 + pedalboard_chorus_feedback_min: 0.0 + pedalboard_chorus_feedback_max: 0.5 + pedalboard_chorus_mix_min: 0.1 + pedalboard_chorus_mix_max: 0.9 + + # pedalboard phazer block + pedalboard_phazer: 0.01 + pedalboard_phazer_rate_hz_min: 1.0 + pedalboard_phazer_rate_hz_max: 10.0 + pedalboard_phazer_depth_min: 0.25 + pedalboard_phazer_depth_max: 0.95 + pedalboard_phazer_centre_frequency_hz_min: 200 + pedalboard_phazer_centre_frequency_hz_max: 12000 + pedalboard_phazer_feedback_min: 0.0 + pedalboard_phazer_feedback_max: 0.5 + pedalboard_phazer_mix_min: 0.1 + pedalboard_phazer_mix_max: 0.9 + + # pedalboard distortion block + pedalboard_distortion: 0.01 + pedalboard_distortion_drive_db_min: 1.0 + pedalboard_distortion_drive_db_max: 25.0 + + # pedalboard pitch shift block + pedalboard_pitch_shift: 0.01 + pedalboard_pitch_shift_semitones_min: -7 + pedalboard_pitch_shift_semitones_max: 7 + + # pedalboard resample block + pedalboard_resample: 0.01 + pedalboard_resample_target_sample_rate_min: 4000 + pedalboard_resample_target_sample_rate_max: 44100 + + # pedalboard bitcrash block + pedalboard_bitcrash: 0.01 + pedalboard_bitcrash_bit_depth_min: 4 + pedalboard_bitcrash_bit_depth_max: 16 + + # pedalboard mp3 compressor block + pedalboard_mp3_compressor: 0.01 + pedalboard_mp3_compressor_pedalboard_mp3_compressor_min: 0 + pedalboard_mp3_compressor_pedalboard_mp3_compressor_max: 9.999 + + vocals: + pitch_shift: 0.1 + pitch_shift_min_semitones: -5 + pitch_shift_max_semitones: 5 + seven_band_parametric_eq: 0.25 + seven_band_parametric_eq_min_gain_db: -9 + seven_band_parametric_eq_max_gain_db: 9 + tanh_distortion: 0.1 + tanh_distortion_min: 0.1 + tanh_distortion_max: 0.7 + bass: + pitch_shift: 0.1 + pitch_shift_min_semitones: -2 + pitch_shift_max_semitones: 2 + seven_band_parametric_eq: 0.25 + seven_band_parametric_eq_min_gain_db: -3 + seven_band_parametric_eq_max_gain_db: 6 + tanh_distortion: 0.2 + tanh_distortion_min: 0.1 + tanh_distortion_max: 0.5 + drums: + pitch_shift: 0.33 + pitch_shift_min_semitones: -5 + pitch_shift_max_semitones: 5 + seven_band_parametric_eq: 0.25 + seven_band_parametric_eq_min_gain_db: -9 + seven_band_parametric_eq_max_gain_db: 9 + tanh_distortion: 0.33 + tanh_distortion_min: 0.1 + tanh_distortion_max: 0.6 + other: + pitch_shift: 0.1 + pitch_shift_min_semitones: -4 + pitch_shift_max_semitones: 4 + gaussian_noise: 0.1 + 
gaussian_noise_min_amplitude: 0.001 + gaussian_noise_max_amplitude: 0.015 + time_stretch: 0.01 + time_stretch_min_rate: 0.8 + time_stretch_max_rate: 1.25 + + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_musdb18_mel_band_roformer.yaml b/data_pipeline/seperation/configs/config_musdb18_mel_band_roformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f5f0a3e248014330ea232f3e32c7b176759a7fd0 --- /dev/null +++ b/data_pipeline/seperation/configs/config_musdb18_mel_band_roformer.yaml @@ -0,0 +1,73 @@ +audio: + chunk_size: 131584 + dim_f: 1024 + dim_t: 256 + hop_length: 512 + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + dim: 192 + depth: 8 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + num_bands: 60 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: True + dim_freqs_in: 1025 + sample_rate: 44100 # needed for mel filter bank from librosa + stft_n_fft: 2048 + stft_hop_length: 512 + stft_win_length: 2048 + stft_normalized: False + mask_estimator_depth: 2 + multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 7 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - bass + - drums + - other + lr: 5.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: vocals + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 diff --git a/data_pipeline/seperation/configs/config_musdb18_scnet.yaml b/data_pipeline/seperation/configs/config_musdb18_scnet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb683c53f6d6a087a312ca326aaef5e384aabe8d --- /dev/null +++ b/data_pipeline/seperation/configs/config_musdb18_scnet.yaml @@ -0,0 +1,64 @@ +audio: + chunk_size: 264600 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + dims: [4, 32, 64, 128] + bandsplit_ratios: [.175, .392, .433] + downsample_strides: [1, 4, 16] + n_conv_modules: [3, 2, 1] + n_rnn_layers: 6 + rnn_hidden_dim: 128 + n_sources: 4 + + n_fft: 4096 + hop_length: 1024 + win_length: 4096 + stft_normalized: false + + use_mamba: true + d_state: 16 + d_conv: 4 + d_expand: 2 + +training: + batch_size: 10 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - bass + - drums + - other + lr: 5.0e-04 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or 
disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: + !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 diff --git a/data_pipeline/seperation/configs/config_musdb18_segm_models.yaml b/data_pipeline/seperation/configs/config_musdb18_segm_models.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cbec03910a628bd83c6f42f3984f5d9ba732a9fd --- /dev/null +++ b/data_pipeline/seperation/configs/config_musdb18_segm_models.yaml @@ -0,0 +1,92 @@ +audio: + chunk_size: 261632 + dim_f: 4096 + dim_t: 512 + hop_length: 512 + n_fft: 8192 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + encoder_name: tu-maxvit_large_tf_512 # look here for possibilities: https://github.com/qubvel/segmentation_models.pytorch#encoders- + decoder_type: unet # unet, fpn + act: gelu + num_channels: 128 + num_subbands: 8 + +training: + batch_size: 7 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - bass + - drums + - other + lr: 5.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 2000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adamw + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + + # apply mp3 compression to mixture only (emulate downloading mp3 from internet) + mp3_compression_on_mixture: 0.01 + mp3_compression_on_mixture_bitrate_min: 32 + mp3_compression_on_mixture_bitrate_max: 320 + mp3_compression_on_mixture_backend: "lameenc" + + all: + channel_shuffle: 0.5 # Set 0 or lower to disable + random_inverse: 0.1 # inverse track (better lower probability) + random_polarity: 0.5 # polarity change (multiply waveform to -1) + mp3_compression: 0.01 + mp3_compression_min_bitrate: 32 + mp3_compression_max_bitrate: 320 + mp3_compression_backend: "lameenc" + + vocals: + pitch_shift: 0.1 + pitch_shift_min_semitones: -5 + pitch_shift_max_semitones: 5 + seven_band_parametric_eq: 0.25 + seven_band_parametric_eq_min_gain_db: -9 + seven_band_parametric_eq_max_gain_db: 9 + tanh_distortion: 0.1 + tanh_distortion_min: 0.1 + tanh_distortion_max: 0.7 + other: + pitch_shift: 0.1 + pitch_shift_min_semitones: -4 + pitch_shift_max_semitones: 4 + gaussian_noise: 0.1 + gaussian_noise_min_amplitude: 0.001 + gaussian_noise_max_amplitude: 0.015 + time_stretch: 0.01 + time_stretch_min_rate: 0.8 + time_stretch_max_rate: 1.25 + + +inference: + batch_size: 1 + dim_t: 512 + num_overlap: 4 \ No newline at end of file diff 
--git a/data_pipeline/seperation/configs/config_vocals_bandit_bsrnn_multi_mus64.yaml b/data_pipeline/seperation/configs/config_vocals_bandit_bsrnn_multi_mus64.yaml new file mode 100644 index 0000000000000000000000000000000000000000..432ae32c19e6136806a718ca882afc516f2aa1f4 --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_bandit_bsrnn_multi_mus64.yaml @@ -0,0 +1,73 @@ +name: "MultiMaskMultiSourceBandSplitRNN" +audio: + chunk_size: 264600 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + in_channel: 1 + stems: ['vocals', 'other'] + band_specs: "musical" + n_bands: 64 + fs: 44100 + require_no_overlap: false + require_no_gap: true + normalize_channel_independently: false + treat_channel_as_feature: true + n_sqm_modules: 8 + emb_dim: 128 + rnn_dim: 256 + bidirectional: true + rnn_type: "GRU" + mlp_dim: 512 + hidden_activation: "Tanh" + hidden_activation_kwargs: null + complex_mask: true + n_fft: 2048 + win_length: 2048 + hop_length: 512 + window_fn: "hann_window" + wkwargs: null + power: null + center: true + normalized: true + pad_mode: "constant" + onesided: true + +training: + batch_size: 4 + gradient_accumulation_steps: 4 + grad_clip: 0 + instruments: + - vocals + - other + lr: 9.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_vocals_bs_roformer.yaml b/data_pipeline/seperation/configs/config_vocals_bs_roformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69a3ee8d84d4336a272bd9e0c85288c89d7aedee --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_bs_roformer.yaml @@ -0,0 +1,138 @@ +audio: + chunk_size: 131584 + dim_f: 1024 + dim_t: 256 + hop_length: 512 + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + dim: 192 + depth: 6 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + freqs_per_bands: !!python/tuple + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 128 + - 129 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: true + dim_freqs_in: 1025 + stft_n_fft: 2048 + stft_hop_length: 512 + stft_win_length: 2048 + stft_normalized: false + mask_estimator_depth: 2 + 
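The `multi_stft_*` keys that continue just below (and that recur in the other roformer configs in this diff) configure an auxiliary loss comparing the estimate and the target at several STFT resolutions. That loss is computed inside the (BS-)RoFormer model code, which is not included in this excerpt; the sketch below only illustrates the general shape of such a term, reusing the window sizes and hop size from these configs. The function name and the exact reduction/weighting are assumptions.

```python
# Rough, assumed sketch of a multi-resolution STFT term; not the repo's implementation.
import torch

def multi_res_stft_loss(est: torch.Tensor, target: torch.Tensor,
                        window_sizes=(4096, 2048, 1024, 512, 256),
                        hop_size: int = 147, weight: float = 1.0) -> torch.Tensor:
    """est, target: (batch, channels, samples) waveforms."""
    est2d = est.reshape(-1, est.shape[-1])
    tgt2d = target.reshape(-1, target.shape[-1])
    loss = est.new_zeros(())
    for n_fft in window_sizes:
        window = torch.hann_window(n_fft, device=est.device)
        spec_e = torch.stft(est2d, n_fft=n_fft, hop_length=hop_size,
                            window=window, return_complex=True)
        spec_t = torch.stft(tgt2d, n_fft=n_fft, hop_length=hop_size,
                            window=window, return_complex=True)
        loss = loss + (spec_e - spec_t).abs().mean()
    return weight * loss
```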
multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 10 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - other + lr: 5.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: vocals + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_vocals_htdemucs.yaml b/data_pipeline/seperation/configs/config_vocals_htdemucs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..796004a5cbd8a841963b5b41616ffd5cf8b247ea --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_htdemucs.yaml @@ -0,0 +1,123 @@ +audio: + chunk_size: 485100 # samplerate * segment + min_mean_abs: 0.001 + hop_length: 1024 + +training: + batch_size: 10 + gradient_accumulation_steps: 1 + grad_clip: 0 + segment: 11 + shift: 1 + samplerate: 44100 + channels: 2 + normalize: true + instruments: ['vocals', 'other'] + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + optimizer: adam + lr: 9.0e-05 + patience: 2 + reduce_factor: 0.95 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: [0.2, 0.02] + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + num_overlap: 2 + batch_size: 8 + +model: htdemucs + +htdemucs: # see demucs/htdemucs.py for a detailed description + # Channels + channels: 48 + channels_time: + growth: 2 + # STFT + num_subbands: 1 + nfft: 4096 + wiener_iters: 0 + end_iters: 0 + wiener_residual: false + cac: true + # Main structure + depth: 4 + rewrite: true + # Frequency Branch + multi_freqs: [] + multi_freqs_depth: 3 + freq_emb: 0.2 + emb_scale: 10 + emb_smooth: true + # Convolutions + kernel_size: 8 + stride: 4 + time_stride: 2 + context: 1 + context_enc: 0 + # normalization + norm_starts: 4 + norm_groups: 4 + # DConv residual branch + dconv_mode: 3 + dconv_depth: 2 + dconv_comp: 8 + dconv_init: 1e-3 + # Before the Transformer + bottom_channels: 512 + # CrossTransformer + # ------ Common to all + # Regular 
parameters + t_layers: 5 + t_hidden_scale: 4.0 + t_heads: 8 + t_dropout: 0.0 + t_layer_scale: True + t_gelu: True + # ------------- Positional Embedding + t_emb: sin + t_max_positions: 10000 # for the scaled embedding + t_max_period: 10000.0 + t_weight_pos_embed: 1.0 + t_cape_mean_normalize: True + t_cape_augment: True + t_cape_glob_loc_scale: [5000.0, 1.0, 1.4] + t_sin_random_shift: 0 + # ------------- norm before a transformer encoder + t_norm_in: True + t_norm_in_group: False + # ------------- norm inside the encoder + t_group_norm: False + t_norm_first: True + t_norm_out: True + # ------------- optim + t_weight_decay: 0.0 + t_lr: + # ------------- sparsity + t_sparse_self_attn: False + t_sparse_cross_attn: False + t_mask_type: diag + t_mask_random_seed: 42 + t_sparse_attn_window: 400 + t_global_window: 100 + t_sparsity: 0.95 + t_auto_sparsity: False + # Cross Encoder First (False) + t_cross_first: False + # Weight init + rescale: 0.1 + diff --git a/data_pipeline/seperation/configs/config_vocals_mdx23c.yaml b/data_pipeline/seperation/configs/config_vocals_mdx23c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df73a6bd86570f81b7c9c5b3a7251bafb135294f --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_mdx23c.yaml @@ -0,0 +1,95 @@ +audio: + chunk_size: 261120 + dim_f: 4096 + dim_t: 256 + hop_length: 1024 + n_fft: 8192 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + act: gelu + bottleneck_factor: 4 + growth: 128 + norm: InstanceNorm + num_blocks_per_scale: 2 + num_channels: 128 + num_scales: 5 + num_subbands: 4 + scale: + - 2 + - 2 + +training: + batch_size: 6 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - other + lr: 9.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + + # apply mp3 compression to mixture only (emulate downloading mp3 from internet) + mp3_compression_on_mixture: 0.01 + mp3_compression_on_mixture_bitrate_min: 32 + mp3_compression_on_mixture_bitrate_max: 320 + mp3_compression_on_mixture_backend: "lameenc" + + all: + channel_shuffle: 0.5 # Set 0 or lower to disable + random_inverse: 0.1 # inverse track (better lower probability) + random_polarity: 0.5 # polarity change (multiply waveform to -1) + mp3_compression: 0.01 + mp3_compression_min_bitrate: 32 + mp3_compression_max_bitrate: 320 + mp3_compression_backend: "lameenc" + + vocals: + pitch_shift: 0.1 + pitch_shift_min_semitones: -5 + pitch_shift_max_semitones: 5 + seven_band_parametric_eq: 0.25 + seven_band_parametric_eq_min_gain_db: -9 + seven_band_parametric_eq_max_gain_db: 9 + tanh_distortion: 0.1 + tanh_distortion_min: 0.1 + tanh_distortion_max: 0.7 + other: + pitch_shift: 0.1 + 
pitch_shift_min_semitones: -4 + pitch_shift_max_semitones: 4 + gaussian_noise: 0.1 + gaussian_noise_min_amplitude: 0.001 + gaussian_noise_max_amplitude: 0.015 + time_stretch: 0.01 + time_stretch_min_rate: 0.8 + time_stretch_max_rate: 1.25 + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_vocals_mel_band_roformer.yaml b/data_pipeline/seperation/configs/config_vocals_mel_band_roformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8ec0ce45992bf6bbf35e496c0e604f8d4036e965 --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_mel_band_roformer.yaml @@ -0,0 +1,77 @@ +audio: + chunk_size: 131584 + dim_f: 1024 + dim_t: 256 + hop_length: 512 + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + dim: 192 + depth: 8 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + num_bands: 60 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: True + dim_freqs_in: 1025 + sample_rate: 44100 # needed for mel filter bank from librosa + stft_n_fft: 2048 + stft_hop_length: 512 + stft_win_length: 2048 + stft_normalized: False + mask_estimator_depth: 2 + multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 7 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - other + lr: 5.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: vocals + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 1 + dim_t: 256 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_vocals_scnet.yaml b/data_pipeline/seperation/configs/config_vocals_scnet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9a8587573d57c373a4548420293f8f890e2fccf --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_scnet.yaml @@ -0,0 +1,71 @@ +audio: + chunk_size: 264600 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.000 + +model: + sources: ['vocals', 'other'] + audio_channels: 2 + # dims: [4, 32, 64, 128] # small version + dims: [4, 64, 128, 256] + nfft: 4096 + hop_size: 1024 + win_size: 4096 + normalized: True + band_configs: { + 'low': { 'SR': .175, 'stride': 1, 'kernel': 3 }, + 'mid': { 'SR': .392, 'stride': 4, 'kernel': 4 }, + 'high': { 'SR': .433, 'stride': 16, 'kernel': 16 } + } + conv_depths: [3, 2, 1] + compress: 4 + conv_kernel: 3 + # Dual-path RNN + num_dplayer: 6 + expand: 1 + # mamba + use_mamba: False + mamba_config: { 
+ 'd_stat': 16, + 'd_conv': 4, + 'd_expand': 2 + } + +training: + batch_size: 4 + gradient_accumulation_steps: 2 + grad_clip: 0 + instruments: + - vocals + - other + lr: 5.0e-04 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: + !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 8 + dim_t: 256 + num_overlap: 4 diff --git a/data_pipeline/seperation/configs/config_vocals_scnet_unofficial.yaml b/data_pipeline/seperation/configs/config_vocals_scnet_unofficial.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d3e604e4992e1f4090da91227c9ecc5e66e9117 --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_scnet_unofficial.yaml @@ -0,0 +1,62 @@ +audio: + chunk_size: 264600 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.000 + +model: + dims: [4, 32, 64, 128] + bandsplit_ratios: [.175, .392, .433] + downsample_strides: [1, 4, 16] + n_conv_modules: [3, 2, 1] + n_rnn_layers: 6 + rnn_hidden_dim: 128 + n_sources: 2 + + n_fft: 4096 + hop_length: 1024 + win_length: 4096 + stft_normalized: false + + use_mamba: false + d_state: 16 + d_conv: 4 + d_expand: 2 + +training: + batch_size: 10 + gradient_accumulation_steps: 2 + grad_clip: 0 + instruments: + - vocals + - other + lr: 5.0e-04 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: + !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 8 + dim_t: 256 + num_overlap: 4 diff --git a/data_pipeline/seperation/configs/config_vocals_segm_models.yaml b/data_pipeline/seperation/configs/config_vocals_segm_models.yaml new file mode 100644 index 0000000000000000000000000000000000000000..44711a0658a95289c8d3745a6d78114b937df1fa --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_segm_models.yaml @@ -0,0 +1,78 @@ +audio: + chunk_size: 261632 + dim_f: 4096 + dim_t: 512 + hop_length: 512 + n_fft: 8192 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + encoder_name: 
tu-maxvit_large_tf_512 # look here for possibilities: https://github.com/qubvel/segmentation_models.pytorch#encoders- + decoder_type: unet # unet, fpn + act: gelu + num_channels: 128 + num_subbands: 8 + +loss_multistft: + fft_sizes: + - 1024 + - 2048 + - 4096 + hop_sizes: + - 512 + - 1024 + - 2048 + win_lengths: + - 1024 + - 2048 + - 4096 + window: "hann_window" + scale: "mel" + n_bins: 128 + sample_rate: 44100 + perceptual_weighting: true + w_sc: 1.0 + w_log_mag: 1.0 + w_lin_mag: 0.0 + w_phs: 0.0 + mag_distance: "L1" + + +training: + batch_size: 8 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - other + lr: 5.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 2000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adamw + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 1 + dim_t: 512 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/config_vocals_swin_upernet.yaml b/data_pipeline/seperation/configs/config_vocals_swin_upernet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ebc81cfae8803d17a104ecf0db3a6eeb3e37b99 --- /dev/null +++ b/data_pipeline/seperation/configs/config_vocals_swin_upernet.yaml @@ -0,0 +1,50 @@ +audio: + chunk_size: 261632 + dim_f: 4096 + dim_t: 512 + hop_length: 512 + n_fft: 8192 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + act: gelu + num_channels: 16 + num_subbands: 8 + +training: + batch_size: 14 + gradient_accumulation_steps: 4 + grad_clip: 0 + instruments: + - vocals + - other + lr: 3.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: null + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adamw + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 1 + dim_t: 512 + num_overlap: 4 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_317_sdr_12.9755.yaml b/data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_317_sdr_12.9755.yaml new file mode 100644 index 0000000000000000000000000000000000000000..135a051897dee27285ac46ee350afe1e1ec02011 --- /dev/null +++ 
b/data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_317_sdr_12.9755.yaml @@ -0,0 +1,126 @@ +audio: + chunk_size: 352800 + dim_f: 1024 + dim_t: 801 # don't work (use in model) + hop_length: 441 # don't work (use in model) + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.000 + +model: + dim: 512 + depth: 12 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + freqs_per_bands: !!python/tuple + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 128 + - 129 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: true + dim_freqs_in: 1025 + stft_n_fft: 2048 + stft_hop_length: 441 + stft_win_length: 2048 + stft_normalized: false + mask_estimator_depth: 2 + multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 2 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - other + lr: 1.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: vocals + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +inference: + batch_size: 4 + dim_t: 801 + num_overlap: 2 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_937_sdr_10.5309.yaml b/data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_937_sdr_10.5309.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5e9a0b670759dd378af60e09e0a5e3c650cbf7c --- /dev/null +++ b/data_pipeline/seperation/configs/viperx/model_bs_roformer_ep_937_sdr_10.5309.yaml @@ -0,0 +1,138 @@ +audio: + chunk_size: 131584 + dim_f: 1024 + dim_t: 256 + hop_length: 512 + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.001 + +model: + dim: 384 + depth: 12 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + freqs_per_bands: !!python/tuple + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 128 + - 129 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: true + dim_freqs_in: 1025 + stft_n_fft: 2048 + stft_hop_length: 512 + stft_win_length: 2048 + stft_normalized: false + mask_estimator_depth: 2 + multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 4 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - 
vocals + - other + lr: 5.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: other + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + +inference: + batch_size: 8 + dim_t: 512 + num_overlap: 2 \ No newline at end of file diff --git a/data_pipeline/seperation/configs/viperx/model_mel_band_roformer_ep_3005_sdr_11.4360.yaml b/data_pipeline/seperation/configs/viperx/model_mel_band_roformer_ep_3005_sdr_11.4360.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7cb922c9c06076e382826decc017ca9d760b9623 --- /dev/null +++ b/data_pipeline/seperation/configs/viperx/model_mel_band_roformer_ep_3005_sdr_11.4360.yaml @@ -0,0 +1,65 @@ +audio: + chunk_size: 352800 + dim_f: 1024 + dim_t: 801 # don't work (use in model) + hop_length: 441 # don't work (use in model) + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.000 + +model: + dim: 384 + depth: 12 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + num_bands: 60 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: True + dim_freqs_in: 1025 + sample_rate: 44100 # needed for mel filter bank from librosa + stft_n_fft: 2048 + stft_hop_length: 441 + stft_win_length: 2048 + stft_normalized: False + mask_estimator_depth: 2 + multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 1 + gradient_accumulation_steps: 8 + grad_clip: 0 + instruments: + - vocals + - other + lr: 4.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: vocals + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true + +inference: + batch_size: 4 + dim_t: 801 + num_overlap: 2 \ No newline at end of file diff --git a/data_pipeline/seperation/dataset.py b/data_pipeline/seperation/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..fd6fa5ab0cfcf1ced6192fafd606ca059fb149ab --- /dev/null +++ b/data_pipeline/seperation/dataset.py @@ -0,0 +1,566 @@ +# coding: utf-8 +__author__ = 'Roman Solovyev (ZFTurbo): https://github.com/ZFTurbo/' + + +import os +import random +import numpy as np +import torch +import soundfile as sf +import pickle +import time +from tqdm import tqdm +from glob import glob +import audiomentations as AU +import pedalboard as PB +import warnings +warnings.filterwarnings("ignore") + + +def load_chunk(path, length, 
chunk_size, offset=None): + if chunk_size <= length: + if offset is None: + offset = np.random.randint(length - chunk_size + 1) + x = sf.read(path, dtype='float32', start=offset, frames=chunk_size)[0] + else: + x = sf.read(path, dtype='float32')[0] + pad = np.zeros([chunk_size - length, 2]) + x = np.concatenate([x, pad]) + return x.T + + +class MSSDataset(torch.utils.data.Dataset): + def __init__(self, config, data_path, metadata_path="metadata.pkl", dataset_type=1, batch_size=None): + self.config = config + self.dataset_type = dataset_type # 1, 2, 3 or 4 + self.instruments = instruments = config.training.instruments + if batch_size is None: + batch_size = config.training.batch_size + self.batch_size = batch_size + self.file_types = ['wav', 'flac'] + + # Augmentation block + self.aug = False + if 'augmentations' in config: + if config['augmentations'].enable is True: + print('Use augmentation for training') + self.aug = True + else: + print('There is no augmentations block in config. Augmentations disabled for training...') + + # metadata_path = data_path + '/metadata' + try: + metadata = pickle.load(open(metadata_path, 'rb')) + print('Loading songs data from cache: {}. If you updated dataset remove {} before training!'.format(metadata_path, os.path.basename(metadata_path))) + except Exception: + print('Collecting metadata for', str(data_path), 'Dataset type:', self.dataset_type) + if self.dataset_type in [1, 4]: + metadata = [] + track_paths = [] + if type(data_path) == list: + for tp in data_path: + track_paths += sorted(glob(tp + '/*')) + else: + track_paths += sorted(glob(data_path + '/*')) + + track_paths = [path for path in track_paths if os.path.basename(path)[0] != '.' and os.path.isdir(path)] + for path in tqdm(track_paths): + # Check lengths of all instruments (it can be different in some cases) + lengths_arr = [] + for instr in instruments: + length = -1 + for extension in self.file_types: + path_to_audio_file = path + '/{}.{}'.format(instr, extension) + if os.path.isfile(path_to_audio_file): + length = len(sf.read(path_to_audio_file)[0]) + break + if length == -1: + print('Cant find file "{}" in folder {}'.format(instr, path)) + continue + lengths_arr.append(length) + lengths_arr = np.array(lengths_arr) + if lengths_arr.min() != lengths_arr.max(): + print('Warning: lengths of stems are different for path: {}. 
({} != {})'.format( + path, + lengths_arr.min(), + lengths_arr.max()) + ) + # We use minimum to allow overflow for soundfile read in non-equal length cases + metadata.append((path, lengths_arr.min())) + elif self.dataset_type == 2: + metadata = dict() + for instr in self.instruments: + metadata[instr] = [] + track_paths = [] + if type(data_path) == list: + for tp in data_path: + track_paths += sorted(glob(tp + '/{}/*.wav'.format(instr))) + track_paths += sorted(glob(tp + '/{}/*.flac'.format(instr))) + else: + track_paths += sorted(glob(data_path + '/{}/*.wav'.format(instr))) + track_paths += sorted(glob(data_path + '/{}/*.flac'.format(instr))) + + for path in tqdm(track_paths): + length = len(sf.read(path)[0]) + metadata[instr].append((path, length)) + elif self.dataset_type == 3: + import pandas as pd + if type(data_path) != list: + data_path = [data_path] + + metadata = dict() + for i in range(len(data_path)): + print('Reading tracks from: {}'.format(data_path[i])) + df = pd.read_csv(data_path[i]) + + skipped = 0 + for instr in self.instruments: + part = df[df['instrum'] == instr].copy() + print('Tracks found for {}: {}'.format(instr, len(part))) + for instr in self.instruments: + part = df[df['instrum'] == instr].copy() + metadata[instr] = [] + track_paths = list(part['path'].values) + for path in tqdm(track_paths): + if not os.path.isfile(path): + print('Cant find track: {}'.format(path)) + skipped += 1 + continue + # print(path) + try: + length = len(sf.read(path)[0]) + except: + print('Problem with path: {}'.format(path)) + skipped += 1 + continue + metadata[instr].append((path, length)) + if skipped > 0: + print('Missing tracks: {} from {}'.format(skipped, len(df))) + else: + print('Unknown dataset type: {}. Must be 1, 2 or 3'.format(self.dataset_type)) + exit() + + pickle.dump(metadata, open(metadata_path, 'wb')) + + if self.dataset_type in [1, 4]: + if len(metadata) > 0: + print('Found tracks in dataset: {}'.format(len(metadata))) + else: + print('No tracks found for training. 
Check paths you provided!') + exit() + else: + for instr in self.instruments: + print('Found tracks for {} in dataset: {}'.format(instr, len(metadata[instr]))) + self.metadata = metadata + self.chunk_size = config.audio.chunk_size + self.min_mean_abs = config.audio.min_mean_abs + + def __len__(self): + return self.config.training.num_steps * self.batch_size + + def load_source(self, metadata, instr): + while True: + if self.dataset_type in [1, 4]: + track_path, track_length = random.choice(metadata) + for extension in self.file_types: + path_to_audio_file = track_path + '/{}.{}'.format(instr, extension) + if os.path.isfile(path_to_audio_file): + try: + source = load_chunk(path_to_audio_file, track_length, self.chunk_size) + except Exception as e: + # Sometimes error during FLAC reading, catch it and use zero stem + print('Error: {} Path: {}'.format(e, path_to_audio_file)) + source = np.zeros((2, self.chunk_size), dtype=np.float32) + break + else: + track_path, track_length = random.choice(metadata[instr]) + try: + source = load_chunk(track_path, track_length, self.chunk_size) + except Exception as e: + # Sometimes error during FLAC reading, catch it and use zero stem + print('Error: {} Path: {}'.format(e, track_path)) + source = np.zeros((2, self.chunk_size), dtype=np.float32) + + if np.abs(source).mean() >= self.min_mean_abs: # remove quiet chunks + break + if self.aug: + source = self.augm_data(source, instr) + return torch.tensor(source, dtype=torch.float32) + + def load_random_mix(self): + res = [] + for instr in self.instruments: + s1 = self.load_source(self.metadata, instr) + # Mixup augmentation. Multiple mix of same type of stems + if self.aug: + if 'mixup' in self.config['augmentations']: + if self.config['augmentations'].mixup: + mixup = [s1] + for prob in self.config.augmentations.mixup_probs: + if random.uniform(0, 1) < prob: + s2 = self.load_source(self.metadata, instr) + mixup.append(s2) + mixup = torch.stack(mixup, dim=0) + loud_values = np.random.uniform( + low=self.config.augmentations.loudness_min, + high=self.config.augmentations.loudness_max, + size=(len(mixup),) + ) + loud_values = torch.tensor(loud_values, dtype=torch.float32) + mixup *= loud_values[:, None, None] + s1 = mixup.mean(dim=0, dtype=torch.float32) + res.append(s1) + res = torch.stack(res) + return res + + def load_aligned_data(self): + track_path, track_length = random.choice(self.metadata) + res = [] + for i in self.instruments: + attempts = 10 + while attempts: + for extension in self.file_types: + path_to_audio_file = track_path + '/{}.{}'.format(i, extension) + if os.path.isfile(path_to_audio_file): + try: + source = load_chunk(path_to_audio_file, track_length, self.chunk_size) + except Exception as e: + # Sometimes error during FLAC reading, catch it and use zero stem + print('Error: {} Path: {}'.format(e, path_to_audio_file)) + source = np.zeros((2, self.chunk_size), dtype=np.float32) + break + if np.abs(source).mean() >= self.min_mean_abs: # remove quiet chunks + break + attempts -= 1 + if attempts <= 0: + print('Attempts max!', track_path) + res.append(source) + res = np.stack(res, axis=0) + if self.aug: + for i, instr in enumerate(self.instruments): + res[i] = self.augm_data(res[i], instr) + return torch.tensor(res, dtype=torch.float32) + + def augm_data(self, source, instr): + # source.shape = (2, 261120) - first channels, second length + source_shape = source.shape + applied_augs = [] + if 'all' in self.config['augmentations']: + augs = self.config['augmentations']['all'] + else: + augs = dict() 
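+ # The augmentation keys gathered into `augs` are per-chunk probabilities in [0, 1]:
+ # each effect below fires only when random.uniform(0, 1) falls under its value,
+ # and the matching *_min / *_max entries bound the randomly drawn parameters.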
+ + # We need to add to all augmentations specific augs for stem. And rewrite values if needed + if instr in self.config['augmentations']: + for el in self.config['augmentations'][instr]: + augs[el] = self.config['augmentations'][instr][el] + + # Channel shuffle + if 'channel_shuffle' in augs: + if augs['channel_shuffle'] > 0: + if random.uniform(0, 1) < augs['channel_shuffle']: + source = source[::-1].copy() + applied_augs.append('channel_shuffle') + # Random inverse + if 'random_inverse' in augs: + if augs['random_inverse'] > 0: + if random.uniform(0, 1) < augs['random_inverse']: + source = source[:, ::-1].copy() + applied_augs.append('random_inverse') + # Random polarity (multiply -1) + if 'random_polarity' in augs: + if augs['random_polarity'] > 0: + if random.uniform(0, 1) < augs['random_polarity']: + source = -source.copy() + applied_augs.append('random_polarity') + # Random pitch shift + if 'pitch_shift' in augs: + if augs['pitch_shift'] > 0: + if random.uniform(0, 1) < augs['pitch_shift']: + apply_aug = AU.PitchShift( + min_semitones=augs['pitch_shift_min_semitones'], + max_semitones=augs['pitch_shift_max_semitones'], + p=1.0 + ) + source = apply_aug(samples=source, sample_rate=44100) + applied_augs.append('pitch_shift') + # Random seven band parametric eq + if 'seven_band_parametric_eq' in augs: + if augs['seven_band_parametric_eq'] > 0: + if random.uniform(0, 1) < augs['seven_band_parametric_eq']: + apply_aug = AU.SevenBandParametricEQ( + min_gain_db=augs['seven_band_parametric_eq_min_gain_db'], + max_gain_db=augs['seven_band_parametric_eq_max_gain_db'], + p=1.0 + ) + source = apply_aug(samples=source, sample_rate=44100) + applied_augs.append('seven_band_parametric_eq') + # Random tanh distortion + if 'tanh_distortion' in augs: + if augs['tanh_distortion'] > 0: + if random.uniform(0, 1) < augs['tanh_distortion']: + apply_aug = AU.TanhDistortion( + min_distortion=augs['tanh_distortion_min'], + max_distortion=augs['tanh_distortion_max'], + p=1.0 + ) + source = apply_aug(samples=source, sample_rate=44100) + applied_augs.append('tanh_distortion') + # Random MP3 Compression + if 'mp3_compression' in augs: + if augs['mp3_compression'] > 0: + if random.uniform(0, 1) < augs['mp3_compression']: + apply_aug = AU.Mp3Compression( + min_bitrate=augs['mp3_compression_min_bitrate'], + max_bitrate=augs['mp3_compression_max_bitrate'], + backend=augs['mp3_compression_backend'], + p=1.0 + ) + source = apply_aug(samples=source, sample_rate=44100) + applied_augs.append('mp3_compression') + # Random AddGaussianNoise + if 'gaussian_noise' in augs: + if augs['gaussian_noise'] > 0: + if random.uniform(0, 1) < augs['gaussian_noise']: + apply_aug = AU.AddGaussianNoise( + min_amplitude=augs['gaussian_noise_min_amplitude'], + max_amplitude=augs['gaussian_noise_max_amplitude'], + p=1.0 + ) + source = apply_aug(samples=source, sample_rate=44100) + applied_augs.append('gaussian_noise') + # Random TimeStretch + if 'time_stretch' in augs: + if augs['time_stretch'] > 0: + if random.uniform(0, 1) < augs['time_stretch']: + apply_aug = AU.TimeStretch( + min_rate=augs['time_stretch_min_rate'], + max_rate=augs['time_stretch_max_rate'], + leave_length_unchanged=True, + p=1.0 + ) + source = apply_aug(samples=source, sample_rate=44100) + applied_augs.append('time_stretch') + + # Possible fix of shape + if source_shape != source.shape: + source = source[..., :source_shape[-1]] + + # Random Reverb + if 'pedalboard_reverb' in augs: + if augs['pedalboard_reverb'] > 0: + if random.uniform(0, 1) < augs['pedalboard_reverb']: + 
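+ # Draw each Reverb parameter uniformly from its configured range, then run the
+ # chunk through a single-effect Pedalboard chain at 44.1 kHz.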
room_size = random.uniform( + augs['pedalboard_reverb_room_size_min'], + augs['pedalboard_reverb_room_size_max'], + ) + damping = random.uniform( + augs['pedalboard_reverb_damping_min'], + augs['pedalboard_reverb_damping_max'], + ) + wet_level = random.uniform( + augs['pedalboard_reverb_wet_level_min'], + augs['pedalboard_reverb_wet_level_max'], + ) + dry_level = random.uniform( + augs['pedalboard_reverb_dry_level_min'], + augs['pedalboard_reverb_dry_level_max'], + ) + width = random.uniform( + augs['pedalboard_reverb_width_min'], + augs['pedalboard_reverb_width_max'], + ) + board = PB.Pedalboard([PB.Reverb( + room_size=room_size, # 0.1 - 0.9 + damping=damping, # 0.1 - 0.9 + wet_level=wet_level, # 0.1 - 0.9 + dry_level=dry_level, # 0.1 - 0.9 + width=width, # 0.9 - 1.0 + freeze_mode=0.0, + )]) + source = board(source, 44100) + applied_augs.append('pedalboard_reverb') + + # Random Chorus + if 'pedalboard_chorus' in augs: + if augs['pedalboard_chorus'] > 0: + if random.uniform(0, 1) < augs['pedalboard_chorus']: + rate_hz = random.uniform( + augs['pedalboard_chorus_rate_hz_min'], + augs['pedalboard_chorus_rate_hz_max'], + ) + depth = random.uniform( + augs['pedalboard_chorus_depth_min'], + augs['pedalboard_chorus_depth_max'], + ) + centre_delay_ms = random.uniform( + augs['pedalboard_chorus_centre_delay_ms_min'], + augs['pedalboard_chorus_centre_delay_ms_max'], + ) + feedback = random.uniform( + augs['pedalboard_chorus_feedback_min'], + augs['pedalboard_chorus_feedback_max'], + ) + mix = random.uniform( + augs['pedalboard_chorus_mix_min'], + augs['pedalboard_chorus_mix_max'], + ) + board = PB.Pedalboard([PB.Chorus( + rate_hz=rate_hz, + depth=depth, + centre_delay_ms=centre_delay_ms, + feedback=feedback, + mix=mix, + )]) + source = board(source, 44100) + applied_augs.append('pedalboard_chorus') + + # Random Phazer + if 'pedalboard_phazer' in augs: + if augs['pedalboard_phazer'] > 0: + if random.uniform(0, 1) < augs['pedalboard_phazer']: + rate_hz = random.uniform( + augs['pedalboard_phazer_rate_hz_min'], + augs['pedalboard_phazer_rate_hz_max'], + ) + depth = random.uniform( + augs['pedalboard_phazer_depth_min'], + augs['pedalboard_phazer_depth_max'], + ) + centre_frequency_hz = random.uniform( + augs['pedalboard_phazer_centre_frequency_hz_min'], + augs['pedalboard_phazer_centre_frequency_hz_max'], + ) + feedback = random.uniform( + augs['pedalboard_phazer_feedback_min'], + augs['pedalboard_phazer_feedback_max'], + ) + mix = random.uniform( + augs['pedalboard_phazer_mix_min'], + augs['pedalboard_phazer_mix_max'], + ) + board = PB.Pedalboard([PB.Phaser( + rate_hz=rate_hz, + depth=depth, + centre_frequency_hz=centre_frequency_hz, + feedback=feedback, + mix=mix, + )]) + source = board(source, 44100) + applied_augs.append('pedalboard_phazer') + + # Random Distortion + if 'pedalboard_distortion' in augs: + if augs['pedalboard_distortion'] > 0: + if random.uniform(0, 1) < augs['pedalboard_distortion']: + drive_db = random.uniform( + augs['pedalboard_distortion_drive_db_min'], + augs['pedalboard_distortion_drive_db_max'], + ) + board = PB.Pedalboard([PB.Distortion( + drive_db=drive_db, + )]) + source = board(source, 44100) + applied_augs.append('pedalboard_distortion') + + # Random PitchShift + if 'pedalboard_pitch_shift' in augs: + if augs['pedalboard_pitch_shift'] > 0: + if random.uniform(0, 1) < augs['pedalboard_pitch_shift']: + semitones = random.uniform( + augs['pedalboard_pitch_shift_semitones_min'], + augs['pedalboard_pitch_shift_semitones_max'], + ) + board = PB.Pedalboard([PB.PitchShift( + 
semitones=semitones + )]) + source = board(source, 44100) + applied_augs.append('pedalboard_pitch_shift') + + # Random Resample + if 'pedalboard_resample' in augs: + if augs['pedalboard_resample'] > 0: + if random.uniform(0, 1) < augs['pedalboard_resample']: + target_sample_rate = random.uniform( + augs['pedalboard_resample_target_sample_rate_min'], + augs['pedalboard_resample_target_sample_rate_max'], + ) + board = PB.Pedalboard([PB.Resample( + target_sample_rate=target_sample_rate + )]) + source = board(source, 44100) + applied_augs.append('pedalboard_resample') + + # Random Bitcrash + if 'pedalboard_bitcrash' in augs: + if augs['pedalboard_bitcrash'] > 0: + if random.uniform(0, 1) < augs['pedalboard_bitcrash']: + bit_depth = random.uniform( + augs['pedalboard_bitcrash_bit_depth_min'], + augs['pedalboard_bitcrash_bit_depth_max'], + ) + board = PB.Pedalboard([PB.Bitcrush( + bit_depth=bit_depth + )]) + source = board(source, 44100) + applied_augs.append('pedalboard_bitcrash') + + # Random MP3Compressor + if 'pedalboard_mp3_compressor' in augs: + if augs['pedalboard_mp3_compressor'] > 0: + if random.uniform(0, 1) < augs['pedalboard_mp3_compressor']: + vbr_quality = random.uniform( + augs['pedalboard_mp3_compressor_pedalboard_mp3_compressor_min'], + augs['pedalboard_mp3_compressor_pedalboard_mp3_compressor_max'], + ) + board = PB.Pedalboard([PB.MP3Compressor( + vbr_quality=vbr_quality + )]) + source = board(source, 44100) + applied_augs.append('pedalboard_mp3_compressor') + + # print(applied_augs) + return source + + def __getitem__(self, index): + if self.dataset_type in [1, 2, 3]: + res = self.load_random_mix() + else: + res = self.load_aligned_data() + + # Randomly change loudness of each stem + if self.aug: + if 'loudness' in self.config['augmentations']: + if self.config['augmentations']['loudness']: + loud_values = np.random.uniform( + low=self.config['augmentations']['loudness_min'], + high=self.config['augmentations']['loudness_max'], + size=(len(res),) + ) + loud_values = torch.tensor(loud_values, dtype=torch.float32) + res *= loud_values[:, None, None] + + mix = res.sum(0) + + if self.aug: + if 'mp3_compression_on_mixture' in self.config['augmentations']: + apply_aug = AU.Mp3Compression( + min_bitrate=self.config['augmentations']['mp3_compression_on_mixture_bitrate_min'], + max_bitrate=self.config['augmentations']['mp3_compression_on_mixture_bitrate_max'], + backend=self.config['augmentations']['mp3_compression_on_mixture_backend'], + p=self.config['augmentations']['mp3_compression_on_mixture'] + ) + mix_conv = mix.cpu().numpy().astype(np.float32) + required_shape = mix_conv.shape + mix = apply_aug(samples=mix_conv, sample_rate=44100) + # Sometimes it gives longer audio (so we cut) + if mix.shape != required_shape: + mix = mix[..., :required_shape[-1]] + mix = torch.tensor(mix, dtype=torch.float32) + + # If we need only given stem (for roformers) + if self.config.training.target_instrument is not None: + index = self.config.training.instruments.index(self.config.training.target_instrument) + return res[index], mix + + return res, mix diff --git a/data_pipeline/seperation/docs/augmentations.md b/data_pipeline/seperation/docs/augmentations.md new file mode 100644 index 0000000000000000000000000000000000000000..41d03585111340cc2c05dfc603753e5af1348a7e --- /dev/null +++ b/data_pipeline/seperation/docs/augmentations.md @@ -0,0 +1,146 @@ +### Augmentations + +Augmentations allows to change stems on the fly increasing the size of dataset by creating new samples from old samples. 
+Now control for augmentations is done from config file. Below you can find the example of full config, +which includes all available augmentations: + +```config +augmentations: + enable: true # enable or disable all augmentations (to fast disable if needed) + loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max) + loudness_min: 0.5 + loudness_max: 1.5 + mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3) + mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02) + - 0.2 + - 0.02 + mixup_loudness_min: 0.5 + mixup_loudness_max: 1.5 + + # apply mp3 compression to mixture only (emulate downloading mp3 from internet) + mp3_compression_on_mixture: 0.01 + mp3_compression_on_mixture_bitrate_min: 32 + mp3_compression_on_mixture_bitrate_max: 320 + mp3_compression_on_mixture_backend: "lameenc" + + all: + channel_shuffle: 0.5 # Set 0 or lower to disable + random_inverse: 0.1 # inverse track (better lower probability) + random_polarity: 0.5 # polarity change (multiply waveform to -1) + mp3_compression: 0.01 + mp3_compression_min_bitrate: 32 + mp3_compression_max_bitrate: 320 + mp3_compression_backend: "lameenc" + + # pedalboard reverb block + pedalboard_reverb: 0.01 + pedalboard_reverb_room_size_min: 0.1 + pedalboard_reverb_room_size_max: 0.9 + pedalboard_reverb_damping_min: 0.1 + pedalboard_reverb_damping_max: 0.9 + pedalboard_reverb_wet_level_min: 0.1 + pedalboard_reverb_wet_level_max: 0.9 + pedalboard_reverb_dry_level_min: 0.1 + pedalboard_reverb_dry_level_max: 0.9 + pedalboard_reverb_width_min: 0.9 + pedalboard_reverb_width_max: 1.0 + + # pedalboard chorus block + pedalboard_chorus: 0.01 + pedalboard_chorus_rate_hz_min: 1.0 + pedalboard_chorus_rate_hz_max: 7.0 + pedalboard_chorus_depth_min: 0.25 + pedalboard_chorus_depth_max: 0.95 + pedalboard_chorus_centre_delay_ms_min: 3 + pedalboard_chorus_centre_delay_ms_max: 10 + pedalboard_chorus_feedback_min: 0.0 + pedalboard_chorus_feedback_max: 0.5 + pedalboard_chorus_mix_min: 0.1 + pedalboard_chorus_mix_max: 0.9 + + # pedalboard phazer block + pedalboard_phazer: 0.01 + pedalboard_phazer_rate_hz_min: 1.0 + pedalboard_phazer_rate_hz_max: 10.0 + pedalboard_phazer_depth_min: 0.25 + pedalboard_phazer_depth_max: 0.95 + pedalboard_phazer_centre_frequency_hz_min: 200 + pedalboard_phazer_centre_frequency_hz_max: 12000 + pedalboard_phazer_feedback_min: 0.0 + pedalboard_phazer_feedback_max: 0.5 + pedalboard_phazer_mix_min: 0.1 + pedalboard_phazer_mix_max: 0.9 + + # pedalboard distortion block + pedalboard_distortion: 0.01 + pedalboard_distortion_drive_db_min: 1.0 + pedalboard_distortion_drive_db_max: 25.0 + + # pedalboard pitch shift block + pedalboard_pitch_shift: 0.01 + pedalboard_pitch_shift_semitones_min: -7 + pedalboard_pitch_shift_semitones_max: 7 + + # pedalboard resample block + pedalboard_resample: 0.01 + pedalboard_resample_target_sample_rate_min: 4000 + pedalboard_resample_target_sample_rate_max: 44100 + + # pedalboard bitcrash block + pedalboard_bitcrash: 0.01 + pedalboard_bitcrash_bit_depth_min: 4 + pedalboard_bitcrash_bit_depth_max: 16 + + # pedalboard mp3 compressor block + pedalboard_mp3_compressor: 0.01 + pedalboard_mp3_compressor_pedalboard_mp3_compressor_min: 0 + pedalboard_mp3_compressor_pedalboard_mp3_compressor_max: 9.999 + + vocals: + pitch_shift: 0.1 + pitch_shift_min_semitones: -5 + pitch_shift_max_semitones: 5 + seven_band_parametric_eq: 0.25 + seven_band_parametric_eq_min_gain_db: -9 + 
seven_band_parametric_eq_max_gain_db: 9 + tanh_distortion: 0.1 + tanh_distortion_min: 0.1 + tanh_distortion_max: 0.7 + bass: + pitch_shift: 0.1 + pitch_shift_min_semitones: -2 + pitch_shift_max_semitones: 2 + seven_band_parametric_eq: 0.25 + seven_band_parametric_eq_min_gain_db: -3 + seven_band_parametric_eq_max_gain_db: 6 + tanh_distortion: 0.2 + tanh_distortion_min: 0.1 + tanh_distortion_max: 0.5 + drums: + pitch_shift: 0.33 + pitch_shift_min_semitones: -5 + pitch_shift_max_semitones: 5 + seven_band_parametric_eq: 0.25 + seven_band_parametric_eq_min_gain_db: -9 + seven_band_parametric_eq_max_gain_db: 9 + tanh_distortion: 0.33 + tanh_distortion_min: 0.1 + tanh_distortion_max: 0.6 + other: + pitch_shift: 0.1 + pitch_shift_min_semitones: -4 + pitch_shift_max_semitones: 4 + gaussian_noise: 0.1 + gaussian_noise_min_amplitude: 0.001 + gaussian_noise_max_amplitude: 0.015 + time_stretch: 0.01 + time_stretch_min_rate: 0.8 + time_stretch_max_rate: 1.25 +``` + +You can copy-paste it into your config to use augmentations. +Notes: +* To completely disable all augmentations you can either remove the `augmentations` section from the config or set `enable` to `false`. +* If you want to disable a specific augmentation, just set its probability to zero. +* Augmentations in the `all` subsection are applied to all stems. +* Augmentations in the `vocals`, `bass`, etc. subsections are applied only to the corresponding stems. You can create such subsections for any of the stems given in `training.instruments`. \ No newline at end of file diff --git a/data_pipeline/seperation/docs/bs_roformer_info.md b/data_pipeline/seperation/docs/bs_roformer_info.md new file mode 100644 index 0000000000000000000000000000000000000000..ad7bfc9f8f57e54de1be42cdcdb14775811ebe36 --- /dev/null +++ b/data_pipeline/seperation/docs/bs_roformer_info.md @@ -0,0 +1,145 @@ +### Batch sizes for BSRoformer + +You can use the table below to choose the BS Roformer `batch_size` parameter for training based on your GPUs. Batch size values are provided for a single GPU. If you have several GPUs, multiply the value by the number of GPUs.
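+For example, taking the first row of the table below (`chunk_size: 131584`, `dim: 128`, `depth: 6`): a single A6000 fits a batch size of 10, so training the same model on two A6000 GPUs corresponds to an overall batch size of 20.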
+ +| chunk_size | dim | depth | batch_size (A6000 48GB) | batch_size (3090/4090 24GB) | batch_size (16GB) | +|:----------:|:---:|:-----:|:-----------------------:|:---------------------------:|:-----------------:| +| 131584 | 128 | 6 | 10 | 5 | 3 | +| 131584 | 256 | 6 | 8 | 4 | 2 | +| 131584 | 384 | 6 | 7 | 3 | 2 | +| 131584 | 512 | 6 | 6 | 3 | 2 | +| 131584 | 256 | 8 | 6 | 3 | 2 | +| 131584 | 256 | 12 | 4 | 2 | 1 | +| 263168 | 128 | 6 | 4 | 2 | 1 | +| 263168 | 256 | 6 | 3 | 1 | 1 | +| 352800 | 128 | 6 | 2 | 1 | - | +| 352800 | 256 | 6 | 2 | 1 | - | +| 352800 | 384 | 12 | 1 | - | - | +| 352800 | 512 | 12 | - | - | - | + + +Parameters obtained with initial config: + +``` +audio: + chunk_size: 131584 + dim_f: 1024 + dim_t: 515 + hop_length: 512 + n_fft: 2048 + num_channels: 2 + sample_rate: 44100 + min_mean_abs: 0.000 + +model: + dim: 384 + depth: 12 + stereo: true + num_stems: 1 + time_transformer_depth: 1 + freq_transformer_depth: 1 + linear_transformer_depth: 0 + freqs_per_bands: !!python/tuple + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 2 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 4 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 12 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 24 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 48 + - 128 + - 129 + dim_head: 64 + heads: 8 + attn_dropout: 0.1 + ff_dropout: 0.1 + flash_attn: false + dim_freqs_in: 1025 + stft_n_fft: 2048 + stft_hop_length: 512 + stft_win_length: 2048 + stft_normalized: false + mask_estimator_depth: 2 + multi_stft_resolution_loss_weight: 1.0 + multi_stft_resolutions_window_sizes: !!python/tuple + - 4096 + - 2048 + - 1024 + - 512 + - 256 + multi_stft_hop_size: 147 + multi_stft_normalized: False + +training: + batch_size: 1 + gradient_accumulation_steps: 1 + grad_clip: 0 + instruments: + - vocals + - other + lr: 3.0e-05 + patience: 2 + reduce_factor: 0.95 + target_instrument: vocals + num_epochs: 1000 + num_steps: 1000 + q: 0.95 + coarse_loss_clip: true + ema_momentum: 0.999 + optimizer: adam + other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental + use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true +``` diff --git a/data_pipeline/seperation/docs/changes.md b/data_pipeline/seperation/docs/changes.md new file mode 100644 index 0000000000000000000000000000000000000000..9aeba78f94f0b569849ac6560c40f68c82f6206d --- /dev/null +++ b/data_pipeline/seperation/docs/changes.md @@ -0,0 +1,20 @@ +### Changes + +#### v1.0.2 + +* Added multi GPU validation (earlier validation was performed on single GPU) +* `training.batch_size` in config now must be set for single GPU (if you use multiple GPUs it will be automatically multiplied by number of GPUs) + +#### v1.0.3 + +* Added "spawn" fix for multiprocessing +* Function `get_model_from_config` now takes path of config as input. +* On latest version of pytorch some problems with torch.backends.cudnn.benchmark = True - big slow down. Fixed version 2.0.1 in requirements.txt +* `--valid_path` parameter for train.py now can accept several validation folders instead of one. Added warning if validation folder is empty. +* Small fix for AMP usage in Demucs models taken from config +* Support for Demucs3 mmi model was added +* GPU memory consumption was reduced during inference and validation. +* Some changes to repair click problems on the edges of segment. 
+* Added support for training on FLAC files. Some more error checks added. +* viperx's Roformer weights and configs added +* `--extract_instrumental` argument added to inference.py \ No newline at end of file diff --git a/data_pipeline/seperation/docs/dataset_types.md b/data_pipeline/seperation/docs/dataset_types.md new file mode 100644 index 0000000000000000000000000000000000000000..345faf7e105aa113deaa173972e5ba630c5c317a --- /dev/null +++ b/data_pipeline/seperation/docs/dataset_types.md @@ -0,0 +1,75 @@ +### Dataset types for training + +* **Type 1 (MUSDB)**: different folders. Each folder contains all needed stems in the format _< stem name >.wav_. The same as in the MUSDB18HQ dataset. In the latest code releases it's possible to use `flac` instead of `wav`. + +Example: +``` +--- Song 1: +------ vocals.wav +------ bass.wav +------ drums.wav +------ other.wav +--- Song 2: +------ vocals.wav +------ bass.wav +------ drums.wav +------ other.wav +--- Song 3: +........... +``` + +* **Type 2 (Stems)**: each folder is a stem name. The folder contains wav files which consist only of the required stem. +``` +--- vocals: +------ vocals_1.wav +------ vocals_2.wav +------ vocals_3.wav +------ vocals_4.wav +------ ... +--- bass: +------ bass_1.wav +------ bass_2.wav +------ bass_3.wav +------ bass_4.wav +------ ... +........... +``` + +* **Type 3 (CSV file)**: + +You can provide a CSV file (or a list of CSV files) with the following structure: +``` +instrum,path +vocals,/path/to/dataset/vocals_1.wav +vocals,/path/to/dataset2/vocals_v2.wav +vocals,/path/to/dataset3/vocals_some.wav +... +drums,/path/to/dataset/drums_good.wav +... +``` + +* **Type 4 (MUSDB Aligned)**: + +The same as Type 1, but during training all instruments are taken from the same position of the song. + +### Dataset for validation + +* The validation dataset must have the same structure as Type 1 datasets (regardless of which dataset type you use for training), but each folder must also include `mixture.wav` for each song. `mixture.wav` is the sum of all stems of the song. + +Example: +``` +--- Song 1: +------ vocals.wav +------ bass.wav +------ drums.wav +------ other.wav +------ mixture.wav +--- Song 2: +------ vocals.wav +------ bass.wav +------ drums.wav +------ other.wav +------ mixture.wav +--- Song 3: +...........
+``` diff --git a/data_pipeline/seperation/inference.py b/data_pipeline/seperation/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..0476acd8536c502ac199ddb13451b84a89f65f70 --- /dev/null +++ b/data_pipeline/seperation/inference.py @@ -0,0 +1,116 @@ +# coding: utf-8 +__author__ = 'Roman Solovyev (ZFTurbo): https://github.com/ZFTurbo/' + +import argparse +import time +import librosa +from tqdm import tqdm +import sys +import os +import glob +import torch +import numpy as np +import soundfile as sf +import torch.nn as nn +from utils import demix_track, demix_track_demucs, get_model_from_config + +import warnings +warnings.filterwarnings("ignore") + + +def run_folder(model, args, config, device, verbose=False): + start_time = time.time() + model.eval() + all_mixtures_path = glob.glob(args.input_folder + '/*.*') + print('Total files found: {}'.format(len(all_mixtures_path))) + + instruments = config.training.instruments + if config.training.target_instrument is not None: + instruments = [config.training.target_instrument] + + if not os.path.isdir(args.store_dir): + os.mkdir(args.store_dir) + + if not verbose: + all_mixtures_path = tqdm(all_mixtures_path) + + for path in all_mixtures_path: + if not verbose: + all_mixtures_path.set_postfix({'track': os.path.basename(path)}) + try: + # mix, sr = sf.read(path) + mix, sr = librosa.load(path, sr=44100, mono=False) + mix = mix.T + except Exception as e: + print('Can read track: {}'.format(path)) + print('Error message: {}'.format(str(e))) + continue + + # Convert mono to stereo if needed + if len(mix.shape) == 1: + mix = np.stack([mix, mix], axis=-1) + + mixture = torch.tensor(mix.T, dtype=torch.float32) + if args.model_type == 'htdemucs': + res = demix_track_demucs(config, model, mixture, device) + else: + res = demix_track(config, model, mixture, device) + for instr in instruments: + sf.write("{}/{}_{}.wav".format(args.store_dir, os.path.basename(path)[:-4], instr), res[instr].T, sr, subtype='FLOAT') + + if 'vocals' in instruments and args.extract_instrumental: + instrum_file_name = "{}/{}_{}.wav".format(args.store_dir, os.path.basename(path)[:-4], 'instrumental') + sf.write(instrum_file_name, mix - res['vocals'].T, sr, subtype='FLOAT') + + time.sleep(1) + print("Elapsed time: {:.2f} sec".format(time.time() - start_time)) + + +def proc_folder(args): + parser = argparse.ArgumentParser() + parser.add_argument("--model_type", type=str, default='mdx23c', help="One of mdx23c, htdemucs, segm_models, mel_band_roformer, bs_roformer, swin_upernet, bandit") + parser.add_argument("--config_path", type=str, help="path to config file") + parser.add_argument("--start_check_point", type=str, default='', help="Initial checkpoint to valid weights") + parser.add_argument("--input_folder", type=str, help="folder with mixtures to process") + parser.add_argument("--store_dir", default="", type=str, help="path to store results as wav file") + parser.add_argument("--model-dir", default="", type=str, help="path to store results as wav file") + parser.add_argument("--log-dir", default="", type=str, help="path to store results as wav file") + parser.add_argument("--device_ids", nargs='+', type=int, default=0, help='list of gpu ids') + parser.add_argument("--extract_instrumental", action='store_true', help="invert vocals to get instrumental if provided") + print(f"cuda{torch.cuda.is_available()}") + if args is None: + args = parser.parse_args() + else: + args = parser.parse_args(args) + + torch.backends.cudnn.benchmark = True + + model, config 
= get_model_from_config(args.model_type, args.config_path) + if args.start_check_point != '': + print('Start from checkpoint: {}'.format(args.start_check_point)) + state_dict = torch.load(args.start_check_point, map_location='cpu') + if args.model_type == 'htdemucs': + # Fix for htdemucs pround etrained models + if 'state' in state_dict: + state_dict = state_dict['state'] + model.load_state_dict(state_dict) + print("Instruments: {}".format(config.training.instruments)) + + if torch.cuda.is_available(): + device_ids = args.device_ids + if type(device_ids)==int: + device = torch.device(f'cuda:{device_ids}') + model = model.to(device) + else: + device = torch.device(f'cuda:{device_ids[0]}') + model = nn.DataParallel(model, device_ids=device_ids).to(device) + else: + device = 'cpu' + print('CUDA is not avilable. Run inference on CPU. It will be very slow...') + model = model.to(device) + + run_folder(model, args, config, device, verbose=False) + + +if __name__ == "__main__": + proc_folder(None) diff --git a/data_pipeline/seperation/inference.sh b/data_pipeline/seperation/inference.sh new file mode 100644 index 0000000000000000000000000000000000000000..2b56ba2326f57ce455547cefd9f4f331d7d700eb --- /dev/null +++ b/data_pipeline/seperation/inference.sh @@ -0,0 +1,11 @@ +input_dir=$1 +output_dir=$2 +ckpt_dir=$3 + +python3 inference.py \ + --model_type bs_roformer \ + --config_path ${ckpt_dir}/model_bs_roformer_ep_317_sdr_12.9755.yaml \ + --start_check_point ${ckpt_dir}/model_bs_roformer_ep_317_sdr_12.9755.ckpt \ + --input_folder ${input_dir} \ + --store_dir ${output_dir} \ + --extract_instrumental \ No newline at end of file diff --git a/data_pipeline/seperation/inference_mp.py b/data_pipeline/seperation/inference_mp.py new file mode 100644 index 0000000000000000000000000000000000000000..056bd7af0d46dc58a0b5771b72b0391c53c762dc --- /dev/null +++ b/data_pipeline/seperation/inference_mp.py @@ -0,0 +1,154 @@ +import torch +import torch.multiprocessing as mp +import os, sys +import threading +from tqdm import tqdm +import soundfile as sf +import threading +import librosa +import numpy as np +from utils import demix_track, demix_track_demucs, get_model_from_config +import traceback +import glob +import argparse + +import warnings +warnings.filterwarnings("ignore") + +def normalize_audio(y, target_dbfs=0): + max_amplitude = np.max(np.abs(y)) + if max_amplitude < 0.1: + return y + + target_amplitude = 10.0**(target_dbfs / 20.0) + scale_factor = target_amplitude / max_amplitude + + normalized_audio = y * scale_factor + + return normalized_audio + +def inference(rank, ckpt_root, out_dir, queue: mp.Queue): + #print(f"thread {rank} start") + device = f"cuda:{rank}" + config = f"{ckpt_root}/model_bs_roformer_ep_317_sdr_12.9755.yaml" + ckpt = f"{ckpt_root}/model_bs_roformer_ep_317_sdr_12.9755.ckpt" + model, config = get_model_from_config("bs_roformer", config) + state_dict = torch.load(ckpt, map_location='cpu') + model.load_state_dict(state_dict) + model = model.to(device) + model.eval() + + + with torch.no_grad(): + while True: + #print(texts) + filename = queue.get() + if filename is None: + break + filepath = filename[0] + filename = filepath.split('/')[-1] + try: + mix, sr = librosa.load(filepath, sr=44100, mono=False) + #mix = normalize_audio(mix, -6) + mix = mix.T + if len(mix.shape) == 1: + mix = np.stack([mix, mix], axis=-1) + + mixture = torch.tensor(mix.T, dtype=torch.float32) + res = demix_track(config, model, mixture, device) + sf.write("{}/{}".format(os.path.join(out_dir, "vocal"), filename), 
res['vocals'].T.mean(-1), sr, subtype='FLOAT') + sf.write("{}/{}".format(os.path.join(out_dir, "bgm"), filename), mix.mean(-1) - res['vocals'].T.mean(-1), sr, subtype='FLOAT') + + + except Exception as e: + traceback.print_exc() + continue + + + +def setInterval(interval): + def decorator(function): + def wrapper(*args, **kwargs): + stopped = threading.Event() + + def loop(): # executed in another thread + while not stopped.wait(interval): # until stopped + function(*args, **kwargs) + + t = threading.Thread(target=loop) + t.daemon = True # stop if the program exits + t.start() + return stopped + + return wrapper + + return decorator + +last_batches = None + +@setInterval(3) +def QueueWatcher(queue, bar): + global last_batches + curr_batches = queue.qsize() + bar.update(last_batches-curr_batches) + last_batches = curr_batches + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--filelist_or_dir", type=str, required=True, help="Path to save checkpoints") + parser.add_argument("--out_dir", type=str, required=True, help="Path to save checkpoints") + parser.add_argument("--ckpt_path", type=str, required=True, help="Path to save checkpoints") + parser.add_argument("--jobs", type=int, required=False, default=2, help="Path to save checkpoints") + parser.add_argument("--log_dir", type=str, required=False, default="large-v3", help="Path to save checkpoints") + parser.add_argument("--model_dir", type=str, required=False, default="large-v3", help="Path to save checkpoints") + args = parser.parse_args() + + filelist_or_dir = args.filelist_or_dir + out_dir = args.out_dir + ckpt_path = args.ckpt_path + jobs = args.jobs + vad_jobs = jobs * 2 + + if os.path.isfile(filelist_or_dir): + filelist_name = filelist_or_dir.split('/')[-1].split('.')[0] + generator = open(filelist_or_dir).read().splitlines() + else: + filelist_name = "single" + generator = glob.glob(f"{filelist_or_dir}/*.wav") + + os.makedirs(os.path.join(out_dir, "vocal"), exist_ok=True) + os.makedirs(os.path.join(out_dir, "bgm"), exist_ok=True) + + + gpu_num = torch.cuda.device_count() + + processes = [] + vad_processes = [] + queue = mp.Queue() + vad_queue = mp.Queue() + for thread_num in range(jobs): + rank = thread_num % gpu_num + p = mp.Process(target=inference, args=(rank, ckpt_path, out_dir, queue)) + p.start() + processes.append(p) + + accum = [] + + for filename in tqdm(generator): + accum.append(filename) + if len(accum) == 1: + queue.put(accum.copy()) + accum.clear() + + for _ in range(jobs): + queue.put(None) + + last_batches = queue.qsize() + bar = tqdm(total=last_batches, desc="seperation") + queue_watcher = QueueWatcher(queue, bar) + for p in processes: + p.join() + queue_watcher.set() + + for p in vad_processes: + p.join() diff --git a/data_pipeline/seperation/inference_mp.sh b/data_pipeline/seperation/inference_mp.sh new file mode 100644 index 0000000000000000000000000000000000000000..0c5e2fede2638ab1914a3894fd50b93219033b28 --- /dev/null +++ b/data_pipeline/seperation/inference_mp.sh @@ -0,0 +1,7 @@ +python3 inference_mp.py \ + /data/v-ziqianning/workspace/SingingTTS/data/youtube_testdata/wav \ + /data/v-ziqianning/workspace/SingingTTS/data/youtube_testdata/vocal \ + /data/v-ziqianning/workspace/SingingTTS/data/youtube_testdata/bgm \ + /data/v-ziqianning/workspace/SingingTTS/data_pipeline/seperation/Music-Source-Separation-Training/ckpts/model_bs_roformer_ep_317_sdr_12.9755.yaml \ + 
/data/v-ziqianning/workspace/SingingTTS/data_pipeline/seperation/Music-Source-Separation-Training/ckpts/model_bs_roformer_ep_317_sdr_12.9755.ckpt \ + 4 \ No newline at end of file diff --git a/data_pipeline/seperation/models/bandit/core/__init__.py b/data_pipeline/seperation/models/bandit/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a4d6d7953709c2f86a6b484e49c7715b58bbe86a --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/__init__.py @@ -0,0 +1,744 @@ +import os.path +from collections import defaultdict +from itertools import chain, combinations +from typing import ( + Any, + Dict, + Iterator, + Mapping, Optional, + Tuple, Type, + TypedDict +) + +import pytorch_lightning as pl +import torch +import torchaudio as ta +import torchmetrics as tm +from asteroid import losses as asteroid_losses +# from deepspeed.ops.adam import DeepSpeedCPUAdam +# from geoopt import optim as gooptim +from pytorch_lightning.utilities.types import STEP_OUTPUT +from torch import nn, optim +from torch.optim import lr_scheduler +from torch.optim.lr_scheduler import LRScheduler + +from models.bandit.core import loss, metrics as metrics_, model +from models.bandit.core.data._types import BatchedDataDict +from models.bandit.core.data.augmentation import BaseAugmentor, StemAugmentor +from models.bandit.core.utils import audio as audio_ +from models.bandit.core.utils.audio import BaseFader + +# from pandas.io.json._normalize import nested_to_record + +ConfigDict = TypedDict('ConfigDict', {'name': str, 'kwargs': Dict[str, Any]}) + + +class SchedulerConfigDict(ConfigDict): + monitor: str + + +OptimizerSchedulerConfigDict = TypedDict( + 'OptimizerSchedulerConfigDict', + {"optimizer": ConfigDict, "scheduler": SchedulerConfigDict}, + total=False +) + + +class LRSchedulerReturnDict(TypedDict, total=False): + scheduler: LRScheduler + monitor: str + + +class ConfigureOptimizerReturnDict(TypedDict, total=False): + optimizer: torch.optim.Optimizer + lr_scheduler: LRSchedulerReturnDict + + +OutputType = Dict[str, Any] +MetricsType = Dict[str, torch.Tensor] + + +def get_optimizer_class(name: str) -> Type[optim.Optimizer]: + + if name == "DeepSpeedCPUAdam": + return DeepSpeedCPUAdam + + for module in [optim, gooptim]: + if name in module.__dict__: + return module.__dict__[name] + + raise NameError + + +def parse_optimizer_config( + config: OptimizerSchedulerConfigDict, + parameters: Iterator[nn.Parameter] +) -> ConfigureOptimizerReturnDict: + optim_class = get_optimizer_class(config["optimizer"]["name"]) + optimizer = optim_class(parameters, **config["optimizer"]["kwargs"]) + + optim_dict: ConfigureOptimizerReturnDict = { + "optimizer": optimizer, + } + + if "scheduler" in config: + + lr_scheduler_class_ = config["scheduler"]["name"] + lr_scheduler_class = lr_scheduler.__dict__[lr_scheduler_class_] + lr_scheduler_dict: LRSchedulerReturnDict = { + "scheduler": lr_scheduler_class( + optimizer, + **config["scheduler"]["kwargs"] + ) + } + + if lr_scheduler_class_ == "ReduceLROnPlateau": + lr_scheduler_dict["monitor"] = config["scheduler"]["monitor"] + + optim_dict["lr_scheduler"] = lr_scheduler_dict + + return optim_dict + + +def parse_model_config(config: ConfigDict) -> Any: + name = config["name"] + + for module in [model]: + if name in module.__dict__: + return module.__dict__[name](**config["kwargs"]) + + raise NameError + + +_LEGACY_LOSS_NAMES = ["HybridL1Loss"] + + +def _parse_legacy_loss_config(config: ConfigDict) -> nn.Module: + name = config["name"] + + if name == 
"HybridL1Loss": + return loss.TimeFreqL1Loss(**config["kwargs"]) + + raise NameError + + +def parse_loss_config(config: ConfigDict) -> nn.Module: + name = config["name"] + + if name in _LEGACY_LOSS_NAMES: + return _parse_legacy_loss_config(config) + + for module in [loss, nn.modules.loss, asteroid_losses]: + if name in module.__dict__: + # print(config["kwargs"]) + return module.__dict__[name](**config["kwargs"]) + + raise NameError + + +def get_metric(config: ConfigDict) -> tm.Metric: + name = config["name"] + + for module in [tm, metrics_]: + if name in module.__dict__: + return module.__dict__[name](**config["kwargs"]) + raise NameError + + +def parse_metric_config(config: Dict[str, ConfigDict]) -> tm.MetricCollection: + metrics = {} + + for metric in config: + metrics[metric] = get_metric(config[metric]) + + return tm.MetricCollection(metrics) + + +def parse_fader_config(config: ConfigDict) -> BaseFader: + name = config["name"] + + for module in [audio_]: + if name in module.__dict__: + return module.__dict__[name](**config["kwargs"]) + + raise NameError + + +class LightningSystem(pl.LightningModule): + _VOX_STEMS = ["speech", "vocals"] + _BG_STEMS = ["background", "effects", "mne"] + + def __init__( + self, + config: Dict, + loss_adjustment: float = 1.0, + attach_fader: bool = False + ) -> None: + super().__init__() + self.optimizer_config = config["optimizer"] + self.model = parse_model_config(config["model"]) + self.loss = parse_loss_config(config["loss"]) + self.metrics = nn.ModuleDict( + { + stem: parse_metric_config(config["metrics"]["dev"]) + for stem in self.model.stems + } + ) + + self.metrics.disallow_fsdp = True + + self.test_metrics = nn.ModuleDict( + { + stem: parse_metric_config(config["metrics"]["test"]) + for stem in self.model.stems + } + ) + + self.test_metrics.disallow_fsdp = True + + self.fs = config["model"]["kwargs"]["fs"] + + self.fader_config = config["inference"]["fader"] + if attach_fader: + self.fader = parse_fader_config(config["inference"]["fader"]) + else: + self.fader = None + + self.augmentation: Optional[BaseAugmentor] + if config.get("augmentation", None) is not None: + self.augmentation = StemAugmentor(**config["augmentation"]) + else: + self.augmentation = None + + self.predict_output_path: Optional[str] = None + self.loss_adjustment = loss_adjustment + + self.val_prefix = None + self.test_prefix = None + + + def configure_optimizers(self) -> Any: + return parse_optimizer_config( + self.optimizer_config, + self.trainer.model.parameters() + ) + + def compute_loss(self, batch: BatchedDataDict, output: OutputType) -> Dict[ + str, torch.Tensor]: + return {"loss": self.loss(output, batch)} + + def update_metrics( + self, + batch: BatchedDataDict, + output: OutputType, + mode: str + ) -> None: + + if mode == "test": + metrics = self.test_metrics + else: + metrics = self.metrics + + for stem, metric in metrics.items(): + + if stem == "mne:+": + stem = "mne" + + # print(f"matching for {stem}") + if mode == "train": + metric.update( + output["audio"][stem],#.cpu(), + batch["audio"][stem],#.cpu() + ) + else: + if stem not in batch["audio"]: + matched = False + if stem in self._VOX_STEMS: + for bstem in self._VOX_STEMS: + if bstem in batch["audio"]: + batch["audio"][stem] = batch["audio"][bstem] + matched = True + break + elif stem in self._BG_STEMS: + for bstem in self._BG_STEMS: + if bstem in batch["audio"]: + batch["audio"][stem] = batch["audio"][bstem] + matched = True + break + else: + matched = True + + # print(batch["audio"].keys()) + + if matched: + # 
print(f"matched {stem}!") + if stem == "mne" and "mne" not in output["audio"]: + output["audio"]["mne"] = output["audio"]["music"] + output["audio"]["effects"] + + metric.update( + output["audio"][stem],#.cpu(), + batch["audio"][stem],#.cpu(), + ) + + # print(metric.compute()) + def compute_metrics(self, mode: str="dev") -> Dict[ + str, torch.Tensor]: + + if mode == "test": + metrics = self.test_metrics + else: + metrics = self.metrics + + metric_dict = {} + + for stem, metric in metrics.items(): + md = metric.compute() + metric_dict.update( + {f"{stem}/{k}": v for k, v in md.items()} + ) + + self.log_dict(metric_dict, prog_bar=True, logger=False) + + return metric_dict + + def reset_metrics(self, test_mode: bool = False) -> None: + + if test_mode: + metrics = self.test_metrics + else: + metrics = self.metrics + + for _, metric in metrics.items(): + metric.reset() + + + def forward(self, batch: BatchedDataDict) -> Any: + batch, output = self.model(batch) + + + return batch, output + + def common_step(self, batch: BatchedDataDict, mode: str) -> Any: + batch, output = self.forward(batch) + # print(batch) + # print(output) + loss_dict = self.compute_loss(batch, output) + + with torch.no_grad(): + self.update_metrics(batch, output, mode=mode) + + if mode == "train": + self.log("loss", loss_dict["loss"], prog_bar=True) + + return output, loss_dict + + + def training_step(self, batch: BatchedDataDict) -> Dict[str, Any]: + + if self.augmentation is not None: + with torch.no_grad(): + batch = self.augmentation(batch) + + _, loss_dict = self.common_step(batch, mode="train") + + with torch.inference_mode(): + self.log_dict_with_prefix( + loss_dict, + "train", + batch_size=batch["audio"]["mixture"].shape[0] + ) + + loss_dict["loss"] *= self.loss_adjustment + + return loss_dict + + def on_train_batch_end( + self, outputs: STEP_OUTPUT, batch: BatchedDataDict, batch_idx: int + ) -> None: + + metric_dict = self.compute_metrics() + self.log_dict_with_prefix(metric_dict, "train") + self.reset_metrics() + + def validation_step( + self, + batch: BatchedDataDict, + batch_idx: int, + dataloader_idx: int = 0 + ) -> Dict[str, Any]: + + with torch.inference_mode(): + curr_val_prefix = f"val{dataloader_idx}" if dataloader_idx > 0 else "val" + + if curr_val_prefix != self.val_prefix: + # print(f"Switching to validation dataloader {dataloader_idx}") + if self.val_prefix is not None: + self._on_validation_epoch_end() + self.val_prefix = curr_val_prefix + _, loss_dict = self.common_step(batch, mode="val") + + self.log_dict_with_prefix( + loss_dict, + self.val_prefix, + batch_size=batch["audio"]["mixture"].shape[0], + prog_bar=True, + add_dataloader_idx=False + ) + + return loss_dict + + def on_validation_epoch_end(self) -> None: + self._on_validation_epoch_end() + + def _on_validation_epoch_end(self) -> None: + metric_dict = self.compute_metrics() + self.log_dict_with_prefix(metric_dict, self.val_prefix, prog_bar=True, + add_dataloader_idx=False) + # self.logger.save() + # print(self.val_prefix, "Validation metrics:", metric_dict) + self.reset_metrics() + + + def old_predtest_step( + self, + batch: BatchedDataDict, + batch_idx: int, + dataloader_idx: int = 0 + ) -> Tuple[BatchedDataDict, OutputType]: + + audio_batch = batch["audio"]["mixture"] + track_batch = batch.get("track", ["" for _ in range(len(audio_batch))]) + + output_list_of_dicts = [ + self.fader( + audio[None, ...], + lambda a: self.test_forward(a, track) + ) + for audio, track in zip(audio_batch, track_batch) + ] + + output_dict_of_lists = 
defaultdict(list) + + for output_dict in output_list_of_dicts: + for stem, audio in output_dict.items(): + output_dict_of_lists[stem].append(audio) + + output = { + "audio": { + stem: torch.concat(output_list, dim=0) + for stem, output_list in output_dict_of_lists.items() + } + } + + return batch, output + + def predtest_step( + self, + batch: BatchedDataDict, + batch_idx: int = -1, + dataloader_idx: int = 0 + ) -> Tuple[BatchedDataDict, OutputType]: + + if getattr(self.model, "bypass_fader", False): + batch, output = self.model(batch) + else: + audio_batch = batch["audio"]["mixture"] + output = self.fader( + audio_batch, + lambda a: self.test_forward(a, "", batch=batch) + ) + + return batch, output + + def test_forward( + self, + audio: torch.Tensor, + track: str = "", + batch: BatchedDataDict = None + ) -> torch.Tensor: + + if self.fader is None: + self.attach_fader() + + cond = batch.get("condition", None) + + if cond is not None and cond.shape[0] == 1: + cond = cond.repeat(audio.shape[0], 1) + + _, output = self.forward( + {"audio": {"mixture": audio}, + "track": track, + "condition": cond, + } + ) # TODO: support track properly + + return output["audio"] + + def on_test_epoch_start(self) -> None: + self.attach_fader(force_reattach=True) + + def test_step( + self, + batch: BatchedDataDict, + batch_idx: int, + dataloader_idx: int = 0 + ) -> Any: + curr_test_prefix = f"test{dataloader_idx}" + + # print(batch["audio"].keys()) + + if curr_test_prefix != self.test_prefix: + # print(f"Switching to test dataloader {dataloader_idx}") + if self.test_prefix is not None: + self._on_test_epoch_end() + self.test_prefix = curr_test_prefix + + with torch.inference_mode(): + _, output = self.predtest_step(batch, batch_idx, dataloader_idx) + # print(output) + self.update_metrics(batch, output, mode="test") + + return output + + def on_test_epoch_end(self) -> None: + self._on_test_epoch_end() + + def _on_test_epoch_end(self) -> None: + metric_dict = self.compute_metrics(mode="test") + self.log_dict_with_prefix(metric_dict, self.test_prefix, prog_bar=True, + add_dataloader_idx=False) + # self.logger.save() + # print(self.test_prefix, "Test metrics:", metric_dict) + self.reset_metrics() + + def predict_step( + self, + batch: BatchedDataDict, + batch_idx: int = 0, + dataloader_idx: int = 0, + include_track_name: Optional[bool] = None, + get_no_vox_combinations: bool = True, + get_residual: bool = False, + treat_batch_as_channels: bool = False, + fs: Optional[int] = None, + ) -> Any: + assert self.predict_output_path is not None + + batch_size = batch["audio"]["mixture"].shape[0] + + if include_track_name is None: + include_track_name = batch_size > 1 + + with torch.inference_mode(): + batch, output = self.predtest_step(batch, batch_idx, dataloader_idx) + print('Pred test finished...') + torch.cuda.empty_cache() + metric_dict = {} + + if get_residual: + mixture = batch["audio"]["mixture"] + extracted = sum([output["audio"][stem] for stem in output["audio"]]) + residual = mixture - extracted + print(extracted.shape, mixture.shape, residual.shape) + + output["audio"]["residual"] = residual + + if get_no_vox_combinations: + no_vox_stems = [ + stem for stem in output["audio"] if + stem not in self._VOX_STEMS + ] + no_vox_combinations = chain.from_iterable( + combinations(no_vox_stems, r) for r in + range(2, len(no_vox_stems) + 1) + ) + + for combination in no_vox_combinations: + combination_ = list(combination) + output["audio"]["+".join(combination_)] = sum( + [output["audio"][stem] for stem in combination_] + 
) + + if treat_batch_as_channels: + for stem in output["audio"]: + output["audio"][stem] = output["audio"][stem].reshape( + 1, -1, output["audio"][stem].shape[-1] + ) + batch_size = 1 + + for b in range(batch_size): + print("!!", b) + for stem in output["audio"]: + print(f"Saving audio for {stem} to {self.predict_output_path}") + track_name = batch["track"][b].split("/")[-1] + + if batch.get("audio", {}).get(stem, None) is not None: + self.test_metrics[stem].reset() + metrics = self.test_metrics[stem]( + batch["audio"][stem][[b], ...], + output["audio"][stem][[b], ...] + ) + snr = metrics["snr"] + sisnr = metrics["sisnr"] + sdr = metrics["sdr"] + metric_dict[stem] = metrics + print( + track_name, + f"snr={snr:2.2f} dB", + f"sisnr={sisnr:2.2f}", + f"sdr={sdr:2.2f} dB", + ) + filename = f"{stem} - snr={snr:2.2f}dB - sdr={sdr:2.2f}dB.wav" + else: + filename = f"{stem}.wav" + + if include_track_name: + output_dir = os.path.join( + self.predict_output_path, + track_name + ) + else: + output_dir = self.predict_output_path + + os.makedirs(output_dir, exist_ok=True) + + if fs is None: + fs = self.fs + + ta.save( + os.path.join(output_dir, filename), + output["audio"][stem][b, ...].cpu(), + fs, + ) + + return metric_dict + + def get_stems( + self, + batch: BatchedDataDict, + batch_idx: int = 0, + dataloader_idx: int = 0, + include_track_name: Optional[bool] = None, + get_no_vox_combinations: bool = True, + get_residual: bool = False, + treat_batch_as_channels: bool = False, + fs: Optional[int] = None, + ) -> Any: + assert self.predict_output_path is not None + + batch_size = batch["audio"]["mixture"].shape[0] + + if include_track_name is None: + include_track_name = batch_size > 1 + + with torch.inference_mode(): + batch, output = self.predtest_step(batch, batch_idx, dataloader_idx) + torch.cuda.empty_cache() + metric_dict = {} + + if get_residual: + mixture = batch["audio"]["mixture"] + extracted = sum([output["audio"][stem] for stem in output["audio"]]) + residual = mixture - extracted + # print(extracted.shape, mixture.shape, residual.shape) + + output["audio"]["residual"] = residual + + if get_no_vox_combinations: + no_vox_stems = [ + stem for stem in output["audio"] if + stem not in self._VOX_STEMS + ] + no_vox_combinations = chain.from_iterable( + combinations(no_vox_stems, r) for r in + range(2, len(no_vox_stems) + 1) + ) + + for combination in no_vox_combinations: + combination_ = list(combination) + output["audio"]["+".join(combination_)] = sum( + [output["audio"][stem] for stem in combination_] + ) + + if treat_batch_as_channels: + for stem in output["audio"]: + output["audio"][stem] = output["audio"][stem].reshape( + 1, -1, output["audio"][stem].shape[-1] + ) + batch_size = 1 + + result = {} + for b in range(batch_size): + for stem in output["audio"]: + track_name = batch["track"][b].split("/")[-1] + + if batch.get("audio", {}).get(stem, None) is not None: + self.test_metrics[stem].reset() + metrics = self.test_metrics[stem]( + batch["audio"][stem][[b], ...], + output["audio"][stem][[b], ...] 
+ ) + snr = metrics["snr"] + sisnr = metrics["sisnr"] + sdr = metrics["sdr"] + metric_dict[stem] = metrics + print( + track_name, + f"snr={snr:2.2f} dB", + f"sisnr={sisnr:2.2f}", + f"sdr={sdr:2.2f} dB", + ) + filename = f"{stem} - snr={snr:2.2f}dB - sdr={sdr:2.2f}dB.wav" + else: + filename = f"{stem}.wav" + + if include_track_name: + output_dir = os.path.join( + self.predict_output_path, + track_name + ) + else: + output_dir = self.predict_output_path + + os.makedirs(output_dir, exist_ok=True) + + if fs is None: + fs = self.fs + + result[stem] = output["audio"][stem][b, ...].cpu().numpy() + + return result + + def load_state_dict( + self, state_dict: Mapping[str, Any], strict: bool = False + ) -> Any: + + return super().load_state_dict(state_dict, strict=False) + + + def set_predict_output_path(self, path: str) -> None: + self.predict_output_path = path + os.makedirs(self.predict_output_path, exist_ok=True) + + self.attach_fader() + + def attach_fader(self, force_reattach=False) -> None: + if self.fader is None or force_reattach: + self.fader = parse_fader_config(self.fader_config) + self.fader.to(self.device) + + + def log_dict_with_prefix( + self, + dict_: Dict[str, torch.Tensor], + prefix: str, + batch_size: Optional[int] = None, + **kwargs: Any + ) -> None: + self.log_dict( + {f"{prefix}/{k}": v for k, v in dict_.items()}, + batch_size=batch_size, + logger=True, + sync_dist=True, + **kwargs, + ) \ No newline at end of file diff --git a/data_pipeline/seperation/models/bandit/core/data/__init__.py b/data_pipeline/seperation/models/bandit/core/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1087fe2c4d7d3048295cdf73c0725a015bc0d129 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/__init__.py @@ -0,0 +1,2 @@ +from .dnr.datamodule import DivideAndRemasterDataModule +from .musdb.datamodule import MUSDB18DataModule \ No newline at end of file diff --git a/data_pipeline/seperation/models/bandit/core/data/_types.py b/data_pipeline/seperation/models/bandit/core/data/_types.py new file mode 100644 index 0000000000000000000000000000000000000000..9499f9a80b5dec7b5b0e7882849e4f7b2c801ccf --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/_types.py @@ -0,0 +1,18 @@ +from typing import Dict, Sequence, TypedDict + +import torch + +AudioDict = Dict[str, torch.Tensor] + +DataDict = TypedDict('DataDict', {'audio': AudioDict, 'track': str}) + +BatchedDataDict = TypedDict( + 'BatchedDataDict', + {'audio': AudioDict, 'track': Sequence[str]} +) + + +class DataDictWithLanguage(TypedDict): + audio: AudioDict + track: str + language: str diff --git a/data_pipeline/seperation/models/bandit/core/data/augmentation.py b/data_pipeline/seperation/models/bandit/core/data/augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..238214bf17a69e71f48e8761e1ead05b17d0fa5a --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/augmentation.py @@ -0,0 +1,107 @@ +from abc import ABC +from typing import Any, Dict, Union + +import torch +import torch_audiomentations as tam +from torch import nn + +from models.bandit.core.data._types import BatchedDataDict, DataDict + + +class BaseAugmentor(nn.Module, ABC): + def forward(self, item: Union[DataDict, BatchedDataDict]) -> Union[ + DataDict, BatchedDataDict]: + raise NotImplementedError + + +class StemAugmentor(BaseAugmentor): + def __init__( + self, + audiomentations: Dict[str, Dict[str, Any]], + fix_clipping: bool = True, + scaler_margin: float = 0.5, + 
apply_both_default_and_common: bool = False, + ) -> None: + super().__init__() + + augmentations = {} + + self.has_default = "[default]" in audiomentations + self.has_common = "[common]" in audiomentations + self.apply_both_default_and_common = apply_both_default_and_common + + for stem in audiomentations: + if audiomentations[stem]["name"] == "Compose": + augmentations[stem] = getattr( + tam, + audiomentations[stem]["name"] + )( + [ + getattr(tam, aug["name"])(**aug["kwargs"]) + for aug in + audiomentations[stem]["kwargs"]["transforms"] + ], + **audiomentations[stem]["kwargs"]["kwargs"], + ) + else: + augmentations[stem] = getattr( + tam, + audiomentations[stem]["name"] + )( + **audiomentations[stem]["kwargs"] + ) + + self.augmentations = nn.ModuleDict(augmentations) + self.fix_clipping = fix_clipping + self.scaler_margin = scaler_margin + + def check_and_fix_clipping( + self, item: Union[DataDict, BatchedDataDict] + ) -> Union[DataDict, BatchedDataDict]: + max_abs = [] + + for stem in item["audio"]: + max_abs.append(item["audio"][stem].abs().max().item()) + + if max(max_abs) > 1.0: + scaler = 1.0 / (max(max_abs) + torch.rand( + (1,), + device=item["audio"]["mixture"].device + ) * self.scaler_margin) + + for stem in item["audio"]: + item["audio"][stem] *= scaler + + return item + + def forward(self, item: Union[DataDict, BatchedDataDict]) -> Union[ + DataDict, BatchedDataDict]: + + for stem in item["audio"]: + if stem == "mixture": + continue + + if self.has_common: + item["audio"][stem] = self.augmentations["[common]"]( + item["audio"][stem] + ).samples + + if stem in self.augmentations: + item["audio"][stem] = self.augmentations[stem]( + item["audio"][stem] + ).samples + elif self.has_default: + if not self.has_common or self.apply_both_default_and_common: + item["audio"][stem] = self.augmentations["[default]"]( + item["audio"][stem] + ).samples + + item["audio"]["mixture"] = sum( + [item["audio"][stem] for stem in item["audio"] + if stem != "mixture"] + ) # type: ignore[call-overload, assignment] + + if self.fix_clipping: + item = self.check_and_fix_clipping(item) + + return item diff --git a/data_pipeline/seperation/models/bandit/core/data/augmented.py b/data_pipeline/seperation/models/bandit/core/data/augmented.py new file mode 100644 index 0000000000000000000000000000000000000000..84d19599a6579eb5afd304ef6da76a6cbca49045 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/augmented.py @@ -0,0 +1,35 @@ +import warnings +from typing import Dict, Optional, Union + +import torch +from torch import nn +from torch.utils import data + + +class AugmentedDataset(data.Dataset): + def __init__( + self, + dataset: data.Dataset, + augmentation: nn.Module = nn.Identity(), + target_length: Optional[int] = None, + ) -> None: + warnings.warn( + "This class is no longer used. 
Attach augmentation to " + "the LightningSystem instead.", + DeprecationWarning, + ) + + self.dataset = dataset + self.augmentation = augmentation + + self.ds_length: int = len(dataset) # type: ignore[arg-type] + self.length = target_length if target_length is not None else self.ds_length + + def __getitem__(self, index: int) -> Dict[str, Union[str, Dict[str, + torch.Tensor]]]: + item = self.dataset[index % self.ds_length] + item = self.augmentation(item) + return item + + def __len__(self) -> int: + return self.length diff --git a/data_pipeline/seperation/models/bandit/core/data/base.py b/data_pipeline/seperation/models/bandit/core/data/base.py new file mode 100644 index 0000000000000000000000000000000000000000..a7b6c33a85b93c32209138e3d21bfc8e0f270cac --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/base.py @@ -0,0 +1,69 @@ +import os +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional + +import numpy as np +import pedalboard as pb +import torch +import torchaudio as ta +from torch.utils import data + +from models.bandit.core.data._types import AudioDict, DataDict + + +class BaseSourceSeparationDataset(data.Dataset, ABC): + def __init__( + self, split: str, + stems: List[str], + files: List[str], + data_path: str, + fs: int, + npy_memmap: bool, + recompute_mixture: bool + ): + self.split = split + self.stems = stems + self.stems_no_mixture = [s for s in stems if s != "mixture"] + self.files = files + self.data_path = data_path + self.fs = fs + self.npy_memmap = npy_memmap + self.recompute_mixture = recompute_mixture + + @abstractmethod + def get_stem( + self, + *, + stem: str, + identifier: Dict[str, Any] + ) -> torch.Tensor: + raise NotImplementedError + + def _get_audio(self, stems, identifier: Dict[str, Any]): + audio = {} + for stem in stems: + audio[stem] = self.get_stem(stem=stem, identifier=identifier) + + return audio + + def get_audio(self, identifier: Dict[str, Any]) -> AudioDict: + + if self.recompute_mixture: + audio = self._get_audio( + self.stems_no_mixture, + identifier=identifier + ) + audio["mixture"] = self.compute_mixture(audio) + return audio + else: + return self._get_audio(self.stems, identifier=identifier) + + @abstractmethod + def get_identifier(self, index: int) -> Dict[str, Any]: + pass + + def compute_mixture(self, audio: AudioDict) -> torch.Tensor: + + return sum( + audio[stem] for stem in audio if stem != "mixture" + ) diff --git a/data_pipeline/seperation/models/bandit/core/data/dnr/__init__.py b/data_pipeline/seperation/models/bandit/core/data/dnr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/data_pipeline/seperation/models/bandit/core/data/dnr/datamodule.py b/data_pipeline/seperation/models/bandit/core/data/dnr/datamodule.py new file mode 100644 index 0000000000000000000000000000000000000000..dc5550608aabf460eb1781576112ed60185dd318 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/dnr/datamodule.py @@ -0,0 +1,74 @@ +import os +from typing import Mapping, Optional + +import pytorch_lightning as pl + +from .dataset import ( + DivideAndRemasterDataset, + DivideAndRemasterDeterministicChunkDataset, + DivideAndRemasterRandomChunkDataset, + DivideAndRemasterRandomChunkDatasetWithSpeechReverb +) + + +def DivideAndRemasterDataModule( + data_root: str = "$DATA_ROOT/DnR/v2", + batch_size: int = 2, + num_workers: int = 8, + train_kwargs: Optional[Mapping] = None, + val_kwargs: Optional[Mapping] = None, + 
test_kwargs: Optional[Mapping] = None, + datamodule_kwargs: Optional[Mapping] = None, + use_speech_reverb: bool = False + # augmentor=None +) -> pl.LightningDataModule: + if train_kwargs is None: + train_kwargs = {} + + if val_kwargs is None: + val_kwargs = {} + + if test_kwargs is None: + test_kwargs = {} + + if datamodule_kwargs is None: + datamodule_kwargs = {} + + if num_workers is None: + num_workers = os.cpu_count() + + if num_workers is None: + num_workers = 32 + + num_workers = min(num_workers, 64) + + if use_speech_reverb: + train_cls = DivideAndRemasterRandomChunkDatasetWithSpeechReverb + else: + train_cls = DivideAndRemasterRandomChunkDataset + + train_dataset = train_cls( + data_root, "train", **train_kwargs + ) + + # if augmentor is not None: + # train_dataset = AugmentedDataset(train_dataset, augmentor) + + datamodule = pl.LightningDataModule.from_datasets( + train_dataset=train_dataset, + val_dataset=DivideAndRemasterDeterministicChunkDataset( + data_root, "val", **val_kwargs + ), + test_dataset=DivideAndRemasterDataset( + data_root, + "test", + **test_kwargs + ), + batch_size=batch_size, + num_workers=num_workers, + **datamodule_kwargs + ) + + datamodule.predict_dataloader = datamodule.test_dataloader # type: ignore[method-assign] + + return datamodule diff --git a/data_pipeline/seperation/models/bandit/core/data/dnr/dataset.py b/data_pipeline/seperation/models/bandit/core/data/dnr/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..639290d8ae97951e22fac8523fb44740765212b8 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/dnr/dataset.py @@ -0,0 +1,392 @@ +import os +from abc import ABC +from typing import Any, Dict, List, Optional + +import numpy as np +import pedalboard as pb +import torch +import torchaudio as ta +from torch.utils import data + +from models.bandit.core.data._types import AudioDict, DataDict +from models.bandit.core.data.base import BaseSourceSeparationDataset + + +class DivideAndRemasterBaseDataset(BaseSourceSeparationDataset, ABC): + ALLOWED_STEMS = ["mixture", "speech", "music", "effects", "mne"] + STEM_NAME_MAP = { + "mixture": "mix", + "speech": "speech", + "music": "music", + "effects": "sfx", + } + SPLIT_NAME_MAP = {"train": "tr", "val": "cv", "test": "tt"} + + FULL_TRACK_LENGTH_SECOND = 60 + FULL_TRACK_LENGTH_SAMPLES = FULL_TRACK_LENGTH_SECOND * 44100 + + def __init__( + self, + split: str, + stems: List[str], + files: List[str], + data_path: str, + fs: int = 44100, + npy_memmap: bool = True, + recompute_mixture: bool = False, + ) -> None: + super().__init__( + split=split, + stems=stems, + files=files, + data_path=data_path, + fs=fs, + npy_memmap=npy_memmap, + recompute_mixture=recompute_mixture + ) + + def get_stem( + self, + *, + stem: str, + identifier: Dict[str, Any] + ) -> torch.Tensor: + + if stem == "mne": + return self.get_stem( + stem="music", + identifier=identifier) + self.get_stem( + stem="effects", + identifier=identifier) + + track = identifier["track"] + path = os.path.join(self.data_path, track) + + if self.npy_memmap: + audio = np.load( + os.path.join(path, f"{self.STEM_NAME_MAP[stem]}.npy"), + mmap_mode="r" + ) + else: + # noinspection PyUnresolvedReferences + audio, _ = ta.load( + os.path.join(path, f"{self.STEM_NAME_MAP[stem]}.wav") + ) + + return audio + + def get_identifier(self, index): + return dict(track=self.files[index]) + + def __getitem__(self, index: int) -> DataDict: + identifier = self.get_identifier(index) + audio = self.get_audio(identifier) + + return {"audio": 
audio, "track": f"{self.split}/{identifier['track']}"} + + +class DivideAndRemasterDataset(DivideAndRemasterBaseDataset): + def __init__( + self, + data_root: str, + split: str, + stems: Optional[List[str]] = None, + fs: int = 44100, + npy_memmap: bool = True, + ) -> None: + + if stems is None: + stems = self.ALLOWED_STEMS + self.stems = stems + + data_path = os.path.join(data_root, self.SPLIT_NAME_MAP[split]) + + files = sorted(os.listdir(data_path)) + files = [ + f + for f in files + if (not f.startswith(".")) and os.path.isdir( + os.path.join(data_path, f) + ) + ] + # pprint(list(enumerate(files))) + if split == "train": + assert len(files) == 3406, len(files) + elif split == "val": + assert len(files) == 487, len(files) + elif split == "test": + assert len(files) == 973, len(files) + + self.n_tracks = len(files) + + super().__init__( + data_path=data_path, + split=split, + stems=stems, + files=files, + fs=fs, + npy_memmap=npy_memmap, + ) + + def __len__(self) -> int: + return self.n_tracks + + +class DivideAndRemasterRandomChunkDataset(DivideAndRemasterBaseDataset): + def __init__( + self, + data_root: str, + split: str, + target_length: int, + chunk_size_second: float, + stems: Optional[List[str]] = None, + fs: int = 44100, + npy_memmap: bool = True, + ) -> None: + + if stems is None: + stems = self.ALLOWED_STEMS + self.stems = stems + + data_path = os.path.join(data_root, self.SPLIT_NAME_MAP[split]) + + files = sorted(os.listdir(data_path)) + files = [ + f + for f in files + if (not f.startswith(".")) and os.path.isdir( + os.path.join(data_path, f) + ) + ] + + if split == "train": + assert len(files) == 3406, len(files) + elif split == "val": + assert len(files) == 487, len(files) + elif split == "test": + assert len(files) == 973, len(files) + + self.n_tracks = len(files) + + self.target_length = target_length + self.chunk_size = int(chunk_size_second * fs) + + super().__init__( + data_path=data_path, + split=split, + stems=stems, + files=files, + fs=fs, + npy_memmap=npy_memmap, + ) + + def __len__(self) -> int: + return self.target_length + + def get_identifier(self, index): + return super().get_identifier(index % self.n_tracks) + + def get_stem( + self, + *, + stem: str, + identifier: Dict[str, Any], + chunk_here: bool = False, + ) -> torch.Tensor: + + stem = super().get_stem( + stem=stem, + identifier=identifier + ) + + if chunk_here: + start = np.random.randint( + 0, + self.FULL_TRACK_LENGTH_SAMPLES - self.chunk_size + ) + end = start + self.chunk_size + + stem = stem[:, start:end] + + return stem + + def __getitem__(self, index: int) -> DataDict: + identifier = self.get_identifier(index) + # self.index_lock = index + audio = self.get_audio(identifier) + # self.index_lock = None + + start = np.random.randint( + 0, + self.FULL_TRACK_LENGTH_SAMPLES - self.chunk_size + ) + end = start + self.chunk_size + + audio = { + k: v[:, start:end] for k, v in audio.items() + } + + return {"audio": audio, "track": f"{self.split}/{identifier['track']}"} + + +class DivideAndRemasterDeterministicChunkDataset(DivideAndRemasterBaseDataset): + def __init__( + self, + data_root: str, + split: str, + chunk_size_second: float, + hop_size_second: float, + stems: Optional[List[str]] = None, + fs: int = 44100, + npy_memmap: bool = True, + ) -> None: + + if stems is None: + stems = self.ALLOWED_STEMS + self.stems = stems + + data_path = os.path.join(data_root, self.SPLIT_NAME_MAP[split]) + + files = sorted(os.listdir(data_path)) + files = [ + f + for f in files + if (not f.startswith(".")) and 
os.path.isdir( + os.path.join(data_path, f) + ) + ] + # pprint(list(enumerate(files))) + if split == "train": + assert len(files) == 3406, len(files) + elif split == "val": + assert len(files) == 487, len(files) + elif split == "test": + assert len(files) == 973, len(files) + + self.n_tracks = len(files) + + self.chunk_size = int(chunk_size_second * fs) + self.hop_size = int(hop_size_second * fs) + self.n_chunks_per_track = int( + ( + self.FULL_TRACK_LENGTH_SECOND - chunk_size_second) / hop_size_second + ) + + self.length = self.n_tracks * self.n_chunks_per_track + + super().__init__( + data_path=data_path, + split=split, + stems=stems, + files=files, + fs=fs, + npy_memmap=npy_memmap, + ) + + def get_identifier(self, index): + return super().get_identifier(index % self.n_tracks) + + def __len__(self) -> int: + return self.length + + def __getitem__(self, item: int) -> DataDict: + + index = item % self.n_tracks + chunk = item // self.n_tracks + + data_ = super().__getitem__(index) + + audio = data_["audio"] + + start = chunk * self.hop_size + end = start + self.chunk_size + + for stem in self.stems: + data_["audio"][stem] = audio[stem][:, start:end] + + return data_ + + +class DivideAndRemasterRandomChunkDatasetWithSpeechReverb( + DivideAndRemasterRandomChunkDataset +): + def __init__( + self, + data_root: str, + split: str, + target_length: int, + chunk_size_second: float, + stems: Optional[List[str]] = None, + fs: int = 44100, + npy_memmap: bool = True, + ) -> None: + + if stems is None: + stems = self.ALLOWED_STEMS + + stems_no_mixture = [s for s in stems if s != "mixture"] + + super().__init__( + data_root=data_root, + split=split, + target_length=target_length, + chunk_size_second=chunk_size_second, + stems=stems_no_mixture, + fs=fs, + npy_memmap=npy_memmap, + ) + + self.stems = stems + self.stems_no_mixture = stems_no_mixture + + def __getitem__(self, index: int) -> DataDict: + + data_ = super().__getitem__(index) + + dry = data_["audio"]["speech"][:] + n_samples = dry.shape[-1] + + wet_level = np.random.rand() + + speech = pb.Reverb( + room_size=np.random.rand(), + damping=np.random.rand(), + wet_level=wet_level, + dry_level=(1 - wet_level), + width=np.random.rand() + ).process(dry, self.fs, buffer_size=8192 * 4)[..., :n_samples] + + data_["audio"]["speech"] = speech + + data_["audio"]["mixture"] = sum( + [data_["audio"][s] for s in self.stems_no_mixture] + ) + + return data_ + + def __len__(self) -> int: + return super().__len__() + + +if __name__ == "__main__": + + from pprint import pprint + from tqdm import tqdm + + for split_ in ["train", "val", "test"]: + ds = DivideAndRemasterRandomChunkDatasetWithSpeechReverb( + data_root="$DATA_ROOT/DnR/v2np", + split=split_, + target_length=100, + chunk_size_second=6.0 + ) + + print(split_, len(ds)) + + for track_ in tqdm(ds): # type: ignore + pprint(track_) + track_["audio"] = {k: v.shape for k, v in track_["audio"].items()} + pprint(track_) + # break + + break diff --git a/data_pipeline/seperation/models/bandit/core/data/dnr/preprocess.py b/data_pipeline/seperation/models/bandit/core/data/dnr/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..9d0b58690f3bae726b0655dbade6393c89bf8c9e --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/dnr/preprocess.py @@ -0,0 +1,54 @@ +import glob +import os +from typing import Tuple + +import numpy as np +import torchaudio as ta +from tqdm.contrib.concurrent import process_map + + +def process_one(inputs: Tuple[str, str, int]) -> None: + infile, outfile, 
target_fs = inputs + + dir = os.path.dirname(outfile) + os.makedirs(dir, exist_ok=True) + + data, fs = ta.load(infile) + + if fs != target_fs: + data = ta.functional.resample(data, fs, target_fs, resampling_method="sinc_interp_kaiser") + fs = target_fs + + data = data.numpy() + data = data.astype(np.float32) + + if os.path.exists(outfile): + data_ = np.load(outfile) + if np.allclose(data, data_): + return + + np.save(outfile, data) + + +def preprocess( + data_path: str, + output_path: str, + fs: int +) -> None: + files = glob.glob(os.path.join(data_path, "**", "*.wav"), recursive=True) + print(files) + outfiles = [ + f.replace(data_path, output_path).replace(".wav", ".npy") for f in + files + ] + + os.makedirs(output_path, exist_ok=True) + inputs = list(zip(files, outfiles, [fs] * len(files))) + + process_map(process_one, inputs, chunksize=32) + + +if __name__ == "__main__": + import fire + + fire.Fire() diff --git a/data_pipeline/seperation/models/bandit/core/data/musdb/__init__.py b/data_pipeline/seperation/models/bandit/core/data/musdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/data_pipeline/seperation/models/bandit/core/data/musdb/datamodule.py b/data_pipeline/seperation/models/bandit/core/data/musdb/datamodule.py new file mode 100644 index 0000000000000000000000000000000000000000..a8984daebd535b25f0551d348c91dbd1702fb9da --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/musdb/datamodule.py @@ -0,0 +1,77 @@ +import os.path +from typing import Mapping, Optional + +import pytorch_lightning as pl + +from models.bandit.core.data.musdb.dataset import ( + MUSDB18BaseDataset, + MUSDB18FullTrackDataset, + MUSDB18SadDataset, + MUSDB18SadOnTheFlyAugmentedDataset +) + + +def MUSDB18DataModule( + data_root: str = "$DATA_ROOT/MUSDB18/HQ", + target_stem: str = "vocals", + batch_size: int = 2, + num_workers: int = 8, + train_kwargs: Optional[Mapping] = None, + val_kwargs: Optional[Mapping] = None, + test_kwargs: Optional[Mapping] = None, + datamodule_kwargs: Optional[Mapping] = None, + use_on_the_fly: bool = True, + npy_memmap: bool = True +) -> pl.LightningDataModule: + if train_kwargs is None: + train_kwargs = {} + + if val_kwargs is None: + val_kwargs = {} + + if test_kwargs is None: + test_kwargs = {} + + if datamodule_kwargs is None: + datamodule_kwargs = {} + + train_dataset: MUSDB18BaseDataset + + if use_on_the_fly: + train_dataset = MUSDB18SadOnTheFlyAugmentedDataset( + data_root=os.path.join(data_root, "saded-np"), + split="train", + target_stem=target_stem, + **train_kwargs + ) + else: + train_dataset = MUSDB18SadDataset( + data_root=os.path.join(data_root, "saded-np"), + split="train", + target_stem=target_stem, + **train_kwargs + ) + + datamodule = pl.LightningDataModule.from_datasets( + train_dataset=train_dataset, + val_dataset=MUSDB18SadDataset( + data_root=os.path.join(data_root, "saded-np"), + split="val", + target_stem=target_stem, + **val_kwargs + ), + test_dataset=MUSDB18FullTrackDataset( + data_root=os.path.join(data_root, "canonical"), + split="test", + **test_kwargs + ), + batch_size=batch_size, + num_workers=num_workers, + **datamodule_kwargs + ) + + datamodule.predict_dataloader = ( # type: ignore[method-assign] + datamodule.test_dataloader + ) + + return datamodule diff --git a/data_pipeline/seperation/models/bandit/core/data/musdb/dataset.py b/data_pipeline/seperation/models/bandit/core/data/musdb/dataset.py new file mode 100644 index 
0000000000000000000000000000000000000000..c59a07d06741291fb20c48402abfb45642007bf3 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/musdb/dataset.py @@ -0,0 +1,280 @@ +import os +from abc import ABC +from typing import List, Optional, Tuple + +import numpy as np +import torch +import torchaudio as ta +from torch.utils import data + +from models.bandit.core.data._types import AudioDict, DataDict +from models.bandit.core.data.base import BaseSourceSeparationDataset + + +class MUSDB18BaseDataset(BaseSourceSeparationDataset, ABC): + + ALLOWED_STEMS = ["mixture", "vocals", "bass", "drums", "other"] + + def __init__( + self, + split: str, + stems: List[str], + files: List[str], + data_path: str, + fs: int = 44100, + npy_memmap=False, + ) -> None: + super().__init__( + split=split, + stems=stems, + files=files, + data_path=data_path, + fs=fs, + npy_memmap=npy_memmap, + recompute_mixture=False + ) + + def get_stem(self, *, stem: str, identifier) -> torch.Tensor: + track = identifier["track"] + path = os.path.join(self.data_path, track) + # noinspection PyUnresolvedReferences + + if self.npy_memmap: + audio = np.load(os.path.join(path, f"{stem}.wav.npy"), mmap_mode="r") + else: + audio, _ = ta.load(os.path.join(path, f"{stem}.wav")) + + return audio + + def get_identifier(self, index): + return dict(track=self.files[index]) + + def __getitem__(self, index: int) -> DataDict: + identifier = self.get_identifier(index) + audio = self.get_audio(identifier) + + return {"audio": audio, "track": f"{self.split}/{identifier['track']}"} + + +class MUSDB18FullTrackDataset(MUSDB18BaseDataset): + + N_TRAIN_TRACKS = 100 + N_TEST_TRACKS = 50 + VALIDATION_FILES = [ + "Actions - One Minute Smile", + "Clara Berry And Wooldog - Waltz For My Victims", + "Johnny Lokke - Promises & Lies", + "Patrick Talbot - A Reason To Leave", + "Triviul - Angelsaint", + "Alexander Ross - Goodbye Bolero", + "Fergessen - Nos Palpitants", + "Leaf - Summerghost", + "Skelpolu - Human Mistakes", + "Young Griffo - Pennies", + "ANiMAL - Rockshow", + "James May - On The Line", + "Meaxic - Take A Step", + "Traffic Experiment - Sirens", + ] + + def __init__( + self, data_root: str, split: str, stems: Optional[List[ + str]] = None + ) -> None: + + if stems is None: + stems = self.ALLOWED_STEMS + self.stems = stems + + if split == "test": + subset = "test" + elif split in ["train", "val"]: + subset = "train" + else: + raise NameError + + data_path = os.path.join(data_root, subset) + + files = sorted(os.listdir(data_path)) + files = [f for f in files if not f.startswith(".")] + # pprint(list(enumerate(files))) + if subset == "train": + assert len(files) == 100, len(files) + if split == "train": + files = [f for f in files if f not in self.VALIDATION_FILES] + assert len(files) == 100 - len(self.VALIDATION_FILES) + else: + files = [f for f in files if f in self.VALIDATION_FILES] + assert len(files) == len(self.VALIDATION_FILES) + else: + split = "test" + assert len(files) == 50 + + self.n_tracks = len(files) + + super().__init__( + data_path=data_path, + split=split, + stems=stems, + files=files + ) + + def __len__(self) -> int: + return self.n_tracks + +class MUSDB18SadDataset(MUSDB18BaseDataset): + def __init__( + self, + data_root: str, + split: str, + target_stem: str, + stems: Optional[List[str]] = None, + target_length: Optional[int] = None, + npy_memmap=False, + ) -> None: + + if stems is None: + stems = self.ALLOWED_STEMS + + data_path = os.path.join(data_root, target_stem, split) + + files = sorted(os.listdir(data_path)) + 
files = [f for f in files if not f.startswith(".")] + + super().__init__( + data_path=data_path, + split=split, + stems=stems, + files=files, + npy_memmap=npy_memmap + ) + self.n_segments = len(files) + self.target_stem = target_stem + self.target_length = ( + target_length if target_length is not None else self.n_segments + ) + + def __len__(self) -> int: + return self.target_length + + def __getitem__(self, index: int) -> DataDict: + + index = index % self.n_segments + + return super().__getitem__(index) + + def get_identifier(self, index): + return super().get_identifier(index % self.n_segments) + + +class MUSDB18SadOnTheFlyAugmentedDataset(MUSDB18SadDataset): + def __init__( + self, + data_root: str, + split: str, + target_stem: str, + stems: Optional[List[str]] = None, + target_length: int = 20000, + apply_probability: Optional[float] = None, + chunk_size_second: float = 3.0, + random_scale_range_db: Tuple[float, float] = (-10, 10), + drop_probability: float = 0.1, + rescale: bool = True, + ) -> None: + super().__init__(data_root, split, target_stem, stems) + + if apply_probability is None: + apply_probability = ( + target_length - self.n_segments) / target_length + + self.apply_probability = apply_probability + self.drop_probability = drop_probability + self.chunk_size_second = chunk_size_second + self.random_scale_range_db = random_scale_range_db + self.rescale = rescale + + self.chunk_size_sample = int(self.chunk_size_second * self.fs) + self.target_length = target_length + + def __len__(self) -> int: + return self.target_length + + def __getitem__(self, index: int) -> DataDict: + + index = index % self.n_segments + + # if np.random.rand() > self.apply_probability: + # return super().__getitem__(index) + + audio = {} + identifier = self.get_identifier(index) + + # assert self.target_stem in self.stems_no_mixture + for stem in self.stems_no_mixture: + if stem == self.target_stem: + identifier_ = identifier + else: + if np.random.rand() < self.apply_probability: + index_ = np.random.randint(self.n_segments) + identifier_ = self.get_identifier(index_) + else: + identifier_ = identifier + + audio[stem] = self.get_stem(stem=stem, identifier=identifier_) + + # if stem == self.target_stem: + + if self.chunk_size_sample < audio[stem].shape[-1]: + chunk_start = np.random.randint( + audio[stem].shape[-1] - self.chunk_size_sample + ) + else: + chunk_start = 0 + + if np.random.rand() < self.drop_probability: + # db_scale = "-inf" + linear_scale = 0.0 + else: + db_scale = np.random.uniform(*self.random_scale_range_db) + linear_scale = np.power(10, db_scale / 20) + # db_scale = f"{db_scale:+2.1f}" + # print(linear_scale) + audio[stem][..., + chunk_start: chunk_start + self.chunk_size_sample] = ( + linear_scale + * audio[stem][..., + chunk_start: chunk_start + self.chunk_size_sample] + ) + + audio["mixture"] = self.compute_mixture(audio) + + if self.rescale: + max_abs_val = max( + [torch.max(torch.abs(audio[stem])) for stem in self.stems] + ) # type: ignore[type-var] + if max_abs_val > 1: + audio = {k: v / max_abs_val for k, v in audio.items()} + + track = identifier["track"] + + return {"audio": audio, "track": f"{self.split}/{track}"} + +# if __name__ == "__main__": +# +# from pprint import pprint +# from tqdm import tqdm +# +# for split_ in ["train", "val", "test"]: +# ds = MUSDB18SadOnTheFlyAugmentedDataset( +# data_root="$DATA_ROOT/MUSDB18/HQ/saded", +# split=split_, +# target_stem="vocals" +# ) +# +# print(split_, len(ds)) +# +# for track_ in tqdm(ds): +# track_["audio"] = { +# k: v.shape 
for k, v in track_["audio"].items() +# } +# pprint(track_) diff --git a/data_pipeline/seperation/models/bandit/core/data/musdb/preprocess.py b/data_pipeline/seperation/models/bandit/core/data/musdb/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..45b3fe4037235458dba5c7d17c9d0947eb21d0c5 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/musdb/preprocess.py @@ -0,0 +1,238 @@ +import glob +import os + +import numpy as np +import torch +import torchaudio as ta +from torch import nn +from torch.nn import functional as F +from tqdm.contrib.concurrent import process_map + +from core.data._types import DataDict +from core.data.musdb.dataset import MUSDB18FullTrackDataset +import pyloudnorm as pyln + +class SourceActivityDetector(nn.Module): + def __init__( + self, + analysis_stem: str, + output_path: str, + fs: int = 44100, + segment_length_second: float = 6.0, + hop_length_second: float = 3.0, + n_chunks: int = 10, + chunk_epsilon: float = 1e-5, + energy_threshold_quantile: float = 0.15, + segment_epsilon: float = 1e-3, + salient_proportion_threshold: float = 0.5, + target_lufs: float = -24 + ) -> None: + super().__init__() + + self.fs = fs + self.segment_length = int(segment_length_second * self.fs) + self.hop_length = int(hop_length_second * self.fs) + self.n_chunks = n_chunks + assert self.segment_length % self.n_chunks == 0 + self.chunk_size = self.segment_length // self.n_chunks + self.chunk_epsilon = chunk_epsilon + self.energy_threshold_quantile = energy_threshold_quantile + self.segment_epsilon = segment_epsilon + self.salient_proportion_threshold = salient_proportion_threshold + self.analysis_stem = analysis_stem + + self.meter = pyln.Meter(self.fs) + self.target_lufs = target_lufs + + self.output_path = output_path + + def forward(self, data: DataDict) -> None: + + stem_ = self.analysis_stem if ( + self.analysis_stem != "none") else "mixture" + + x = data["audio"][stem_] + + xnp = x.numpy() + loudness = self.meter.integrated_loudness(xnp.T) + + for stem in data["audio"]: + s = data["audio"][stem] + s = pyln.normalize.loudness(s.numpy().T, loudness, self.target_lufs).T + s = torch.as_tensor(s) + data["audio"][stem] = s + + if x.ndim == 3: + assert x.shape[0] == 1 + x = x[0] + + n_chan, n_samples = x.shape + + n_segments = ( + int( + np.ceil((n_samples - self.segment_length) / self.hop_length) + ) + 1 + ) + + segments = torch.zeros((n_segments, n_chan, self.segment_length)) + for i in range(n_segments): + start = i * self.hop_length + end = start + self.segment_length + end = min(end, n_samples) + + xseg = x[:, start:end] + + if end - start < self.segment_length: + xseg = F.pad( + xseg, + pad=(0, self.segment_length - (end - start)), + value=torch.nan + ) + + segments[i, :, :] = xseg + + chunks = segments.reshape( + (n_segments, n_chan, self.n_chunks, self.chunk_size) + ) + + if self.analysis_stem != "none": + chunk_energies = torch.mean(torch.square(chunks), dim=(1, 3)) + chunk_energies = torch.nan_to_num(chunk_energies, nan=0) + chunk_energies[chunk_energies == 0] = self.chunk_epsilon + + energy_threshold = torch.nanquantile( + chunk_energies, q=self.energy_threshold_quantile + ) + + if energy_threshold < self.segment_epsilon: + energy_threshold = self.segment_epsilon # type: ignore[assignment] + + chunks_above_threshold = chunk_energies > energy_threshold + n_chunks_above_threshold = torch.mean( + chunks_above_threshold.to(torch.float), dim=-1 + ) + + segment_above_threshold = ( + n_chunks_above_threshold > 
self.salient_proportion_threshold + ) + + if torch.sum(segment_above_threshold) == 0: + return + + else: + segment_above_threshold = torch.ones((n_segments,)) + + for i in range(n_segments): + if not segment_above_threshold[i]: + continue + + outpath = os.path.join( + self.output_path, + self.analysis_stem, + f"{data['track']} - {self.analysis_stem}{i:03d}", + ) + os.makedirs(outpath, exist_ok=True) + + for stem in data["audio"]: + if stem == self.analysis_stem: + segment = torch.nan_to_num(segments[i, :, :], nan=0) + else: + start = i * self.hop_length + end = start + self.segment_length + end = min(n_samples, end) + + segment = data["audio"][stem][:, start:end] + + if end - start < self.segment_length: + segment = F.pad( + segment, + (0, self.segment_length - (end - start)) + ) + + assert segment.shape[-1] == self.segment_length, segment.shape + + # ta.save(os.path.join(outpath, f"{stem}.wav"), segment, self.fs) + + np.save(os.path.join(outpath, f"{stem}.wav"), segment) + + +def preprocess( + analysis_stem: str, + output_path: str = "/data/MUSDB18/HQ/saded-np", + fs: int = 44100, + segment_length_second: float = 6.0, + hop_length_second: float = 3.0, + n_chunks: int = 10, + chunk_epsilon: float = 1e-5, + energy_threshold_quantile: float = 0.15, + segment_epsilon: float = 1e-3, + salient_proportion_threshold: float = 0.5, +) -> None: + + sad = SourceActivityDetector( + analysis_stem=analysis_stem, + output_path=output_path, + fs=fs, + segment_length_second=segment_length_second, + hop_length_second=hop_length_second, + n_chunks=n_chunks, + chunk_epsilon=chunk_epsilon, + energy_threshold_quantile=energy_threshold_quantile, + segment_epsilon=segment_epsilon, + salient_proportion_threshold=salient_proportion_threshold, + ) + + for split in ["train", "val", "test"]: + ds = MUSDB18FullTrackDataset( + data_root="/data/MUSDB18/HQ/canonical", + split=split, + ) + + tracks = [] + for i, track in enumerate(tqdm(ds, total=len(ds))): + if i % 32 == 0 and tracks: + process_map(sad, tracks, max_workers=8) + tracks = [] + tracks.append(track) + process_map(sad, tracks, max_workers=8) + +def loudness_norm_one( + inputs +): + infile, outfile, target_lufs = inputs + + audio, fs = ta.load(infile) + audio = audio.mean(dim=0, keepdim=True).numpy().T + + meter = pyln.Meter(fs) + loudness = meter.integrated_loudness(audio) + audio = pyln.normalize.loudness(audio, loudness, target_lufs) + + os.makedirs(os.path.dirname(outfile), exist_ok=True) + np.save(outfile, audio.T) + +def loudness_norm( + data_path: str, + # output_path: str, + target_lufs = -17.0, +): + files = glob.glob( + os.path.join(data_path, "**", "*.wav"), recursive=True + ) + + outfiles = [ + f.replace(".wav", ".npy").replace("saded", "saded-np") for f in files + ] + + files = [(f, o, target_lufs) for f, o in zip(files, outfiles)] + + process_map(loudness_norm_one, files, chunksize=2) + + + +if __name__ == "__main__": + + from tqdm import tqdm + import fire + + fire.Fire() diff --git a/data_pipeline/seperation/models/bandit/core/data/musdb/validation.yaml b/data_pipeline/seperation/models/bandit/core/data/musdb/validation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f8752478d285d1d13d5e842225af1de95cae57a --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/data/musdb/validation.yaml @@ -0,0 +1,15 @@ +validation: + - 'Actions - One Minute Smile' + - 'Clara Berry And Wooldog - Waltz For My Victims' + - 'Johnny Lokke - Promises & Lies' + - 'Patrick Talbot - A Reason To Leave' + - 'Triviul - Angelsaint' + - 
'Alexander Ross - Goodbye Bolero' + - 'Fergessen - Nos Palpitants' + - 'Leaf - Summerghost' + - 'Skelpolu - Human Mistakes' + - 'Young Griffo - Pennies' + - 'ANiMAL - Rockshow' + - 'James May - On The Line' + - 'Meaxic - Take A Step' + - 'Traffic Experiment - Sirens' \ No newline at end of file diff --git a/data_pipeline/seperation/models/bandit/core/loss/__init__.py b/data_pipeline/seperation/models/bandit/core/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab803aecde4f686e34d93f3f2d585e0a9867525 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/loss/__init__.py @@ -0,0 +1,2 @@ +from ._multistem import MultiStemWrapperFromConfig +from ._timefreq import ReImL1Loss, ReImL2Loss, TimeFreqL1Loss, TimeFreqL2Loss, TimeFreqSignalNoisePNormRatioLoss diff --git a/data_pipeline/seperation/models/bandit/core/loss/_complex.py b/data_pipeline/seperation/models/bandit/core/loss/_complex.py new file mode 100644 index 0000000000000000000000000000000000000000..1d97e5d8bab3fdb095c2ba7c77faebef26e8f1ce --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/loss/_complex.py @@ -0,0 +1,34 @@ +from typing import Any + +import torch +from torch import nn +from torch.nn.modules import loss as _loss +from torch.nn.modules.loss import _Loss + + +class ReImLossWrapper(_Loss): + def __init__(self, module: _Loss) -> None: + super().__init__() + self.module = module + + def forward( + self, + preds: torch.Tensor, + target: torch.Tensor + ) -> torch.Tensor: + return self.module( + torch.view_as_real(preds), + torch.view_as_real(target) + ) + + +class ReImL1Loss(ReImLossWrapper): + def __init__(self, **kwargs: Any) -> None: + l1_loss = _loss.L1Loss(**kwargs) + super().__init__(module=(l1_loss)) + + +class ReImL2Loss(ReImLossWrapper): + def __init__(self, **kwargs: Any) -> None: + l2_loss = _loss.MSELoss(**kwargs) + super().__init__(module=(l2_loss)) diff --git a/data_pipeline/seperation/models/bandit/core/loss/_multistem.py b/data_pipeline/seperation/models/bandit/core/loss/_multistem.py new file mode 100644 index 0000000000000000000000000000000000000000..675e0ffbecf1f9f5efb0369bcb9d5c590efcfc31 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/loss/_multistem.py @@ -0,0 +1,45 @@ +from typing import Any, Dict + +import torch +from asteroid import losses as asteroid_losses +from torch import nn +from torch.nn.modules.loss import _Loss + +from . 
import snr + + +def parse_loss(name: str, kwargs: Dict[str, Any]) -> _Loss: + + for module in [nn.modules.loss, snr, asteroid_losses, asteroid_losses.sdr]: + if name in module.__dict__: + return module.__dict__[name](**kwargs) + + raise NameError + + +class MultiStemWrapper(_Loss): + def __init__(self, module: _Loss, modality: str = "audio") -> None: + super().__init__() + self.loss = module + self.modality = modality + + def forward( + self, + preds: Dict[str, Dict[str, torch.Tensor]], + target: Dict[str, Dict[str, torch.Tensor]], + ) -> torch.Tensor: + loss = { + stem: self.loss( + preds[self.modality][stem], + target[self.modality][stem] + ) + for stem in preds[self.modality] if stem in target[self.modality] + } + + return sum(list(loss.values())) + + +class MultiStemWrapperFromConfig(MultiStemWrapper): + def __init__(self, name: str, kwargs: Any, modality: str = "audio") -> None: + loss = parse_loss(name, kwargs) + super().__init__(module=loss, modality=modality) diff --git a/data_pipeline/seperation/models/bandit/core/loss/_timefreq.py b/data_pipeline/seperation/models/bandit/core/loss/_timefreq.py new file mode 100644 index 0000000000000000000000000000000000000000..6ea9d5994ca645546b5ccb7e6eafaa3d2fbcf959 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/loss/_timefreq.py @@ -0,0 +1,113 @@ +from typing import Any, Dict, Optional + +import torch +from torch import nn +from torch.nn.modules.loss import _Loss + +from models.bandit.core.loss._multistem import MultiStemWrapper +from models.bandit.core.loss._complex import ReImL1Loss, ReImL2Loss, ReImLossWrapper +from models.bandit.core.loss.snr import SignalNoisePNormRatio + +class TimeFreqWrapper(_Loss): + def __init__( + self, + time_module: _Loss, + freq_module: Optional[_Loss] = None, + time_weight: float = 1.0, + freq_weight: float = 1.0, + multistem: bool = True, + ) -> None: + super().__init__() + + if freq_module is None: + freq_module = time_module + + if multistem: + time_module = MultiStemWrapper(time_module, modality="audio") + freq_module = MultiStemWrapper(freq_module, modality="spectrogram") + + self.time_module = time_module + self.freq_module = freq_module + + self.time_weight = time_weight + self.freq_weight = freq_weight + + # TODO: add better type hints + def forward(self, preds: Any, target: Any) -> torch.Tensor: + + return self.time_weight * self.time_module( + preds, target + ) + self.freq_weight * self.freq_module(preds, target) + + +class TimeFreqL1Loss(TimeFreqWrapper): + def __init__( + self, + time_weight: float = 1.0, + freq_weight: float = 1.0, + tkwargs: Optional[Dict[str, Any]] = None, + fkwargs: Optional[Dict[str, Any]] = None, + multistem: bool = True, + ) -> None: + if tkwargs is None: + tkwargs = {} + if fkwargs is None: + fkwargs = {} + time_module = (nn.L1Loss(**tkwargs)) + freq_module = ReImL1Loss(**fkwargs) + super().__init__( + time_module, + freq_module, + time_weight, + freq_weight, + multistem + ) + + +class TimeFreqL2Loss(TimeFreqWrapper): + def __init__( + self, + time_weight: float = 1.0, + freq_weight: float = 1.0, + tkwargs: Optional[Dict[str, Any]] = None, + fkwargs: Optional[Dict[str, Any]] = None, + multistem: bool = True, + ) -> None: + if tkwargs is None: + tkwargs = {} + if fkwargs is None: + fkwargs = {} + time_module = nn.MSELoss(**tkwargs) + freq_module = ReImL2Loss(**fkwargs) + super().__init__( + time_module, + freq_module, + time_weight, + freq_weight, + multistem + ) + + + +class TimeFreqSignalNoisePNormRatioLoss(TimeFreqWrapper): + def __init__( + self, + 
time_weight: float = 1.0, + freq_weight: float = 1.0, + tkwargs: Optional[Dict[str, Any]] = None, + fkwargs: Optional[Dict[str, Any]] = None, + multistem: bool = True, + ) -> None: + if tkwargs is None: + tkwargs = {} + if fkwargs is None: + fkwargs = {} + time_module = SignalNoisePNormRatio(**tkwargs) + freq_module = SignalNoisePNormRatio(**fkwargs) + super().__init__( + time_module, + freq_module, + time_weight, + freq_weight, + multistem + ) diff --git a/data_pipeline/seperation/models/bandit/core/loss/snr.py b/data_pipeline/seperation/models/bandit/core/loss/snr.py new file mode 100644 index 0000000000000000000000000000000000000000..2996dd57080db687599c1fd673d6807041a04b52 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/loss/snr.py @@ -0,0 +1,146 @@ +import torch +from torch.nn.modules.loss import _Loss +from torch.nn import functional as F + +class SignalNoisePNormRatio(_Loss): + def __init__( + self, + p: float = 1.0, + scale_invariant: bool = False, + zero_mean: bool = False, + take_log: bool = True, + reduction: str = "mean", + EPS: float = 1e-3, + ) -> None: + assert reduction != "sum", NotImplementedError + super().__init__(reduction=reduction) + assert not zero_mean + + self.p = p + + self.EPS = EPS + self.take_log = take_log + + self.scale_invariant = scale_invariant + + def forward( + self, + est_target: torch.Tensor, + target: torch.Tensor + ) -> torch.Tensor: + + target_ = target + if self.scale_invariant: + ndim = target.ndim + dot = torch.sum(est_target * torch.conj(target), dim=-1, keepdim=True) + s_target_energy = ( + torch.sum(target * torch.conj(target), dim=-1, keepdim=True) + ) + + if ndim > 2: + dot = torch.sum(dot, dim=list(range(1, ndim)), keepdim=True) + s_target_energy = torch.sum(s_target_energy, dim=list(range(1, ndim)), keepdim=True) + + target_scaler = (dot + 1e-8) / (s_target_energy + 1e-8) + target = target_ * target_scaler + + if torch.is_complex(est_target): + est_target = torch.view_as_real(est_target) + target = torch.view_as_real(target) + + + batch_size = est_target.shape[0] + est_target = est_target.reshape(batch_size, -1) + target = target.reshape(batch_size, -1) + # target_ = target_.reshape(batch_size, -1) + + if self.p == 1: + e_error = torch.abs(est_target-target).mean(dim=-1) + e_target = torch.abs(target).mean(dim=-1) + elif self.p == 2: + e_error = torch.square(est_target-target).mean(dim=-1) + e_target = torch.square(target).mean(dim=-1) + else: + raise NotImplementedError + + if self.take_log: + loss = 10*(torch.log10(e_error + self.EPS) - torch.log10(e_target + self.EPS)) + else: + loss = (e_error + self.EPS)/(e_target + self.EPS) + + if self.reduction == "mean": + loss = loss.mean() + elif self.reduction == "sum": + loss = loss.sum() + + return loss + + + +class MultichannelSingleSrcNegSDR(_Loss): + def __init__( + self, + sdr_type: str, + p: float = 2.0, + zero_mean: bool = True, + take_log: bool = True, + reduction: str = "mean", + EPS: float = 1e-8, + ) -> None: + assert reduction != "sum", NotImplementedError + super().__init__(reduction=reduction) + + assert sdr_type in ["snr", "sisdr", "sdsdr"] + self.sdr_type = sdr_type + self.zero_mean = zero_mean + self.take_log = take_log + self.EPS = 1e-8 + + self.p = p + + def forward( + self, + est_target: torch.Tensor, + target: torch.Tensor + ) -> torch.Tensor: + if target.size() != est_target.size() or target.ndim != 3: + raise TypeError( + f"Inputs must be of shape [batch, time], got {target.size()} and {est_target.size()} instead" + ) + # Step 1. 
Zero-mean norm + if self.zero_mean: + mean_source = torch.mean(target, dim=[1, 2], keepdim=True) + mean_estimate = torch.mean(est_target, dim=[1, 2], keepdim=True) + target = target - mean_source + est_target = est_target - mean_estimate + # Step 2. Pair-wise SI-SDR. + if self.sdr_type in ["sisdr", "sdsdr"]: + # [batch, 1] + dot = torch.sum(est_target * target, dim=[1, 2], keepdim=True) + # [batch, 1] + s_target_energy = ( + torch.sum(target ** 2, dim=[1, 2], keepdim=True) + self.EPS + ) + # [batch, time] + scaled_target = dot * target / s_target_energy + else: + # [batch, time] + scaled_target = target + if self.sdr_type in ["sdsdr", "snr"]: + e_noise = est_target - target + else: + e_noise = est_target - scaled_target + # [batch] + + if self.p == 2.0: + losses = torch.sum(scaled_target ** 2, dim=[1, 2]) / ( + torch.sum(e_noise ** 2, dim=[1, 2]) + self.EPS + ) + else: + losses = torch.norm(scaled_target, p=self.p, dim=[1, 2]) / ( + torch.linalg.vector_norm(e_noise, p=self.p, dim=[1, 2]) + self.EPS + ) + if self.take_log: + losses = 10 * torch.log10(losses + self.EPS) + losses = losses.mean() if self.reduction == "mean" else losses + return -losses diff --git a/data_pipeline/seperation/models/bandit/core/metrics/__init__.py b/data_pipeline/seperation/models/bandit/core/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c638b4df585ad6c3c6490d9e67b7fc197f0d06f4 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/metrics/__init__.py @@ -0,0 +1,9 @@ +from .snr import ( + ChunkMedianScaleInvariantSignalDistortionRatio, + ChunkMedianScaleInvariantSignalNoiseRatio, + ChunkMedianSignalDistortionRatio, + ChunkMedianSignalNoiseRatio, + SafeSignalDistortionRatio, +) + +# from .mushra import EstimatedMushraScore diff --git a/data_pipeline/seperation/models/bandit/core/metrics/_squim.py b/data_pipeline/seperation/models/bandit/core/metrics/_squim.py new file mode 100644 index 0000000000000000000000000000000000000000..ec76b5fb5e27d0f6a6aaa5ececc5161482150bfc --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/metrics/_squim.py @@ -0,0 +1,383 @@ +from dataclasses import dataclass + +from torchaudio._internal import load_state_dict_from_url + +import math +from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def transform_wb_pesq_range(x: float) -> float: + """The metric defined by ITU-T P.862 is often called 'PESQ score', which is defined + for narrow-band signals and has a value range of [-0.5, 4.5] exactly. Here, we use the metric + defined by ITU-T P.862.2, commonly known as 'wide-band PESQ' and will be referred to as "PESQ score". + + Args: + x (float): Narrow-band PESQ score. + + Returns: + (float): Wide-band PESQ score. + """ + return 0.999 + (4.999 - 0.999) / (1 + math.exp(-1.3669 * x + 3.8224)) + + +PESQRange: Tuple[float, float] = ( + 1.0, # P.862.2 uses a different input filter than P.862, and the lower bound of + # the raw score is not -0.5 anymore. It's hard to figure out the true lower bound. + # We are using 1.0 as a reasonable approximation. 
+    transform_wb_pesq_range(4.5),
+)
+
+
+class RangeSigmoid(nn.Module):
+    def __init__(self, val_range: Tuple[float, float] = (0.0, 1.0)) -> None:
+        super(RangeSigmoid, self).__init__()
+        assert isinstance(val_range, tuple) and len(val_range) == 2
+        self.val_range: Tuple[float, float] = val_range
+        self.sigmoid: nn.modules.Module = nn.Sigmoid()
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = self.sigmoid(x) * (self.val_range[1] - self.val_range[0]) + self.val_range[0]
+        return out
+
+
+class Encoder(nn.Module):
+    """Encoder module that transforms 1D waveform to 2D representations.
+
+    Args:
+        feat_dim (int, optional): The feature dimension after Encoder module. (Default: 512)
+        win_len (int, optional): kernel size in the Conv1D layer. (Default: 32)
+    """
+
+    def __init__(self, feat_dim: int = 512, win_len: int = 32) -> None:
+        super(Encoder, self).__init__()
+
+        self.conv1d = nn.Conv1d(1, feat_dim, win_len, stride=win_len // 2, bias=False)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """Apply waveforms to convolutional layer and ReLU layer.
+
+        Args:
+            x (torch.Tensor): Input waveforms. Tensor with dimensions `(batch, time)`.
+
+        Returns:
+            (torch.Tensor): Feature Tensor with dimensions `(batch, channel, frame)`.
+        """
+        out = x.unsqueeze(dim=1)
+        out = F.relu(self.conv1d(out))
+        return out
+
+
+class SingleRNN(nn.Module):
+    def __init__(self, rnn_type: str, input_size: int, hidden_size: int, dropout: float = 0.0) -> None:
+        super(SingleRNN, self).__init__()
+
+        self.rnn_type = rnn_type
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+
+        self.rnn: nn.modules.Module = getattr(nn, rnn_type)(
+            input_size,
+            hidden_size,
+            1,
+            dropout=dropout,
+            batch_first=True,
+            bidirectional=True,
+        )
+
+        self.proj = nn.Linear(hidden_size * 2, input_size)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        # input shape: batch, seq, dim
+        out, _ = self.rnn(x)
+        out = self.proj(out)
+        return out
+
+
+class DPRNN(nn.Module):
+    """*Dual-path recurrent neural networks (DPRNN)* :cite:`luo2020dual`.
+
+    Args:
+        feat_dim (int, optional): The feature dimension after Encoder module. (Default: 64)
+        hidden_dim (int, optional): Hidden dimension in the RNN layer of DPRNN. (Default: 128)
+        num_blocks (int, optional): Number of DPRNN layers. (Default: 6)
+        rnn_type (str, optional): Type of RNN in DPRNN. Valid options are ["RNN", "LSTM", "GRU"]. (Default: "LSTM")
+        d_model (int, optional): The number of expected features in the input. (Default: 256)
+        chunk_size (int, optional): Chunk size of input for DPRNN. (Default: 100)
+        chunk_stride (int, optional): Stride of chunk input for DPRNN.
(Default: 50) + """ + + def __init__( + self, + feat_dim: int = 64, + hidden_dim: int = 128, + num_blocks: int = 6, + rnn_type: str = "LSTM", + d_model: int = 256, + chunk_size: int = 100, + chunk_stride: int = 50, + ) -> None: + super(DPRNN, self).__init__() + + self.num_blocks = num_blocks + + self.row_rnn = nn.ModuleList([]) + self.col_rnn = nn.ModuleList([]) + self.row_norm = nn.ModuleList([]) + self.col_norm = nn.ModuleList([]) + for _ in range(num_blocks): + self.row_rnn.append(SingleRNN(rnn_type, feat_dim, hidden_dim)) + self.col_rnn.append(SingleRNN(rnn_type, feat_dim, hidden_dim)) + self.row_norm.append(nn.GroupNorm(1, feat_dim, eps=1e-8)) + self.col_norm.append(nn.GroupNorm(1, feat_dim, eps=1e-8)) + self.conv = nn.Sequential( + nn.Conv2d(feat_dim, d_model, 1), + nn.PReLU(), + ) + self.chunk_size = chunk_size + self.chunk_stride = chunk_stride + + def pad_chunk(self, x: torch.Tensor) -> Tuple[torch.Tensor, int]: + # input shape: (B, N, T) + seq_len = x.shape[-1] + + rest = self.chunk_size - (self.chunk_stride + seq_len % self.chunk_size) % self.chunk_size + out = F.pad(x, [self.chunk_stride, rest + self.chunk_stride]) + + return out, rest + + def chunking(self, x: torch.Tensor) -> Tuple[torch.Tensor, int]: + out, rest = self.pad_chunk(x) + batch_size, feat_dim, seq_len = out.shape + + segments1 = out[:, :, : -self.chunk_stride].contiguous().view(batch_size, feat_dim, -1, self.chunk_size) + segments2 = out[:, :, self.chunk_stride :].contiguous().view(batch_size, feat_dim, -1, self.chunk_size) + out = torch.cat([segments1, segments2], dim=3) + out = out.view(batch_size, feat_dim, -1, self.chunk_size).transpose(2, 3).contiguous() + + return out, rest + + def merging(self, x: torch.Tensor, rest: int) -> torch.Tensor: + batch_size, dim, _, _ = x.shape + out = x.transpose(2, 3).contiguous().view(batch_size, dim, -1, self.chunk_size * 2) + out1 = out[:, :, :, : self.chunk_size].contiguous().view(batch_size, dim, -1)[:, :, self.chunk_stride :] + out2 = out[:, :, :, self.chunk_size :].contiguous().view(batch_size, dim, -1)[:, :, : -self.chunk_stride] + out = out1 + out2 + if rest > 0: + out = out[:, :, :-rest] + out = out.contiguous() + return out + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x, rest = self.chunking(x) + batch_size, _, dim1, dim2 = x.shape + out = x + for row_rnn, row_norm, col_rnn, col_norm in zip(self.row_rnn, self.row_norm, self.col_rnn, self.col_norm): + row_in = out.permute(0, 3, 2, 1).contiguous().view(batch_size * dim2, dim1, -1).contiguous() + row_out = row_rnn(row_in) + row_out = row_out.view(batch_size, dim2, dim1, -1).permute(0, 3, 2, 1).contiguous() + row_out = row_norm(row_out) + out = out + row_out + + col_in = out.permute(0, 2, 3, 1).contiguous().view(batch_size * dim1, dim2, -1).contiguous() + col_out = col_rnn(col_in) + col_out = col_out.view(batch_size, dim1, dim2, -1).permute(0, 3, 1, 2).contiguous() + col_out = col_norm(col_out) + out = out + col_out + out = self.conv(out) + out = self.merging(out, rest) + out = out.transpose(1, 2).contiguous() + return out + + +class AutoPool(nn.Module): + def __init__(self, pool_dim: int = 1) -> None: + super(AutoPool, self).__init__() + self.pool_dim: int = pool_dim + self.softmax: nn.modules.Module = nn.Softmax(dim=pool_dim) + self.register_parameter("alpha", nn.Parameter(torch.ones(1))) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + weight = self.softmax(torch.mul(x, self.alpha)) + out = torch.sum(torch.mul(x, weight), dim=self.pool_dim) + return out + + +class SquimObjective(nn.Module): + 
"""Speech Quality and Intelligibility Measures (SQUIM) model that predicts **objective** metric scores + for speech enhancement (e.g., STOI, PESQ, and SI-SDR). + + Args: + encoder (torch.nn.Module): Encoder module to transform 1D waveform to 2D feature representation. + dprnn (torch.nn.Module): DPRNN module to model sequential feature. + branches (torch.nn.ModuleList): Transformer branches in which each branch estimate one objective metirc score. + """ + + def __init__( + self, + encoder: nn.Module, + dprnn: nn.Module, + branches: nn.ModuleList, + ): + super(SquimObjective, self).__init__() + self.encoder = encoder + self.dprnn = dprnn + self.branches = branches + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + """ + Args: + x (torch.Tensor): Input waveforms. Tensor with dimensions `(batch, time)`. + + Returns: + List(torch.Tensor): List of score Tenosrs. Each Tensor is with dimension `(batch,)`. + """ + if x.ndim != 2: + raise ValueError(f"The input must be a 2D Tensor. Found dimension {x.ndim}.") + x = x / (torch.mean(x**2, dim=1, keepdim=True) ** 0.5 * 20) + out = self.encoder(x) + out = self.dprnn(out) + scores = [] + for branch in self.branches: + scores.append(branch(out).squeeze(dim=1)) + return scores + + +def _create_branch(d_model: int, nhead: int, metric: str) -> nn.modules.Module: + """Create branch module after DPRNN model for predicting metric score. + + Args: + d_model (int): The number of expected features in the input. + nhead (int): Number of heads in the multi-head attention model. + metric (str): The metric name to predict. + + Returns: + (nn.Module): Returned module to predict corresponding metric score. + """ + layer1 = nn.TransformerEncoderLayer(d_model, nhead, d_model * 4, dropout=0.0, batch_first=True) + layer2 = AutoPool() + if metric == "stoi": + layer3 = nn.Sequential( + nn.Linear(d_model, d_model), + nn.PReLU(), + nn.Linear(d_model, 1), + RangeSigmoid(), + ) + elif metric == "pesq": + layer3 = nn.Sequential( + nn.Linear(d_model, d_model), + nn.PReLU(), + nn.Linear(d_model, 1), + RangeSigmoid(val_range=PESQRange), + ) + else: + layer3: nn.modules.Module = nn.Sequential(nn.Linear(d_model, d_model), nn.PReLU(), nn.Linear(d_model, 1)) + return nn.Sequential(layer1, layer2, layer3) + + +def squim_objective_model( + feat_dim: int, + win_len: int, + d_model: int, + nhead: int, + hidden_dim: int, + num_blocks: int, + rnn_type: str, + chunk_size: int, + chunk_stride: Optional[int] = None, +) -> SquimObjective: + """Build a custome :class:`torchaudio.prototype.models.SquimObjective` model. + + Args: + feat_dim (int, optional): The feature dimension after Encoder module. + win_len (int): Kernel size in the Encoder module. + d_model (int): The number of expected features in the input. + nhead (int): Number of heads in the multi-head attention model. + hidden_dim (int): Hidden dimension in the RNN layer of DPRNN. + num_blocks (int): Number of DPRNN layers. + rnn_type (str): Type of RNN in DPRNN. Valid options are ["RNN", "LSTM", "GRU"]. + chunk_size (int): Chunk size of input for DPRNN. + chunk_stride (int or None, optional): Stride of chunk input for DPRNN. 
+ """ + if chunk_stride is None: + chunk_stride = chunk_size // 2 + encoder = Encoder(feat_dim, win_len) + dprnn = DPRNN(feat_dim, hidden_dim, num_blocks, rnn_type, d_model, chunk_size, chunk_stride) + branches = nn.ModuleList( + [ + _create_branch(d_model, nhead, "stoi"), + _create_branch(d_model, nhead, "pesq"), + _create_branch(d_model, nhead, "sisdr"), + ] + ) + return SquimObjective(encoder, dprnn, branches) + + +def squim_objective_base() -> SquimObjective: + """Build :class:`torchaudio.prototype.models.SquimObjective` model with default arguments.""" + return squim_objective_model( + feat_dim=256, + win_len=64, + d_model=256, + nhead=4, + hidden_dim=256, + num_blocks=2, + rnn_type="LSTM", + chunk_size=71, + ) + +@dataclass +class SquimObjectiveBundle: + + _path: str + _sample_rate: float + + def _get_state_dict(self, dl_kwargs): + url = f"https://download.pytorch.org/torchaudio/models/{self._path}" + dl_kwargs = {} if dl_kwargs is None else dl_kwargs + state_dict = load_state_dict_from_url(url, **dl_kwargs) + return state_dict + + def get_model(self, *, dl_kwargs=None) -> SquimObjective: + """Construct the SquimObjective model, and load the pretrained weight. + + The weight file is downloaded from the internet and cached with + :func:`torch.hub.load_state_dict_from_url` + + Args: + dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`. + + Returns: + Variation of :py:class:`~torchaudio.models.SquimObjective`. + """ + model = squim_objective_base() + model.load_state_dict(self._get_state_dict(dl_kwargs)) + model.eval() + return model + + @property + def sample_rate(self): + """Sample rate of the audio that the model is trained on. + + :type: float + """ + return self._sample_rate + + +SQUIM_OBJECTIVE = SquimObjectiveBundle( + "squim_objective_dns2020.pth", + _sample_rate=16000, +) +SQUIM_OBJECTIVE.__doc__ = """SquimObjective pipeline trained using approach described in + :cite:`kumar2023torchaudio` on the *DNS 2020 Dataset* :cite:`reddy2020interspeech`. + + The underlying model is constructed by :py:func:`torchaudio.models.squim_objective_base`. + The weights are under `Creative Commons Attribution 4.0 International License + `__. + + Please refer to :py:class:`SquimObjectiveBundle` for usage instructions. 
+ """ + diff --git a/data_pipeline/seperation/models/bandit/core/metrics/snr.py b/data_pipeline/seperation/models/bandit/core/metrics/snr.py new file mode 100644 index 0000000000000000000000000000000000000000..d2830b2cbecfa681c449d09e2d4c35a20fc98128 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/metrics/snr.py @@ -0,0 +1,150 @@ +from typing import Any, Callable + +import numpy as np +import torch +import torchmetrics as tm +from torch._C import _LinAlgError +from torchmetrics import functional as tmF + + +class SafeSignalDistortionRatio(tm.SignalDistortionRatio): + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + + def update(self, *args, **kwargs) -> Any: + try: + super().update(*args, **kwargs) + except: + pass + + def compute(self) -> Any: + if self.total == 0: + return torch.tensor(torch.nan) + return super().compute() + + +class BaseChunkMedianSignalRatio(tm.Metric): + def __init__( + self, + func: Callable, + window_size: int, + hop_size: int = None, + zero_mean: bool = False, + ) -> None: + super().__init__() + + # self.zero_mean = zero_mean + self.func = func + self.window_size = window_size + if hop_size is None: + hop_size = window_size + self.hop_size = hop_size + + self.add_state( + "sum_snr", + default=torch.tensor(0.0), + dist_reduce_fx="sum" + ) + self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum") + + def update(self, preds: torch.Tensor, target: torch.Tensor) -> None: + + n_samples = target.shape[-1] + + n_chunks = int( + np.ceil((n_samples - self.window_size) / self.hop_size) + 1 + ) + + snr_chunk = [] + + for i in range(n_chunks): + start = i * self.hop_size + + if n_samples - start < self.window_size: + continue + + end = start + self.window_size + + try: + chunk_snr = self.func( + preds[..., start:end], + target[..., start:end] + ) + + # print(preds.shape, chunk_snr.shape) + + if torch.all(torch.isfinite(chunk_snr)): + snr_chunk.append(chunk_snr) + except _LinAlgError: + pass + + snr_chunk = torch.stack(snr_chunk, dim=-1) + snr_batch, _ = torch.nanmedian(snr_chunk, dim=-1) + + self.sum_snr += snr_batch.sum() + self.total += snr_batch.numel() + + def compute(self) -> Any: + return self.sum_snr / self.total + + +class ChunkMedianSignalNoiseRatio(BaseChunkMedianSignalRatio): + def __init__( + self, + window_size: int, + hop_size: int = None, + zero_mean: bool = False + ) -> None: + super().__init__( + func=tmF.signal_noise_ratio, + window_size=window_size, + hop_size=hop_size, + zero_mean=zero_mean, + ) + + +class ChunkMedianScaleInvariantSignalNoiseRatio(BaseChunkMedianSignalRatio): + def __init__( + self, + window_size: int, + hop_size: int = None, + zero_mean: bool = False + ) -> None: + super().__init__( + func=tmF.scale_invariant_signal_noise_ratio, + window_size=window_size, + hop_size=hop_size, + zero_mean=zero_mean, + ) + + +class ChunkMedianSignalDistortionRatio(BaseChunkMedianSignalRatio): + def __init__( + self, + window_size: int, + hop_size: int = None, + zero_mean: bool = False + ) -> None: + super().__init__( + func=tmF.signal_distortion_ratio, + window_size=window_size, + hop_size=hop_size, + zero_mean=zero_mean, + ) + + +class ChunkMedianScaleInvariantSignalDistortionRatio( + BaseChunkMedianSignalRatio + ): + def __init__( + self, + window_size: int, + hop_size: int = None, + zero_mean: bool = False + ) -> None: + super().__init__( + func=tmF.scale_invariant_signal_distortion_ratio, + window_size=window_size, + hop_size=hop_size, + zero_mean=zero_mean, + ) diff --git 
a/data_pipeline/seperation/models/bandit/core/model/__init__.py b/data_pipeline/seperation/models/bandit/core/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..54ac48eb69d6f844ba5b73b213eae4cfab157cac --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/__init__.py @@ -0,0 +1,3 @@ +from .bsrnn.wrapper import ( + MultiMaskMultiSourceBandSplitRNNSimple, +) diff --git a/data_pipeline/seperation/models/bandit/core/model/_spectral.py b/data_pipeline/seperation/models/bandit/core/model/_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..564cd28600719579227a6085eed5e9d6ee521312 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/_spectral.py @@ -0,0 +1,58 @@ +from typing import Dict, Optional + +import torch +import torchaudio as ta +from torch import nn + + +class _SpectralComponent(nn.Module): + def __init__( + self, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + **kwargs, + ) -> None: + super().__init__() + + assert power is None + + window_fn = torch.__dict__[window_fn] + + self.stft = ( + ta.transforms.Spectrogram( + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + pad_mode=pad_mode, + pad=0, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + normalized=normalized, + center=center, + onesided=onesided, + ) + ) + + self.istft = ( + ta.transforms.InverseSpectrogram( + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + pad_mode=pad_mode, + pad=0, + window_fn=window_fn, + wkwargs=wkwargs, + normalized=normalized, + center=center, + onesided=onesided, + ) + ) diff --git a/data_pipeline/seperation/models/bandit/core/model/bsrnn/__init__.py b/data_pipeline/seperation/models/bandit/core/model/bsrnn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c27826197fc8f4eb7a7036d8037966a58d8b38d4 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/bsrnn/__init__.py @@ -0,0 +1,23 @@ +from abc import ABC +from typing import Iterable, Mapping, Union + +from torch import nn + +from models.bandit.core.model.bsrnn.bandsplit import BandSplitModule +from models.bandit.core.model.bsrnn.tfmodel import ( + SeqBandModellingModule, + TransformerTimeFreqModule, +) + + +class BandsplitCoreBase(nn.Module, ABC): + band_split: nn.Module + tf_model: nn.Module + mask_estim: Union[nn.Module, Mapping[str, nn.Module], Iterable[nn.Module]] + + def __init__(self) -> None: + super().__init__() + + @staticmethod + def mask(x, m): + return x * m diff --git a/data_pipeline/seperation/models/bandit/core/model/bsrnn/bandsplit.py b/data_pipeline/seperation/models/bandit/core/model/bsrnn/bandsplit.py new file mode 100644 index 0000000000000000000000000000000000000000..63e6255857fb2d538634be317332afb2f93e145d --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/bsrnn/bandsplit.py @@ -0,0 +1,139 @@ +from typing import List, Tuple + +import torch +from torch import nn + +from models.bandit.core.model.bsrnn.utils import ( + band_widths_from_specs, + check_no_gap, + check_no_overlap, + check_nonzero_bandwidth, +) + + +class NormFC(nn.Module): + def __init__( + self, + emb_dim: int, + bandwidth: int, + in_channel: int, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, 
+ ) -> None: + super().__init__() + + self.treat_channel_as_feature = treat_channel_as_feature + + if normalize_channel_independently: + raise NotImplementedError + + reim = 2 + + self.norm = nn.LayerNorm(in_channel * bandwidth * reim) + + fc_in = bandwidth * reim + + if treat_channel_as_feature: + fc_in *= in_channel + else: + assert emb_dim % in_channel == 0 + emb_dim = emb_dim // in_channel + + self.fc = nn.Linear(fc_in, emb_dim) + + def forward(self, xb): + # xb = (batch, n_time, in_chan, reim * band_width) + + batch, n_time, in_chan, ribw = xb.shape + xb = self.norm(xb.reshape(batch, n_time, in_chan * ribw)) + # (batch, n_time, in_chan * reim * band_width) + + if not self.treat_channel_as_feature: + xb = xb.reshape(batch, n_time, in_chan, ribw) + # (batch, n_time, in_chan, reim * band_width) + + zb = self.fc(xb) + # (batch, n_time, emb_dim) + # OR + # (batch, n_time, in_chan, emb_dim_per_chan) + + if not self.treat_channel_as_feature: + batch, n_time, in_chan, emb_dim_per_chan = zb.shape + # (batch, n_time, in_chan, emb_dim_per_chan) + zb = zb.reshape((batch, n_time, in_chan * emb_dim_per_chan)) + + return zb # (batch, n_time, emb_dim) + + +class BandSplitModule(nn.Module): + def __init__( + self, + band_specs: List[Tuple[float, float]], + emb_dim: int, + in_channel: int, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + ) -> None: + super().__init__() + + check_nonzero_bandwidth(band_specs) + + if require_no_gap: + check_no_gap(band_specs) + + if require_no_overlap: + check_no_overlap(band_specs) + + self.band_specs = band_specs + # list of [fstart, fend) in index. + # Note that fend is exclusive. + self.band_widths = band_widths_from_specs(band_specs) + self.n_bands = len(band_specs) + self.emb_dim = emb_dim + + self.norm_fc_modules = nn.ModuleList( + [ # type: ignore + ( + NormFC( + emb_dim=emb_dim, + bandwidth=bw, + in_channel=in_channel, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + ) + ) + for bw in self.band_widths + ] + ) + + def forward(self, x: torch.Tensor): + # x = complex spectrogram (batch, in_chan, n_freq, n_time) + + batch, in_chan, _, n_time = x.shape + + z = torch.zeros( + size=(batch, self.n_bands, n_time, self.emb_dim), + device=x.device + ) + + xr = torch.view_as_real(x) # batch, in_chan, n_freq, n_time, 2 + xr = torch.permute( + xr, + (0, 3, 1, 4, 2) + ) # batch, n_time, in_chan, 2, n_freq + batch, n_time, in_chan, reim, band_width = xr.shape + for i, nfm in enumerate(self.norm_fc_modules): + # print(f"bandsplit/band{i:02d}") + fstart, fend = self.band_specs[i] + xb = xr[..., fstart:fend] + # (batch, n_time, in_chan, reim, band_width) + xb = torch.reshape(xb, (batch, n_time, in_chan, -1)) + # (batch, n_time, in_chan, reim * band_width) + # z.append(nfm(xb)) # (batch, n_time, emb_dim) + z[:, i, :, :] = nfm(xb.contiguous()) + + # z = torch.stack(z, dim=1) + + return z diff --git a/data_pipeline/seperation/models/bandit/core/model/bsrnn/core.py b/data_pipeline/seperation/models/bandit/core/model/bsrnn/core.py new file mode 100644 index 0000000000000000000000000000000000000000..7fd36259002a395e7b7864f605fcab5b4422e422 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/bsrnn/core.py @@ -0,0 +1,661 @@ +from typing import Dict, List, Optional, Tuple + +import torch +from torch import nn +from torch.nn import functional as F + +from models.bandit.core.model.bsrnn import 
BandsplitCoreBase +from models.bandit.core.model.bsrnn.bandsplit import BandSplitModule +from models.bandit.core.model.bsrnn.maskestim import ( + MaskEstimationModule, + OverlappingMaskEstimationModule +) +from models.bandit.core.model.bsrnn.tfmodel import ( + ConvolutionalTimeFreqModule, + SeqBandModellingModule, + TransformerTimeFreqModule +) + + +class MultiMaskBandSplitCoreBase(BandsplitCoreBase): + def __init__(self) -> None: + super().__init__() + + def forward(self, x, cond=None, compute_residual: bool = True): + # x = complex spectrogram (batch, in_chan, n_freq, n_time) + # print(x.shape) + batch, in_chan, n_freq, n_time = x.shape + x = torch.reshape(x, (-1, 1, n_freq, n_time)) + + z = self.band_split(x) # (batch, emb_dim, n_band, n_time) + + # if torch.any(torch.isnan(z)): + # raise ValueError("z nan") + + # print(z) + q = self.tf_model(z) # (batch, emb_dim, n_band, n_time) + # print(q) + + + # if torch.any(torch.isnan(q)): + # raise ValueError("q nan") + + out = {} + + for stem, mem in self.mask_estim.items(): + m = mem(q, cond=cond) + + # if torch.any(torch.isnan(m)): + # raise ValueError("m nan", stem) + + s = self.mask(x, m) + s = torch.reshape(s, (batch, in_chan, n_freq, n_time)) + out[stem] = s + + return {"spectrogram": out} + + + + def instantiate_mask_estim(self, + in_channel: int, + stems: List[str], + band_specs: List[Tuple[float, float]], + emb_dim: int, + mlp_dim: int, + cond_dim: int, + hidden_activation: str, + + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + overlapping_band: bool = False, + freq_weights: Optional[List[torch.Tensor]] = None, + n_freq: Optional[int] = None, + use_freq_weights: bool = True, + mult_add_mask: bool = False + ): + if hidden_activation_kwargs is None: + hidden_activation_kwargs = {} + + if "mne:+" in stems: + stems = [s for s in stems if s != "mne:+"] + + if overlapping_band: + assert freq_weights is not None + assert n_freq is not None + + if mult_add_mask: + + self.mask_estim = nn.ModuleDict( + { + stem: MultAddMaskEstimationModule( + band_specs=band_specs, + freq_weights=freq_weights, + n_freq=n_freq, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + in_channel=in_channel, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + use_freq_weights=use_freq_weights, + ) + for stem in stems + } + ) + else: + self.mask_estim = nn.ModuleDict( + { + stem: OverlappingMaskEstimationModule( + band_specs=band_specs, + freq_weights=freq_weights, + n_freq=n_freq, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + in_channel=in_channel, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + use_freq_weights=use_freq_weights, + ) + for stem in stems + } + ) + else: + self.mask_estim = nn.ModuleDict( + { + stem: MaskEstimationModule( + band_specs=band_specs, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + cond_dim=cond_dim, + in_channel=in_channel, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + ) + for stem in stems + } + ) + + def instantiate_bandsplit(self, + in_channel: int, + band_specs: List[Tuple[float, float]], + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + emb_dim: int = 128 + ): + self.band_split = BandSplitModule( + in_channel=in_channel, + band_specs=band_specs, + require_no_overlap=require_no_overlap, + 
require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + emb_dim=emb_dim, + ) + +class SingleMaskBandsplitCoreBase(BandsplitCoreBase): + def __init__(self, **kwargs) -> None: + super().__init__() + + def forward(self, x): + # x = complex spectrogram (batch, in_chan, n_freq, n_time) + z = self.band_split(x) # (batch, emb_dim, n_band, n_time) + q = self.tf_model(z) # (batch, emb_dim, n_band, n_time) + m = self.mask_estim(q) # (batch, in_chan, n_freq, n_time) + + s = self.mask(x, m) + + return s + + +class SingleMaskBandsplitCoreRNN( + SingleMaskBandsplitCoreBase, +): + def __init__( + self, + in_channel: int, + band_specs: List[Tuple[float, float]], + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + ) -> None: + super().__init__() + self.band_split = (BandSplitModule( + in_channel=in_channel, + band_specs=band_specs, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + emb_dim=emb_dim, + )) + self.tf_model = (SeqBandModellingModule( + n_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + )) + self.mask_estim = (MaskEstimationModule( + in_channel=in_channel, + band_specs=band_specs, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + )) + + +class SingleMaskBandsplitCoreTransformer( + SingleMaskBandsplitCoreBase, +): + def __init__( + self, + in_channel: int, + band_specs: List[Tuple[float, float]], + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + tf_dropout: float = 0.0, + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + ) -> None: + super().__init__() + self.band_split = BandSplitModule( + in_channel=in_channel, + band_specs=band_specs, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + emb_dim=emb_dim, + ) + self.tf_model = TransformerTimeFreqModule( + n_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + dropout=tf_dropout, + ) + self.mask_estim = MaskEstimationModule( + in_channel=in_channel, + band_specs=band_specs, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + ) + + +class MultiSourceMultiMaskBandSplitCoreRNN(MultiMaskBandSplitCoreBase): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: List[Tuple[float, float]], + require_no_overlap: bool = False, + require_no_gap: bool = True, + 
normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + cond_dim: int = 0, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + overlapping_band: bool = False, + freq_weights: Optional[List[torch.Tensor]] = None, + n_freq: Optional[int] = None, + use_freq_weights: bool = True, + mult_add_mask: bool = False + ) -> None: + + super().__init__() + self.instantiate_bandsplit( + in_channel=in_channel, + band_specs=band_specs, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + emb_dim=emb_dim + ) + + + self.tf_model = ( + SeqBandModellingModule( + n_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + ) + ) + + self.mult_add_mask = mult_add_mask + + self.instantiate_mask_estim( + in_channel=in_channel, + stems=stems, + band_specs=band_specs, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + cond_dim=cond_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + overlapping_band=overlapping_band, + freq_weights=freq_weights, + n_freq=n_freq, + use_freq_weights=use_freq_weights, + mult_add_mask=mult_add_mask + ) + + @staticmethod + def _mult_add_mask(x, m): + + assert m.ndim == 5 + + mm = m[..., 0] + am = m[..., 1] + + # print(mm.shape, am.shape, x.shape, m.shape) + + return x * mm + am + + def mask(self, x, m): + if self.mult_add_mask: + + return self._mult_add_mask(x, m) + else: + return super().mask(x, m) + + +class MultiSourceMultiMaskBandSplitCoreTransformer( + MultiMaskBandSplitCoreBase, +): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: List[Tuple[float, float]], + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + tf_dropout: float = 0.0, + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + overlapping_band: bool = False, + freq_weights: Optional[List[torch.Tensor]] = None, + n_freq: Optional[int] = None, + use_freq_weights:bool=True, + rnn_type: str = "LSTM", + cond_dim: int = 0, + mult_add_mask: bool = False + ) -> None: + super().__init__() + self.instantiate_bandsplit( + in_channel=in_channel, + band_specs=band_specs, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + emb_dim=emb_dim + ) + self.tf_model = TransformerTimeFreqModule( + n_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + dropout=tf_dropout, + ) + + self.instantiate_mask_estim( + in_channel=in_channel, + stems=stems, + band_specs=band_specs, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + cond_dim=cond_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + overlapping_band=overlapping_band, + freq_weights=freq_weights, + n_freq=n_freq, + 
use_freq_weights=use_freq_weights, + mult_add_mask=mult_add_mask + ) + + + +class MultiSourceMultiMaskBandSplitCoreConv( + MultiMaskBandSplitCoreBase, +): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: List[Tuple[float, float]], + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + tf_dropout: float = 0.0, + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + overlapping_band: bool = False, + freq_weights: Optional[List[torch.Tensor]] = None, + n_freq: Optional[int] = None, + use_freq_weights:bool=True, + rnn_type: str = "LSTM", + cond_dim: int = 0, + mult_add_mask: bool = False + ) -> None: + super().__init__() + self.instantiate_bandsplit( + in_channel=in_channel, + band_specs=band_specs, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + emb_dim=emb_dim + ) + self.tf_model = ConvolutionalTimeFreqModule( + n_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + dropout=tf_dropout, + ) + + self.instantiate_mask_estim( + in_channel=in_channel, + stems=stems, + band_specs=band_specs, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + cond_dim=cond_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + overlapping_band=overlapping_band, + freq_weights=freq_weights, + n_freq=n_freq, + use_freq_weights=use_freq_weights, + mult_add_mask=mult_add_mask + ) + + +class PatchingMaskBandsplitCoreBase(MultiMaskBandSplitCoreBase): + def __init__(self) -> None: + super().__init__() + + def mask(self, x, m): + # x.shape = (batch, n_channel, n_freq, n_time) + # m.shape = (kernel_freq, kernel_time, batch, n_channel, n_freq, n_time) + + _, n_channel, kernel_freq, kernel_time, n_freq, n_time = m.shape + padding = ((kernel_freq - 1) // 2, (kernel_time - 1) // 2) + + xf = F.unfold( + x, + kernel_size=(kernel_freq, kernel_time), + padding=padding, + stride=(1, 1), + ) + + xf = xf.view( + -1, + n_channel, + kernel_freq, + kernel_time, + n_freq, + n_time, + ) + + sf = xf * m + + sf = sf.view( + -1, + n_channel * kernel_freq * kernel_time, + n_freq * n_time, + ) + + s = F.fold( + sf, + output_size=(n_freq, n_time), + kernel_size=(kernel_freq, kernel_time), + padding=padding, + stride=(1, 1), + ).view( + -1, + n_channel, + n_freq, + n_time, + ) + + return s + + def old_mask(self, x, m): + # x.shape = (batch, n_channel, n_freq, n_time) + # m.shape = (kernel_freq, kernel_time, batch, n_channel, n_freq, n_time) + + s = torch.zeros_like(x) + + _, n_channel, n_freq, n_time = x.shape + kernel_freq, kernel_time, _, _, _, _ = m.shape + + # print(x.shape, m.shape) + + kernel_freq_half = (kernel_freq - 1) // 2 + kernel_time_half = (kernel_time - 1) // 2 + + for ifreq in range(kernel_freq): + for itime in range(kernel_time): + df, dt = kernel_freq_half - ifreq, kernel_time_half - itime + x = x.roll(shifts=(df, dt), dims=(2, 3)) + + # if `df` > 0: + # x[:, :, :df, :] = 0 + # elif `df` < 0: + # x[:, :, df:, :] = 0 + + # if `dt` > 0: + # x[:, :, :, :dt] = 0 + # elif `dt` < 0: + # x[:, :, :, dt:] = 0 + + fslice = slice(max(0, df), min(n_freq, n_freq + df)) + tslice = 
slice(max(0, dt), min(n_time, n_time + dt)) + + s[:, :, fslice, tslice] += x[:, :, fslice, tslice] * m[ifreq, + itime, :, + :, fslice, + tslice] + + return s + + +class MultiSourceMultiPatchingMaskBandSplitCoreRNN( + PatchingMaskBandsplitCoreBase +): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: List[Tuple[float, float]], + mask_kernel_freq: int, + mask_kernel_time: int, + conv_kernel_freq: int, + conv_kernel_time: int, + kernel_norm_mlp_version: int, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + overlapping_band: bool = False, + freq_weights: Optional[List[torch.Tensor]] = None, + n_freq: Optional[int] = None, + ) -> None: + + super().__init__() + self.band_split = BandSplitModule( + in_channel=in_channel, + band_specs=band_specs, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + emb_dim=emb_dim, + ) + + self.tf_model = ( + SeqBandModellingModule( + n_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + ) + ) + + if hidden_activation_kwargs is None: + hidden_activation_kwargs = {} + + if overlapping_band: + assert freq_weights is not None + assert n_freq is not None + self.mask_estim = nn.ModuleDict( + { + stem: PatchingMaskEstimationModule( + band_specs=band_specs, + freq_weights=freq_weights, + n_freq=n_freq, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + in_channel=in_channel, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + mask_kernel_freq=mask_kernel_freq, + mask_kernel_time=mask_kernel_time, + conv_kernel_freq=conv_kernel_freq, + conv_kernel_time=conv_kernel_time, + kernel_norm_mlp_version=kernel_norm_mlp_version + ) + for stem in stems + } + ) + else: + raise NotImplementedError diff --git a/data_pipeline/seperation/models/bandit/core/model/bsrnn/maskestim.py b/data_pipeline/seperation/models/bandit/core/model/bsrnn/maskestim.py new file mode 100644 index 0000000000000000000000000000000000000000..0b9289dfa702e02ff4d4f0dc76196fd39bb68e34 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/bsrnn/maskestim.py @@ -0,0 +1,347 @@ +import warnings +from typing import Dict, List, Optional, Tuple, Type + +import torch +from torch import nn +from torch.nn.modules import activation + +from models.bandit.core.model.bsrnn.utils import ( + band_widths_from_specs, + check_no_gap, + check_no_overlap, + check_nonzero_bandwidth, +) + + +class BaseNormMLP(nn.Module): + def __init__( + self, + emb_dim: int, + mlp_dim: int, + bandwidth: int, + in_channel: Optional[int], + hidden_activation: str = "Tanh", + hidden_activation_kwargs=None, + complex_mask: bool = True, ): + + super().__init__() + if hidden_activation_kwargs is None: + hidden_activation_kwargs = {} + self.hidden_activation_kwargs = hidden_activation_kwargs + self.norm = nn.LayerNorm(emb_dim) + self.hidden = torch.jit.script(nn.Sequential( + nn.Linear(in_features=emb_dim, out_features=mlp_dim), + activation.__dict__[hidden_activation]( + 
**self.hidden_activation_kwargs + ), + )) + + self.bandwidth = bandwidth + self.in_channel = in_channel + + self.complex_mask = complex_mask + self.reim = 2 if complex_mask else 1 + self.glu_mult = 2 + + +class NormMLP(BaseNormMLP): + def __init__( + self, + emb_dim: int, + mlp_dim: int, + bandwidth: int, + in_channel: Optional[int], + hidden_activation: str = "Tanh", + hidden_activation_kwargs=None, + complex_mask: bool = True, + ) -> None: + super().__init__( + emb_dim=emb_dim, + mlp_dim=mlp_dim, + bandwidth=bandwidth, + in_channel=in_channel, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + ) + + self.output = torch.jit.script( + nn.Sequential( + nn.Linear( + in_features=mlp_dim, + out_features=bandwidth * in_channel * self.reim * 2, + ), + nn.GLU(dim=-1), + ) + ) + + def reshape_output(self, mb): + # print(mb.shape) + batch, n_time, _ = mb.shape + if self.complex_mask: + mb = mb.reshape( + batch, + n_time, + self.in_channel, + self.bandwidth, + self.reim + ).contiguous() + # print(mb.shape) + mb = torch.view_as_complex( + mb + ) # (batch, n_time, in_channel, bandwidth) + else: + mb = mb.reshape(batch, n_time, self.in_channel, self.bandwidth) + + mb = torch.permute( + mb, + (0, 2, 3, 1) + ) # (batch, in_channel, bandwidth, n_time) + + return mb + + def forward(self, qb): + # qb = (batch, n_time, emb_dim) + + # if torch.any(torch.isnan(qb)): + # raise ValueError("qb0") + + + qb = self.norm(qb) # (batch, n_time, emb_dim) + + # if torch.any(torch.isnan(qb)): + # raise ValueError("qb1") + + qb = self.hidden(qb) # (batch, n_time, mlp_dim) + # if torch.any(torch.isnan(qb)): + # raise ValueError("qb2") + mb = self.output(qb) # (batch, n_time, bandwidth * in_channel * reim) + # if torch.any(torch.isnan(qb)): + # raise ValueError("mb") + mb = self.reshape_output(mb) # (batch, in_channel, bandwidth, n_time) + + return mb + + +class MultAddNormMLP(NormMLP): + def __init__(self, emb_dim: int, mlp_dim: int, bandwidth: int, in_channel: "int | None", hidden_activation: str = "Tanh", hidden_activation_kwargs=None, complex_mask: bool = True) -> None: + super().__init__(emb_dim, mlp_dim, bandwidth, in_channel, hidden_activation, hidden_activation_kwargs, complex_mask) + + self.output2 = torch.jit.script( + nn.Sequential( + nn.Linear( + in_features=mlp_dim, + out_features=bandwidth * in_channel * self.reim * 2, + ), + nn.GLU(dim=-1), + ) + ) + + def forward(self, qb): + + qb = self.norm(qb) # (batch, n_time, emb_dim) + qb = self.hidden(qb) # (batch, n_time, mlp_dim) + mmb = self.output(qb) # (batch, n_time, bandwidth * in_channel * reim) + mmb = self.reshape_output(mmb) # (batch, in_channel, bandwidth, n_time) + amb = self.output2(qb) # (batch, n_time, bandwidth * in_channel * reim) + amb = self.reshape_output(amb) # (batch, in_channel, bandwidth, n_time) + + return mmb, amb + + +class MaskEstimationModuleSuperBase(nn.Module): + pass + + +class MaskEstimationModuleBase(MaskEstimationModuleSuperBase): + def __init__( + self, + band_specs: List[Tuple[float, float]], + emb_dim: int, + mlp_dim: int, + in_channel: Optional[int], + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Dict = None, + complex_mask: bool = True, + norm_mlp_cls: Type[nn.Module] = NormMLP, + norm_mlp_kwargs: Dict = None, + ) -> None: + super().__init__() + + self.band_widths = band_widths_from_specs(band_specs) + self.n_bands = len(band_specs) + + if hidden_activation_kwargs is None: + hidden_activation_kwargs = {} + + if norm_mlp_kwargs is None: + 
norm_mlp_kwargs = {} + + self.norm_mlp = nn.ModuleList( + [ + ( + norm_mlp_cls( + bandwidth=self.band_widths[b], + emb_dim=emb_dim, + mlp_dim=mlp_dim, + in_channel=in_channel, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + **norm_mlp_kwargs, + ) + ) + for b in range(self.n_bands) + ] + ) + + def compute_masks(self, q): + batch, n_bands, n_time, emb_dim = q.shape + + masks = [] + + for b, nmlp in enumerate(self.norm_mlp): + # print(f"maskestim/{b:02d}") + qb = q[:, b, :, :] + mb = nmlp(qb) + masks.append(mb) + + return masks + + + +class OverlappingMaskEstimationModule(MaskEstimationModuleBase): + def __init__( + self, + in_channel: int, + band_specs: List[Tuple[float, float]], + freq_weights: List[torch.Tensor], + n_freq: int, + emb_dim: int, + mlp_dim: int, + cond_dim: int = 0, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Dict = None, + complex_mask: bool = True, + norm_mlp_cls: Type[nn.Module] = NormMLP, + norm_mlp_kwargs: Dict = None, + use_freq_weights: bool = True, + ) -> None: + check_nonzero_bandwidth(band_specs) + check_no_gap(band_specs) + + # if cond_dim > 0: + # raise NotImplementedError + + super().__init__( + band_specs=band_specs, + emb_dim=emb_dim + cond_dim, + mlp_dim=mlp_dim, + in_channel=in_channel, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + norm_mlp_cls=norm_mlp_cls, + norm_mlp_kwargs=norm_mlp_kwargs, + ) + + self.n_freq = n_freq + self.band_specs = band_specs + self.in_channel = in_channel + + if freq_weights is not None: + for i, fw in enumerate(freq_weights): + self.register_buffer(f"freq_weights/{i}", fw) + + self.use_freq_weights = use_freq_weights + else: + self.use_freq_weights = False + + self.cond_dim = cond_dim + + def forward(self, q, cond=None): + # q = (batch, n_bands, n_time, emb_dim) + + batch, n_bands, n_time, emb_dim = q.shape + + if cond is not None: + print(cond) + if cond.ndim == 2: + cond = cond[:, None, None, :].expand(-1, n_bands, n_time, -1) + elif cond.ndim == 3: + assert cond.shape[1] == n_time + else: + raise ValueError(f"Invalid cond shape: {cond.shape}") + + q = torch.cat([q, cond], dim=-1) + elif self.cond_dim > 0: + cond = torch.ones( + (batch, n_bands, n_time, self.cond_dim), + device=q.device, + dtype=q.dtype, + ) + q = torch.cat([q, cond], dim=-1) + else: + pass + + mask_list = self.compute_masks( + q + ) # [n_bands * (batch, in_channel, bandwidth, n_time)] + + masks = torch.zeros( + (batch, self.in_channel, self.n_freq, n_time), + device=q.device, + dtype=mask_list[0].dtype, + ) + + for im, mask in enumerate(mask_list): + fstart, fend = self.band_specs[im] + if self.use_freq_weights: + fw = self.get_buffer(f"freq_weights/{im}")[:, None] + mask = mask * fw + masks[:, :, fstart:fend, :] += mask + + return masks + + +class MaskEstimationModule(OverlappingMaskEstimationModule): + def __init__( + self, + band_specs: List[Tuple[float, float]], + emb_dim: int, + mlp_dim: int, + in_channel: Optional[int], + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Dict = None, + complex_mask: bool = True, + **kwargs, + ) -> None: + check_nonzero_bandwidth(band_specs) + check_no_gap(band_specs) + check_no_overlap(band_specs) + super().__init__( + in_channel=in_channel, + band_specs=band_specs, + freq_weights=None, + n_freq=None, + emb_dim=emb_dim, + mlp_dim=mlp_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + 
complex_mask=complex_mask, + ) + + def forward(self, q, cond=None): + # q = (batch, n_bands, n_time, emb_dim) + + masks = self.compute_masks( + q + ) # [n_bands * (batch, in_channel, bandwidth, n_time)] + + # TODO: currently this requires band specs to have no gap and no overlap + masks = torch.concat( + masks, + dim=2 + ) # (batch, in_channel, n_freq, n_time) + + return masks diff --git a/data_pipeline/seperation/models/bandit/core/model/bsrnn/tfmodel.py b/data_pipeline/seperation/models/bandit/core/model/bsrnn/tfmodel.py new file mode 100644 index 0000000000000000000000000000000000000000..ba710798c5ab49936bd63c914f20da516cbc6af9 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/bsrnn/tfmodel.py @@ -0,0 +1,317 @@ +import warnings + +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.modules import rnn + +import torch.backends.cuda + + +class TimeFrequencyModellingModule(nn.Module): + def __init__(self) -> None: + super().__init__() + + +class ResidualRNN(nn.Module): + def __init__( + self, + emb_dim: int, + rnn_dim: int, + bidirectional: bool = True, + rnn_type: str = "LSTM", + use_batch_trick: bool = True, + use_layer_norm: bool = True, + ) -> None: + # n_group is the size of the 2nd dim + super().__init__() + + self.use_layer_norm = use_layer_norm + if use_layer_norm: + self.norm = nn.LayerNorm(emb_dim) + else: + self.norm = nn.GroupNorm(num_groups=emb_dim, num_channels=emb_dim) + + self.rnn = rnn.__dict__[rnn_type]( + input_size=emb_dim, + hidden_size=rnn_dim, + num_layers=1, + batch_first=True, + bidirectional=bidirectional, + ) + + self.fc = nn.Linear( + in_features=rnn_dim * (2 if bidirectional else 1), + out_features=emb_dim + ) + + self.use_batch_trick = use_batch_trick + if not self.use_batch_trick: + warnings.warn("NOT USING BATCH TRICK IS EXTREMELY SLOW!!") + + def forward(self, z): + # z = (batch, n_uncrossed, n_across, emb_dim) + + z0 = torch.clone(z) + + # print(z.device) + + if self.use_layer_norm: + z = self.norm(z) # (batch, n_uncrossed, n_across, emb_dim) + else: + z = torch.permute( + z, (0, 3, 1, 2) + ) # (batch, emb_dim, n_uncrossed, n_across) + + z = self.norm(z) # (batch, emb_dim, n_uncrossed, n_across) + + z = torch.permute( + z, (0, 2, 3, 1) + ) # (batch, n_uncrossed, n_across, emb_dim) + + batch, n_uncrossed, n_across, emb_dim = z.shape + + if self.use_batch_trick: + z = torch.reshape(z, (batch * n_uncrossed, n_across, emb_dim)) + + z = self.rnn(z.contiguous())[0] # (batch * n_uncrossed, n_across, dir_rnn_dim) + + z = torch.reshape(z, (batch, n_uncrossed, n_across, -1)) + # (batch, n_uncrossed, n_across, dir_rnn_dim) + else: + # Note: this is EXTREMELY SLOW + zlist = [] + for i in range(n_uncrossed): + zi = self.rnn(z[:, i, :, :])[0] # (batch, n_across, emb_dim) + zlist.append(zi) + + z = torch.stack( + zlist, + dim=1 + ) # (batch, n_uncrossed, n_across, dir_rnn_dim) + + z = self.fc(z) # (batch, n_uncrossed, n_across, emb_dim) + + z = z + z0 + + return z + + +class SeqBandModellingModule(TimeFrequencyModellingModule): + def __init__( + self, + n_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + rnn_type: str = "LSTM", + parallel_mode=False, + ) -> None: + super().__init__() + self.seqband = nn.ModuleList([]) + + if parallel_mode: + for _ in range(n_modules): + self.seqband.append( + nn.ModuleList( + [ResidualRNN( + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + ), + ResidualRNN( + emb_dim=emb_dim, + rnn_dim=rnn_dim, + 
bidirectional=bidirectional, + rnn_type=rnn_type, + )] + ) + ) + else: + + for _ in range(2 * n_modules): + self.seqband.append( + ResidualRNN( + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + ) + ) + + self.parallel_mode = parallel_mode + + def forward(self, z): + # z = (batch, n_bands, n_time, emb_dim) + + if self.parallel_mode: + for sbm_pair in self.seqband: + # z: (batch, n_bands, n_time, emb_dim) + sbm_t, sbm_f = sbm_pair[0], sbm_pair[1] + zt = sbm_t(z) # (batch, n_bands, n_time, emb_dim) + zf = sbm_f(z.transpose(1, 2)) # (batch, n_time, n_bands, emb_dim) + z = zt + zf.transpose(1, 2) + else: + for sbm in self.seqband: + z = sbm(z) + z = z.transpose(1, 2) + + # (batch, n_bands, n_time, emb_dim) + # --> (batch, n_time, n_bands, emb_dim) + # OR + # (batch, n_time, n_bands, emb_dim) + # --> (batch, n_bands, n_time, emb_dim) + + q = z + return q # (batch, n_bands, n_time, emb_dim) + + +class ResidualTransformer(nn.Module): + def __init__( + self, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + dropout: float = 0.0, + ) -> None: + # n_group is the size of the 2nd dim + super().__init__() + + self.tf = nn.TransformerEncoderLayer( + d_model=emb_dim, + nhead=4, + dim_feedforward=rnn_dim, + batch_first=True + ) + + self.is_causal = not bidirectional + self.dropout = dropout + + def forward(self, z): + batch, n_uncrossed, n_across, emb_dim = z.shape + z = torch.reshape(z, (batch * n_uncrossed, n_across, emb_dim)) + z = self.tf(z, is_causal=self.is_causal) # (batch, n_uncrossed, n_across, emb_dim) + z = torch.reshape(z, (batch, n_uncrossed, n_across, emb_dim)) + + return z + + +class TransformerTimeFreqModule(TimeFrequencyModellingModule): + def __init__( + self, + n_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + dropout: float = 0.0, + ) -> None: + super().__init__() + self.norm = nn.LayerNorm(emb_dim) + self.seqband = nn.ModuleList([]) + + for _ in range(2 * n_modules): + self.seqband.append( + ResidualTransformer( + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + dropout=dropout, + ) + ) + + def forward(self, z): + # z = (batch, n_bands, n_time, emb_dim) + z = self.norm(z) # (batch, n_bands, n_time, emb_dim) + + for sbm in self.seqband: + z = sbm(z) + z = z.transpose(1, 2) + + # (batch, n_bands, n_time, emb_dim) + # --> (batch, n_time, n_bands, emb_dim) + # OR + # (batch, n_time, n_bands, emb_dim) + # --> (batch, n_bands, n_time, emb_dim) + + q = z + return q # (batch, n_bands, n_time, emb_dim) + + + +class ResidualConvolution(nn.Module): + def __init__( + self, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + dropout: float = 0.0, + ) -> None: + # n_group is the size of the 2nd dim + super().__init__() + self.norm = nn.InstanceNorm2d(emb_dim, affine=True) + + self.conv = nn.Sequential( + nn.Conv2d( + in_channels=emb_dim, + out_channels=rnn_dim, + kernel_size=(3, 3), + padding="same", + stride=(1, 1), + ), + nn.Tanhshrink() + ) + + self.is_causal = not bidirectional + self.dropout = dropout + + self.fc = nn.Conv2d( + in_channels=rnn_dim, + out_channels=emb_dim, + kernel_size=(1, 1), + padding="same", + stride=(1, 1), + ) + + + def forward(self, z): + # z = (batch, n_uncrossed, n_across, emb_dim) + + z0 = torch.clone(z) + + z = self.norm(z) # (batch, n_uncrossed, n_across, emb_dim) + z = self.conv(z) # (batch, n_uncrossed, n_across, emb_dim) + z = self.fc(z) # (batch, n_uncrossed, n_across, emb_dim) + z = z + z0 + + return z + + 
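# --- Editor's note: illustrative sketch, not part of the original tfmodel.py ---
# ResidualRNN above relies on what the code calls the "batch trick": rather than
# looping an RNN over every band (or every frame) in Python, the un-crossed axis
# is folded into the batch axis, a single RNN call processes all of them at once,
# and the output is unfolded again. A minimal standalone version of that reshaping
# (the helper name and demo shapes below are ours, assuming a batch_first LSTM):
import torch
from torch import nn


def _batched_rnn_over_axis(z: torch.Tensor, rnn: nn.LSTM) -> torch.Tensor:
    # z: (batch, n_uncrossed, n_across, emb_dim)
    batch, n_uncrossed, n_across, emb_dim = z.shape
    z = z.reshape(batch * n_uncrossed, n_across, emb_dim)  # fold the un-crossed axis into batch
    z, _ = rnn(z)                                          # one RNN call instead of a Python loop
    return z.reshape(batch, n_uncrossed, n_across, -1)     # (batch, n_uncrossed, n_across, rnn_out_dim)


# Example: _batched_rnn_over_axis(torch.randn(2, 8, 100, 16),
#                                 nn.LSTM(16, 32, batch_first=True, bidirectional=True))
# has shape (2, 8, 100, 64). SeqBandModellingModule then transposes dims 1 and 2
# between modules, so successive RNNs alternate between running across time and
# across bands.
# -------------------------------------------------------------------------------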
+class ConvolutionalTimeFreqModule(TimeFrequencyModellingModule): + def __init__( + self, + n_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + dropout: float = 0.0, + ) -> None: + super().__init__() + self.seqband = torch.jit.script(nn.Sequential( + *[ResidualConvolution( + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + dropout=dropout, + ) for _ in range(2 * n_modules) ])) + + def forward(self, z): + # z = (batch, n_bands, n_time, emb_dim) + + z = torch.permute(z, (0, 3, 1, 2)) # (batch, emb_dim, n_bands, n_time) + + z = self.seqband(z) # (batch, emb_dim, n_bands, n_time) + + z = torch.permute(z, (0, 2, 3, 1)) # (batch, n_bands, n_time, emb_dim) + + return z diff --git a/data_pipeline/seperation/models/bandit/core/model/bsrnn/utils.py b/data_pipeline/seperation/models/bandit/core/model/bsrnn/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf8636e65fe9e7fdd13fa063760018df90a01cff --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/bsrnn/utils.py @@ -0,0 +1,583 @@ +import os +from abc import abstractmethod +from typing import Any, Callable + +import numpy as np +import torch +from librosa import hz_to_midi, midi_to_hz +from torch import Tensor +from torchaudio import functional as taF +from spafe.fbanks import bark_fbanks +from spafe.utils.converters import erb2hz, hz2bark, hz2erb +from torchaudio.functional.functional import _create_triangular_filterbank + + +def band_widths_from_specs(band_specs): + return [e - i for i, e in band_specs] + + +def check_nonzero_bandwidth(band_specs): + # pprint(band_specs) + for fstart, fend in band_specs: + if fend - fstart <= 0: + raise ValueError("Bands cannot be zero-width") + + +def check_no_overlap(band_specs): + fend_prev = -1 + for fstart_curr, fend_curr in band_specs: + if fstart_curr <= fend_prev: + raise ValueError("Bands cannot overlap") + + +def check_no_gap(band_specs): + fstart, _ = band_specs[0] + assert fstart == 0 + + fend_prev = -1 + for fstart_curr, fend_curr in band_specs: + if fstart_curr - fend_prev > 1: + raise ValueError("Bands cannot leave gap") + fend_prev = fend_curr + + +class BandsplitSpecification: + def __init__(self, nfft: int, fs: int) -> None: + self.fs = fs + self.nfft = nfft + self.nyquist = fs / 2 + self.max_index = nfft // 2 + 1 + + self.split500 = self.hertz_to_index(500) + self.split1k = self.hertz_to_index(1000) + self.split2k = self.hertz_to_index(2000) + self.split4k = self.hertz_to_index(4000) + self.split8k = self.hertz_to_index(8000) + self.split16k = self.hertz_to_index(16000) + self.split20k = self.hertz_to_index(20000) + + self.above20k = [(self.split20k, self.max_index)] + self.above16k = [(self.split16k, self.split20k)] + self.above20k + + def index_to_hertz(self, index: int): + return index * self.fs / self.nfft + + def hertz_to_index(self, hz: float, round: bool = True): + index = hz * self.nfft / self.fs + + if round: + index = int(np.round(index)) + + return index + + def get_band_specs_with_bandwidth( + self, + start_index, + end_index, + bandwidth_hz + ): + band_specs = [] + lower = start_index + + while lower < end_index: + upper = int(np.floor(lower + self.hertz_to_index(bandwidth_hz))) + upper = min(upper, end_index) + + band_specs.append((lower, upper)) + lower = upper + + return band_specs + + @abstractmethod + def get_band_specs(self): + raise NotImplementedError + + +class VocalBandsplitSpecification(BandsplitSpecification): + def __init__(self, nfft: int, fs: int, version: str = 
"7") -> None: + super().__init__(nfft=nfft, fs=fs) + + self.version = version + + def get_band_specs(self): + return getattr(self, f"version{self.version}")() + + @property + def version1(self): + return self.get_band_specs_with_bandwidth( + start_index=0, end_index=self.max_index, bandwidth_hz=1000 + ) + + def version2(self): + below16k = self.get_band_specs_with_bandwidth( + start_index=0, end_index=self.split16k, bandwidth_hz=1000 + ) + below20k = self.get_band_specs_with_bandwidth( + start_index=self.split16k, + end_index=self.split20k, + bandwidth_hz=2000 + ) + + return below16k + below20k + self.above20k + + def version3(self): + below8k = self.get_band_specs_with_bandwidth( + start_index=0, end_index=self.split8k, bandwidth_hz=1000 + ) + below16k = self.get_band_specs_with_bandwidth( + start_index=self.split8k, + end_index=self.split16k, + bandwidth_hz=2000 + ) + + return below8k + below16k + self.above16k + + def version4(self): + below1k = self.get_band_specs_with_bandwidth( + start_index=0, end_index=self.split1k, bandwidth_hz=100 + ) + below8k = self.get_band_specs_with_bandwidth( + start_index=self.split1k, + end_index=self.split8k, + bandwidth_hz=1000 + ) + below16k = self.get_band_specs_with_bandwidth( + start_index=self.split8k, + end_index=self.split16k, + bandwidth_hz=2000 + ) + + return below1k + below8k + below16k + self.above16k + + def version5(self): + below1k = self.get_band_specs_with_bandwidth( + start_index=0, end_index=self.split1k, bandwidth_hz=100 + ) + below16k = self.get_band_specs_with_bandwidth( + start_index=self.split1k, + end_index=self.split16k, + bandwidth_hz=1000 + ) + below20k = self.get_band_specs_with_bandwidth( + start_index=self.split16k, + end_index=self.split20k, + bandwidth_hz=2000 + ) + return below1k + below16k + below20k + self.above20k + + def version6(self): + below1k = self.get_band_specs_with_bandwidth( + start_index=0, end_index=self.split1k, bandwidth_hz=100 + ) + below4k = self.get_band_specs_with_bandwidth( + start_index=self.split1k, + end_index=self.split4k, + bandwidth_hz=500 + ) + below8k = self.get_band_specs_with_bandwidth( + start_index=self.split4k, + end_index=self.split8k, + bandwidth_hz=1000 + ) + below16k = self.get_band_specs_with_bandwidth( + start_index=self.split8k, + end_index=self.split16k, + bandwidth_hz=2000 + ) + return below1k + below4k + below8k + below16k + self.above16k + + def version7(self): + below1k = self.get_band_specs_with_bandwidth( + start_index=0, end_index=self.split1k, bandwidth_hz=100 + ) + below4k = self.get_band_specs_with_bandwidth( + start_index=self.split1k, + end_index=self.split4k, + bandwidth_hz=250 + ) + below8k = self.get_band_specs_with_bandwidth( + start_index=self.split4k, + end_index=self.split8k, + bandwidth_hz=500 + ) + below16k = self.get_band_specs_with_bandwidth( + start_index=self.split8k, + end_index=self.split16k, + bandwidth_hz=1000 + ) + below20k = self.get_band_specs_with_bandwidth( + start_index=self.split16k, + end_index=self.split20k, + bandwidth_hz=2000 + ) + return below1k + below4k + below8k + below16k + below20k + self.above20k + + +class OtherBandsplitSpecification(VocalBandsplitSpecification): + def __init__(self, nfft: int, fs: int) -> None: + super().__init__(nfft=nfft, fs=fs, version="7") + + +class BassBandsplitSpecification(BandsplitSpecification): + def __init__(self, nfft: int, fs: int, version: str = "7") -> None: + super().__init__(nfft=nfft, fs=fs) + + def get_band_specs(self): + below500 = self.get_band_specs_with_bandwidth( + start_index=0, 
end_index=self.split500, bandwidth_hz=50 + ) + below1k = self.get_band_specs_with_bandwidth( + start_index=self.split500, + end_index=self.split1k, + bandwidth_hz=100 + ) + below4k = self.get_band_specs_with_bandwidth( + start_index=self.split1k, + end_index=self.split4k, + bandwidth_hz=500 + ) + below8k = self.get_band_specs_with_bandwidth( + start_index=self.split4k, + end_index=self.split8k, + bandwidth_hz=1000 + ) + below16k = self.get_band_specs_with_bandwidth( + start_index=self.split8k, + end_index=self.split16k, + bandwidth_hz=2000 + ) + above16k = [(self.split16k, self.max_index)] + + return below500 + below1k + below4k + below8k + below16k + above16k + + +class DrumBandsplitSpecification(BandsplitSpecification): + def __init__(self, nfft: int, fs: int) -> None: + super().__init__(nfft=nfft, fs=fs) + + def get_band_specs(self): + below1k = self.get_band_specs_with_bandwidth( + start_index=0, end_index=self.split1k, bandwidth_hz=50 + ) + below2k = self.get_band_specs_with_bandwidth( + start_index=self.split1k, + end_index=self.split2k, + bandwidth_hz=100 + ) + below4k = self.get_band_specs_with_bandwidth( + start_index=self.split2k, + end_index=self.split4k, + bandwidth_hz=250 + ) + below8k = self.get_band_specs_with_bandwidth( + start_index=self.split4k, + end_index=self.split8k, + bandwidth_hz=500 + ) + below16k = self.get_band_specs_with_bandwidth( + start_index=self.split8k, + end_index=self.split16k, + bandwidth_hz=1000 + ) + above16k = [(self.split16k, self.max_index)] + + return below1k + below2k + below4k + below8k + below16k + above16k + + + + +class PerceptualBandsplitSpecification(BandsplitSpecification): + def __init__( + self, + nfft: int, + fs: int, + fbank_fn: Callable[[int, int, float, float, int], torch.Tensor], + n_bands: int, + f_min: float = 0.0, + f_max: float = None + ) -> None: + super().__init__(nfft=nfft, fs=fs) + self.n_bands = n_bands + if f_max is None: + f_max = fs / 2 + + self.filterbank = fbank_fn( + n_bands, fs, f_min, f_max, self.max_index + ) + + weight_per_bin = torch.sum( + self.filterbank, + dim=0, + keepdim=True + ) # (1, n_freqs) + normalized_mel_fb = self.filterbank / weight_per_bin # (n_mels, n_freqs) + + freq_weights = [] + band_specs = [] + for i in range(self.n_bands): + active_bins = torch.nonzero(self.filterbank[i, :]).squeeze().tolist() + if isinstance(active_bins, int): + active_bins = (active_bins, active_bins) + if len(active_bins) == 0: + continue + start_index = active_bins[0] + end_index = active_bins[-1] + 1 + band_specs.append((start_index, end_index)) + freq_weights.append(normalized_mel_fb[i, start_index:end_index]) + + self.freq_weights = freq_weights + self.band_specs = band_specs + + def get_band_specs(self): + return self.band_specs + + def get_freq_weights(self): + return self.freq_weights + + def save_to_file(self, dir_path: str) -> None: + + os.makedirs(dir_path, exist_ok=True) + + import pickle + + with open(os.path.join(dir_path, "mel_bandsplit_spec.pkl"), "wb") as f: + pickle.dump( + { + "band_specs": self.band_specs, + "freq_weights": self.freq_weights, + "filterbank": self.filterbank, + }, + f, + ) + +def mel_filterbank(n_bands, fs, f_min, f_max, n_freqs): + fb = taF.melscale_fbanks( + n_mels=n_bands, + sample_rate=fs, + f_min=f_min, + f_max=f_max, + n_freqs=n_freqs, + ).T + + fb[0, 0] = 1.0 + + return fb + + +class MelBandsplitSpecification(PerceptualBandsplitSpecification): + def __init__( + self, + nfft: int, + fs: int, + n_bands: int, + f_min: float = 0.0, + f_max: float = None + ) -> None: + 
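# NOTE (editor): descriptive comment added for clarity. mel_filterbank returns
# overlapping triangular filters (with the DC bin forced to 1.0 so bin 0 is covered);
# PerceptualBandsplitSpecification.__init__ above converts each filter's nonzero
# support into a (start, end) bin range and keeps the per-bin weights, normalized so
# that each frequency bin's weights sum to 1 across bands. Those weights are what the
# wrappers later pass as freq_weights when overlapping_band is True.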
super().__init__(fbank_fn=mel_filterbank, nfft=nfft, fs=fs, n_bands=n_bands, f_min=f_min, f_max=f_max) + +def musical_filterbank(n_bands, fs, f_min, f_max, n_freqs, + scale="constant"): + + nfft = 2 * (n_freqs - 1) + df = fs / nfft + # init freqs + f_max = f_max or fs / 2 + f_min = f_min or 0 + f_min = fs / nfft + + n_octaves = np.log2(f_max / f_min) + n_octaves_per_band = n_octaves / n_bands + bandwidth_mult = np.power(2.0, n_octaves_per_band) + + low_midi = max(0, hz_to_midi(f_min)) + high_midi = hz_to_midi(f_max) + midi_points = np.linspace(low_midi, high_midi, n_bands) + hz_pts = midi_to_hz(midi_points) + + low_pts = hz_pts / bandwidth_mult + high_pts = hz_pts * bandwidth_mult + + low_bins = np.floor(low_pts / df).astype(int) + high_bins = np.ceil(high_pts / df).astype(int) + + fb = np.zeros((n_bands, n_freqs)) + + for i in range(n_bands): + fb[i, low_bins[i]:high_bins[i]+1] = 1.0 + + fb[0, :low_bins[0]] = 1.0 + fb[-1, high_bins[-1]+1:] = 1.0 + + return torch.as_tensor(fb) + +class MusicalBandsplitSpecification(PerceptualBandsplitSpecification): + def __init__( + self, + nfft: int, + fs: int, + n_bands: int, + f_min: float = 0.0, + f_max: float = None + ) -> None: + super().__init__(fbank_fn=musical_filterbank, nfft=nfft, fs=fs, n_bands=n_bands, f_min=f_min, f_max=f_max) + + +def bark_filterbank( + n_bands, fs, f_min, f_max, n_freqs +): + nfft = 2 * (n_freqs -1) + fb, _ = bark_fbanks.bark_filter_banks( + nfilts=n_bands, + nfft=nfft, + fs=fs, + low_freq=f_min, + high_freq=f_max, + scale="constant" + ) + + return torch.as_tensor(fb) + +class BarkBandsplitSpecification(PerceptualBandsplitSpecification): + def __init__( + self, + nfft: int, + fs: int, + n_bands: int, + f_min: float = 0.0, + f_max: float = None + ) -> None: + super().__init__(fbank_fn=bark_filterbank, nfft=nfft, fs=fs, n_bands=n_bands, f_min=f_min, f_max=f_max) + + +def triangular_bark_filterbank( + n_bands, fs, f_min, f_max, n_freqs +): + + all_freqs = torch.linspace(0, fs // 2, n_freqs) + + # calculate mel freq bins + m_min = hz2bark(f_min) + m_max = hz2bark(f_max) + + m_pts = torch.linspace(m_min, m_max, n_bands + 2) + f_pts = 600 * torch.sinh(m_pts / 6) + + # create filterbank + fb = _create_triangular_filterbank(all_freqs, f_pts) + + fb = fb.T + + first_active_band = torch.nonzero(torch.sum(fb, dim=-1))[0, 0] + first_active_bin = torch.nonzero(fb[first_active_band, :])[0, 0] + + fb[first_active_band, :first_active_bin] = 1.0 + + return fb + +class TriangularBarkBandsplitSpecification(PerceptualBandsplitSpecification): + def __init__( + self, + nfft: int, + fs: int, + n_bands: int, + f_min: float = 0.0, + f_max: float = None + ) -> None: + super().__init__(fbank_fn=triangular_bark_filterbank, nfft=nfft, fs=fs, n_bands=n_bands, f_min=f_min, f_max=f_max) + + + +def minibark_filterbank( + n_bands, fs, f_min, f_max, n_freqs +): + fb = bark_filterbank( + n_bands, + fs, + f_min, + f_max, + n_freqs + ) + + fb[fb < np.sqrt(0.5)] = 0.0 + + return fb + +class MiniBarkBandsplitSpecification(PerceptualBandsplitSpecification): + def __init__( + self, + nfft: int, + fs: int, + n_bands: int, + f_min: float = 0.0, + f_max: float = None + ) -> None: + super().__init__(fbank_fn=minibark_filterbank, nfft=nfft, fs=fs, n_bands=n_bands, f_min=f_min, f_max=f_max) + + + + + +def erb_filterbank( + n_bands: int, + fs: int, + f_min: float, + f_max: float, + n_freqs: int, +) -> Tensor: + # freq bins + A = (1000 * np.log(10)) / (24.7 * 4.37) + all_freqs = torch.linspace(0, fs // 2, n_freqs) + + # calculate mel freq bins + m_min = hz2erb(f_min) + 
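# NOTE (editor): descriptive comment added for clarity. hz2erb maps Hz onto the
# ERB-rate scale, so the linspace below is uniform in ERB units; f_pts then converts
# those points back to Hz via f = (10**(erb / A) - 1) / 0.00437, i.e. the inverse of
# erb = A * log10(1 + 0.00437 * f), with A = 1000*ln(10)/(24.7*4.37) ≈ 21.3 as
# defined above.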
m_max = hz2erb(f_max) + + m_pts = torch.linspace(m_min, m_max, n_bands + 2) + f_pts = (torch.pow(10, (m_pts / A)) - 1)/ 0.00437 + + # create filterbank + fb = _create_triangular_filterbank(all_freqs, f_pts) + + fb = fb.T + + + first_active_band = torch.nonzero(torch.sum(fb, dim=-1))[0, 0] + first_active_bin = torch.nonzero(fb[first_active_band, :])[0, 0] + + fb[first_active_band, :first_active_bin] = 1.0 + + return fb + + + +class EquivalentRectangularBandsplitSpecification(PerceptualBandsplitSpecification): + def __init__( + self, + nfft: int, + fs: int, + n_bands: int, + f_min: float = 0.0, + f_max: float = None + ) -> None: + super().__init__(fbank_fn=erb_filterbank, nfft=nfft, fs=fs, n_bands=n_bands, f_min=f_min, f_max=f_max) + +if __name__ == "__main__": + import pandas as pd + + band_defs = [] + + for bands in [VocalBandsplitSpecification]: + band_name = bands.__name__.replace("BandsplitSpecification", "") + + mbs = bands(nfft=2048, fs=44100).get_band_specs() + + for i, (f_min, f_max) in enumerate(mbs): + band_defs.append({ + "band": band_name, + "band_index": i, + "f_min": f_min, + "f_max": f_max + }) + + df = pd.DataFrame(band_defs) + df.to_csv("vox7bands.csv", index=False) \ No newline at end of file diff --git a/data_pipeline/seperation/models/bandit/core/model/bsrnn/wrapper.py b/data_pipeline/seperation/models/bandit/core/model/bsrnn/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..a31c087db33eb215effa3c3fc492999c5672c55e --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/model/bsrnn/wrapper.py @@ -0,0 +1,882 @@ +from pprint import pprint +from typing import Dict, List, Optional, Tuple, Union + +import torch +from torch import nn + +from models.bandit.core.model._spectral import _SpectralComponent +from models.bandit.core.model.bsrnn.utils import ( + BarkBandsplitSpecification, BassBandsplitSpecification, + DrumBandsplitSpecification, + EquivalentRectangularBandsplitSpecification, MelBandsplitSpecification, + MusicalBandsplitSpecification, OtherBandsplitSpecification, + TriangularBarkBandsplitSpecification, VocalBandsplitSpecification, +) +from .core import ( + MultiSourceMultiMaskBandSplitCoreConv, + MultiSourceMultiMaskBandSplitCoreRNN, + MultiSourceMultiMaskBandSplitCoreTransformer, + MultiSourceMultiPatchingMaskBandSplitCoreRNN, SingleMaskBandsplitCoreRNN, + SingleMaskBandsplitCoreTransformer, +) + +import pytorch_lightning as pl + +def get_band_specs(band_specs, n_fft, fs, n_bands=None): + if band_specs in ["dnr:speech", "dnr:vox7", "musdb:vocals", "musdb:vox7"]: + bsm = VocalBandsplitSpecification( + nfft=n_fft, fs=fs + ).get_band_specs() + freq_weights = None + overlapping_band = False + elif "tribark" in band_specs: + assert n_bands is not None + specs = TriangularBarkBandsplitSpecification( + nfft=n_fft, + fs=fs, + n_bands=n_bands + ) + bsm = specs.get_band_specs() + freq_weights = specs.get_freq_weights() + overlapping_band = True + elif "bark" in band_specs: + assert n_bands is not None + specs = BarkBandsplitSpecification( + nfft=n_fft, + fs=fs, + n_bands=n_bands + ) + bsm = specs.get_band_specs() + freq_weights = specs.get_freq_weights() + overlapping_band = True + elif "erb" in band_specs: + assert n_bands is not None + specs = EquivalentRectangularBandsplitSpecification( + nfft=n_fft, + fs=fs, + n_bands=n_bands + ) + bsm = specs.get_band_specs() + freq_weights = specs.get_freq_weights() + overlapping_band = True + elif "musical" in band_specs: + assert n_bands is not None + specs = MusicalBandsplitSpecification( + 
nfft=n_fft, + fs=fs, + n_bands=n_bands + ) + bsm = specs.get_band_specs() + freq_weights = specs.get_freq_weights() + overlapping_band = True + elif band_specs == "dnr:mel" or "mel" in band_specs: + assert n_bands is not None + specs = MelBandsplitSpecification( + nfft=n_fft, + fs=fs, + n_bands=n_bands + ) + bsm = specs.get_band_specs() + freq_weights = specs.get_freq_weights() + overlapping_band = True + else: + raise NameError + + return bsm, freq_weights, overlapping_band + + +def get_band_specs_map(band_specs_map, n_fft, fs, n_bands=None): + if band_specs_map == "musdb:all": + bsm = { + "vocals": VocalBandsplitSpecification( + nfft=n_fft, fs=fs + ).get_band_specs(), + "drums": DrumBandsplitSpecification( + nfft=n_fft, fs=fs + ).get_band_specs(), + "bass": BassBandsplitSpecification( + nfft=n_fft, fs=fs + ).get_band_specs(), + "other": OtherBandsplitSpecification( + nfft=n_fft, fs=fs + ).get_band_specs(), + } + freq_weights = None + overlapping_band = False + elif band_specs_map == "dnr:vox7": + bsm_, freq_weights, overlapping_band = get_band_specs( + "dnr:speech", n_fft, fs, n_bands + ) + bsm = { + "speech": bsm_, + "music": bsm_, + "effects": bsm_ + } + elif "dnr:vox7:" in band_specs_map: + stem = band_specs_map.split(":")[-1] + bsm_, freq_weights, overlapping_band = get_band_specs( + "dnr:speech", n_fft, fs, n_bands + ) + bsm = { + stem: bsm_ + } + else: + raise NameError + + return bsm, freq_weights, overlapping_band + + +class BandSplitWrapperBase(pl.LightningModule): + bsrnn: nn.Module + + def __init__(self, **kwargs): + super().__init__() + + +class SingleMaskMultiSourceBandSplitBase( + BandSplitWrapperBase, + _SpectralComponent +): + def __init__( + self, + band_specs_map: Union[str, Dict[str, List[Tuple[float, float]]]], + fs: int = 44100, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + n_bands: int = None, + ) -> None: + super().__init__( + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + ) + + if isinstance(band_specs_map, str): + self.band_specs_map, self.freq_weights, self.overlapping_band = get_band_specs_map( + band_specs_map, + n_fft, + fs, + n_bands=n_bands + ) + + self.stems = list(self.band_specs_map.keys()) + + def forward(self, batch): + audio = batch["audio"] + + with torch.no_grad(): + batch["spectrogram"] = {stem: self.stft(audio[stem]) for stem in + audio} + + X = batch["spectrogram"]["mixture"] + length = batch["audio"]["mixture"].shape[-1] + + output = {"spectrogram": {}, "audio": {}} + + for stem, bsrnn in self.bsrnn.items(): + S = bsrnn(X) + s = self.istft(S, length) + output["spectrogram"][stem] = S + output["audio"][stem] = s + + return batch, output + + +class MultiMaskMultiSourceBandSplitBase( + BandSplitWrapperBase, + _SpectralComponent +): + def __init__( + self, + stems: List[str], + band_specs: Union[str, List[Tuple[float, float]]], + fs: int = 44100, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + n_bands: int = None, + ) -> 
None: + super().__init__( + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + ) + + if isinstance(band_specs, str): + self.band_specs, self.freq_weights, self.overlapping_band = get_band_specs( + band_specs, + n_fft, + fs, + n_bands + ) + + self.stems = stems + + def forward(self, batch): + # with torch.no_grad(): + audio = batch["audio"] + cond = batch.get("condition", None) + with torch.no_grad(): + batch["spectrogram"] = {stem: self.stft(audio[stem]) for stem in + audio} + + X = batch["spectrogram"]["mixture"] + length = batch["audio"]["mixture"].shape[-1] + + output = self.bsrnn(X, cond=cond) + output["audio"] = {} + + for stem, S in output["spectrogram"].items(): + s = self.istft(S, length) + output["audio"][stem] = s + + return batch, output + + +class MultiMaskMultiSourceBandSplitBaseSimple( + BandSplitWrapperBase, + _SpectralComponent +): + def __init__( + self, + stems: List[str], + band_specs: Union[str, List[Tuple[float, float]]], + fs: int = 44100, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + n_bands: int = None, + ) -> None: + super().__init__( + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + ) + + if isinstance(band_specs, str): + self.band_specs, self.freq_weights, self.overlapping_band = get_band_specs( + band_specs, + n_fft, + fs, + n_bands + ) + + self.stems = stems + + def forward(self, batch): + with torch.no_grad(): + X = self.stft(batch) + length = batch.shape[-1] + output = self.bsrnn(X, cond=None) + res = [] + for stem, S in output["spectrogram"].items(): + s = self.istft(S, length) + res.append(s) + res = torch.stack(res, dim=1) + return res + + +class SingleMaskMultiSourceBandSplitRNN(SingleMaskMultiSourceBandSplitBase): + def __init__( + self, + in_channel: int, + band_specs_map: Union[str, Dict[str, List[Tuple[float, float]]]], + fs: int = 44100, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + ) -> None: + super().__init__( + band_specs_map=band_specs_map, + fs=fs, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + ) + + self.bsrnn = nn.ModuleDict( + { + src: SingleMaskBandsplitCoreRNN( + band_specs=specs, + in_channel=in_channel, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + 
normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + n_sqm_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + mlp_dim=mlp_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + ) + for src, specs in self.band_specs_map.items() + } + ) + + +class SingleMaskMultiSourceBandSplitTransformer( + SingleMaskMultiSourceBandSplitBase +): + def __init__( + self, + in_channel: int, + band_specs_map: Union[str, Dict[str, List[Tuple[float, float]]]], + fs: int = 44100, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + tf_dropout: float = 0.0, + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + ) -> None: + super().__init__( + band_specs_map=band_specs_map, + fs=fs, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + ) + + self.bsrnn = nn.ModuleDict( + { + src: SingleMaskBandsplitCoreTransformer( + band_specs=specs, + in_channel=in_channel, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + n_sqm_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + tf_dropout=tf_dropout, + mlp_dim=mlp_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + ) + for src, specs in self.band_specs_map.items() + } + ) + + +class MultiMaskMultiSourceBandSplitRNN(MultiMaskMultiSourceBandSplitBase): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: Union[str, List[Tuple[float, float]]], + fs: int = 44100, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + cond_dim: int = 0, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + n_bands: int = None, + use_freq_weights: bool = True, + normalize_input: bool = False, + mult_add_mask: bool = False, + freeze_encoder: bool = False, + ) -> None: + super().__init__( + stems=stems, + band_specs=band_specs, + fs=fs, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + 
window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + n_bands=n_bands, + ) + + self.bsrnn = MultiSourceMultiMaskBandSplitCoreRNN( + stems=stems, + band_specs=self.band_specs, + in_channel=in_channel, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + n_sqm_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + mlp_dim=mlp_dim, + cond_dim=cond_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + overlapping_band=self.overlapping_band, + freq_weights=self.freq_weights, + n_freq=n_fft // 2 + 1, + use_freq_weights=use_freq_weights, + mult_add_mask=mult_add_mask + ) + + self.normalize_input = normalize_input + self.cond_dim = cond_dim + + if freeze_encoder: + for param in self.bsrnn.band_split.parameters(): + param.requires_grad = False + + for param in self.bsrnn.tf_model.parameters(): + param.requires_grad = False + + +class MultiMaskMultiSourceBandSplitRNNSimple(MultiMaskMultiSourceBandSplitBaseSimple): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: Union[str, List[Tuple[float, float]]], + fs: int = 44100, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + cond_dim: int = 0, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + n_bands: int = None, + use_freq_weights: bool = True, + normalize_input: bool = False, + mult_add_mask: bool = False, + freeze_encoder: bool = False, + ) -> None: + super().__init__( + stems=stems, + band_specs=band_specs, + fs=fs, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + n_bands=n_bands, + ) + + self.bsrnn = MultiSourceMultiMaskBandSplitCoreRNN( + stems=stems, + band_specs=self.band_specs, + in_channel=in_channel, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + n_sqm_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + mlp_dim=mlp_dim, + cond_dim=cond_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + overlapping_band=self.overlapping_band, + freq_weights=self.freq_weights, + n_freq=n_fft // 2 + 1, + use_freq_weights=use_freq_weights, + mult_add_mask=mult_add_mask + ) + + self.normalize_input = normalize_input + self.cond_dim = cond_dim + + if freeze_encoder: + for param in self.bsrnn.band_split.parameters(): + param.requires_grad = False 
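# NOTE (editor): descriptive comment added for clarity. With freeze_encoder=True,
# the loop above and the one below turn off gradients for the shared encoder
# (the band-split front-end and the time-frequency model), so only the per-stem
# mask-estimation heads remain trainable.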
+ + for param in self.bsrnn.tf_model.parameters(): + param.requires_grad = False + + +class MultiMaskMultiSourceBandSplitTransformer( + MultiMaskMultiSourceBandSplitBase +): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: Union[str, List[Tuple[float, float]]], + fs: int = 44100, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + cond_dim: int = 0, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + n_bands: int = None, + use_freq_weights: bool = True, + normalize_input: bool = False, + mult_add_mask: bool = False + ) -> None: + super().__init__( + stems=stems, + band_specs=band_specs, + fs=fs, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + n_bands=n_bands, + ) + + self.bsrnn = MultiSourceMultiMaskBandSplitCoreTransformer( + stems=stems, + band_specs=self.band_specs, + in_channel=in_channel, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + n_sqm_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + mlp_dim=mlp_dim, + cond_dim=cond_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + overlapping_band=self.overlapping_band, + freq_weights=self.freq_weights, + n_freq=n_fft // 2 + 1, + use_freq_weights=use_freq_weights, + mult_add_mask=mult_add_mask + ) + + + +class MultiMaskMultiSourceBandSplitConv( + MultiMaskMultiSourceBandSplitBase +): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: Union[str, List[Tuple[float, float]]], + fs: int = 44100, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + cond_dim: int = 0, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + n_bands: int = None, + use_freq_weights: bool = True, + normalize_input: bool = False, + mult_add_mask: bool = False + ) -> None: + super().__init__( + stems=stems, + band_specs=band_specs, + fs=fs, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + 
normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + n_bands=n_bands, + ) + + self.bsrnn = MultiSourceMultiMaskBandSplitCoreConv( + stems=stems, + band_specs=self.band_specs, + in_channel=in_channel, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + n_sqm_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + mlp_dim=mlp_dim, + cond_dim=cond_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + overlapping_band=self.overlapping_band, + freq_weights=self.freq_weights, + n_freq=n_fft // 2 + 1, + use_freq_weights=use_freq_weights, + mult_add_mask=mult_add_mask + ) +class PatchingMaskMultiSourceBandSplitRNN(MultiMaskMultiSourceBandSplitBase): + def __init__( + self, + in_channel: int, + stems: List[str], + band_specs: Union[str, List[Tuple[float, float]]], + kernel_norm_mlp_version: int = 1, + mask_kernel_freq: int = 3, + mask_kernel_time: int = 3, + conv_kernel_freq: int = 1, + conv_kernel_time: int = 1, + fs: int = 44100, + require_no_overlap: bool = False, + require_no_gap: bool = True, + normalize_channel_independently: bool = False, + treat_channel_as_feature: bool = True, + n_sqm_modules: int = 12, + emb_dim: int = 128, + rnn_dim: int = 256, + bidirectional: bool = True, + rnn_type: str = "LSTM", + mlp_dim: int = 512, + hidden_activation: str = "Tanh", + hidden_activation_kwargs: Optional[Dict] = None, + complex_mask: bool = True, + n_fft: int = 2048, + win_length: Optional[int] = 2048, + hop_length: int = 512, + window_fn: str = "hann_window", + wkwargs: Optional[Dict] = None, + power: Optional[int] = None, + center: bool = True, + normalized: bool = True, + pad_mode: str = "constant", + onesided: bool = True, + n_bands: int = None, + ) -> None: + super().__init__( + stems=stems, + band_specs=band_specs, + fs=fs, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + window_fn=window_fn, + wkwargs=wkwargs, + power=power, + center=center, + normalized=normalized, + pad_mode=pad_mode, + onesided=onesided, + n_bands=n_bands, + ) + + self.bsrnn = MultiSourceMultiPatchingMaskBandSplitCoreRNN( + stems=stems, + band_specs=self.band_specs, + in_channel=in_channel, + require_no_overlap=require_no_overlap, + require_no_gap=require_no_gap, + normalize_channel_independently=normalize_channel_independently, + treat_channel_as_feature=treat_channel_as_feature, + n_sqm_modules=n_sqm_modules, + emb_dim=emb_dim, + rnn_dim=rnn_dim, + bidirectional=bidirectional, + rnn_type=rnn_type, + mlp_dim=mlp_dim, + hidden_activation=hidden_activation, + hidden_activation_kwargs=hidden_activation_kwargs, + complex_mask=complex_mask, + overlapping_band=self.overlapping_band, + freq_weights=self.freq_weights, + n_freq=n_fft // 2 + 1, + mask_kernel_freq=mask_kernel_freq, + mask_kernel_time=mask_kernel_time, + conv_kernel_freq=conv_kernel_freq, + conv_kernel_time=conv_kernel_time, + kernel_norm_mlp_version=kernel_norm_mlp_version, + ) diff --git a/data_pipeline/seperation/models/bandit/core/utils/__init__.py b/data_pipeline/seperation/models/bandit/core/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/data_pipeline/seperation/models/bandit/core/utils/audio.py b/data_pipeline/seperation/models/bandit/core/utils/audio.py new file 
mode 100644 index 0000000000000000000000000000000000000000..e4066d7dbad39e4803e76659cf51e30f38df23b3 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/core/utils/audio.py @@ -0,0 +1,463 @@ +from collections import defaultdict + +from tqdm import tqdm +from typing import Callable, Dict, List, Optional, Tuple + +import numpy as np +import torch +from torch import nn +from torch.nn import functional as F + + +@torch.jit.script +def merge( + combined: torch.Tensor, + original_batch_size: int, + n_channel: int, + n_chunks: int, + chunk_size: int, ): + combined = torch.reshape( + combined, + (original_batch_size, n_chunks, n_channel, chunk_size) + ) + combined = torch.permute(combined, (0, 2, 3, 1)).reshape( + original_batch_size * n_channel, + chunk_size, + n_chunks + ) + + return combined + + +@torch.jit.script +def unfold( + padded_audio: torch.Tensor, + original_batch_size: int, + n_channel: int, + chunk_size: int, + hop_size: int + ) -> torch.Tensor: + + unfolded_input = F.unfold( + padded_audio[:, :, None, :], + kernel_size=(1, chunk_size), + stride=(1, hop_size) + ) + + _, _, n_chunks = unfolded_input.shape + unfolded_input = unfolded_input.view( + original_batch_size, + n_channel, + chunk_size, + n_chunks + ) + unfolded_input = torch.permute( + unfolded_input, + (0, 3, 1, 2) + ).reshape( + original_batch_size * n_chunks, + n_channel, + chunk_size + ) + + return unfolded_input + + +@torch.jit.script +# @torch.compile +def merge_chunks_all( + combined: torch.Tensor, + original_batch_size: int, + n_channel: int, + n_samples: int, + n_padded_samples: int, + n_chunks: int, + chunk_size: int, + hop_size: int, + edge_frame_pad_sizes: Tuple[int, int], + standard_window: torch.Tensor, + first_window: torch.Tensor, + last_window: torch.Tensor +): + combined = merge( + combined, + original_batch_size, + n_channel, + n_chunks, + chunk_size + ) + + combined = combined * standard_window[:, None].to(combined.device) + + combined = F.fold( + combined.to(torch.float32), output_size=(1, n_padded_samples), + kernel_size=(1, chunk_size), + stride=(1, hop_size) + ) + + combined = combined.view( + original_batch_size, + n_channel, + n_padded_samples + ) + + pad_front, pad_back = edge_frame_pad_sizes + combined = combined[..., pad_front:-pad_back] + + combined = combined[..., :n_samples] + + return combined + + # @torch.jit.script + + +def merge_chunks_edge( + combined: torch.Tensor, + original_batch_size: int, + n_channel: int, + n_samples: int, + n_padded_samples: int, + n_chunks: int, + chunk_size: int, + hop_size: int, + edge_frame_pad_sizes: Tuple[int, int], + standard_window: torch.Tensor, + first_window: torch.Tensor, + last_window: torch.Tensor +): + combined = merge( + combined, + original_batch_size, + n_channel, + n_chunks, + chunk_size + ) + + combined[..., 0] = combined[..., 0] * first_window + combined[..., -1] = combined[..., -1] * last_window + combined[..., 1:-1] = combined[..., + 1:-1] * standard_window[:, None] + + combined = F.fold( + combined, output_size=(1, n_padded_samples), + kernel_size=(1, chunk_size), + stride=(1, hop_size) + ) + + combined = combined.view( + original_batch_size, + n_channel, + n_padded_samples + ) + + combined = combined[..., :n_samples] + + return combined + + +class BaseFader(nn.Module): + def __init__( + self, + chunk_size_second: float, + hop_size_second: float, + fs: int, + fade_edge_frames: bool, + batch_size: int, + ) -> None: + super().__init__() + + self.chunk_size = int(chunk_size_second * fs) + self.hop_size = int(hop_size_second * fs) + 
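The unfold/merge helpers in this audio utility treat the waveform as a 1 x T image so that F.unfold slices it into hop-spaced chunks and F.fold overlap-adds them back. A small round-trip sketch, assuming mono input shaped (batch, 1, time) that is already padded to a whole number of hops; dividing by the fold of an all-ones tensor undoes the overlap count, whereas the fader classes use crossfade windows instead:

```python
# Round-trip sketch of the unfold/fold machinery above, assuming mono audio shaped
# (batch, 1, time) that is already padded to a whole number of hops. Dividing by the
# fold of an all-ones tensor undoes the overlap count (the faders use windows instead).
import torch
import torch.nn.functional as F

chunk, hop, n = 8, 4, 32                      # chunk_size, hop_size, padded length
audio = torch.randn(2, 1, n)                  # (batch, channel, time)

# slice into overlapping chunks: shape (batch, chunk, n_chunks)
chunks = F.unfold(audio[:, :, None, :], kernel_size=(1, chunk), stride=(1, hop))

# overlap-add the chunks back and normalize by how many chunks covered each sample
num = F.fold(chunks, output_size=(1, n), kernel_size=(1, chunk), stride=(1, hop))
den = F.fold(torch.ones_like(chunks), output_size=(1, n), kernel_size=(1, chunk), stride=(1, hop))
recon = (num / den).view(2, 1, n)

print(torch.allclose(recon, audio, atol=1e-6))  # True: perfect reconstruction
```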
self.overlap_size = self.chunk_size - self.hop_size + self.fade_edge_frames = fade_edge_frames + self.batch_size = batch_size + + # @torch.jit.script + def prepare(self, audio): + + if self.fade_edge_frames: + audio = F.pad(audio, self.edge_frame_pad_sizes, mode="reflect") + + n_samples = audio.shape[-1] + n_chunks = int( + np.ceil((n_samples - self.chunk_size) / self.hop_size) + 1 + ) + + padded_size = (n_chunks - 1) * self.hop_size + self.chunk_size + pad_size = padded_size - n_samples + + padded_audio = F.pad(audio, (0, pad_size)) + + return padded_audio, n_chunks + + def forward( + self, + audio: torch.Tensor, + model_fn: Callable[[torch.Tensor], Dict[str, torch.Tensor]], + ): + + original_dtype = audio.dtype + original_device = audio.device + + audio = audio.to("cpu") + + original_batch_size, n_channel, n_samples = audio.shape + padded_audio, n_chunks = self.prepare(audio) + del audio + n_padded_samples = padded_audio.shape[-1] + + if n_channel > 1: + padded_audio = padded_audio.view( + original_batch_size * n_channel, 1, n_padded_samples + ) + + unfolded_input = unfold( + padded_audio, + original_batch_size, + n_channel, + self.chunk_size, self.hop_size + ) + + n_total_chunks, n_channel, chunk_size = unfolded_input.shape + + n_batch = np.ceil(n_total_chunks / self.batch_size).astype(int) + + chunks_in = [ + unfolded_input[ + b * self.batch_size:(b + 1) * self.batch_size, ...].clone() + for b in range(n_batch) + ] + + all_chunks_out = defaultdict( + lambda: torch.zeros_like( + unfolded_input, device="cpu" + ) + ) + + # for b, cin in enumerate(tqdm(chunks_in)): + for b, cin in enumerate(chunks_in): + if torch.allclose(cin, torch.tensor(0.0)): + del cin + continue + + chunks_out = model_fn(cin.to(original_device)) + del cin + for s, c in chunks_out.items(): + all_chunks_out[s][b * self.batch_size:(b + 1) * self.batch_size, + ...] 
= c.cpu() + del chunks_out + + del unfolded_input + del padded_audio + + if self.fade_edge_frames: + fn = merge_chunks_all + else: + fn = merge_chunks_edge + outputs = {} + + torch.cuda.empty_cache() + + for s, c in all_chunks_out.items(): + combined: torch.Tensor = fn( + c, + original_batch_size, + n_channel, + n_samples, + n_padded_samples, + n_chunks, + self.chunk_size, + self.hop_size, + self.edge_frame_pad_sizes, + self.standard_window, + self.__dict__.get("first_window", self.standard_window), + self.__dict__.get("last_window", self.standard_window) + ) + + outputs[s] = combined.to( + dtype=original_dtype, + device=original_device + ) + + return { + "audio": outputs + } + # + # def old_forward( + # self, + # audio: torch.Tensor, + # model_fn: Callable[[torch.Tensor], Dict[str, torch.Tensor]], + # ): + # + # n_samples = audio.shape[-1] + # original_batch_size = audio.shape[0] + # + # padded_audio, n_chunks = self.prepare(audio) + # + # ndim = padded_audio.ndim + # broadcaster = [1 for _ in range(ndim - 1)] + [self.chunk_size] + # + # outputs = defaultdict( + # lambda: torch.zeros_like( + # padded_audio, device=audio.device, dtype=torch.float64 + # ) + # ) + # + # all_chunks_out = [] + # len_chunks_in = [] + # + # batch_size_ = int(self.batch_size // original_batch_size) + # for b in range(int(np.ceil(n_chunks / batch_size_))): + # chunks_in = [] + # for j in range(batch_size_): + # i = b * batch_size_ + j + # if i == n_chunks: + # break + # + # start = i * hop_size + # end = start + self.chunk_size + # chunk_in = padded_audio[..., start:end] + # chunks_in.append(chunk_in) + # + # chunks_in = torch.concat(chunks_in, dim=0) + # chunks_out = model_fn(chunks_in) + # all_chunks_out.append(chunks_out) + # len_chunks_in.append(len(chunks_in)) + # + # for b, (chunks_out, lci) in enumerate( + # zip(all_chunks_out, len_chunks_in) + # ): + # for stem in chunks_out: + # for j in range(lci // original_batch_size): + # i = b * batch_size_ + j + # + # if self.fade_edge_frames: + # window = self.standard_window + # else: + # if i == 0: + # window = self.first_window + # elif i == n_chunks - 1: + # window = self.last_window + # else: + # window = self.standard_window + # + # start = i * hop_size + # end = start + self.chunk_size + # + # chunk_out = chunks_out[stem][j * original_batch_size: (j + 1) * original_batch_size, + # ...] 
+ # contrib = window.view(*broadcaster) * chunk_out + # outputs[stem][..., start:end] = ( + # outputs[stem][..., start:end] + contrib + # ) + # + # if self.fade_edge_frames: + # pad_front, pad_back = self.edge_frame_pad_sizes + # outputs = {k: v[..., pad_front:-pad_back] for k, v in + # outputs.items()} + # + # outputs = {k: v[..., :n_samples].to(audio.dtype) for k, v in + # outputs.items()} + # + # return { + # "audio": outputs + # } + + +class LinearFader(BaseFader): + def __init__( + self, + chunk_size_second: float, + hop_size_second: float, + fs: int, + fade_edge_frames: bool = False, + batch_size: int = 1, + ) -> None: + + assert hop_size_second >= chunk_size_second / 2 + + super().__init__( + chunk_size_second=chunk_size_second, + hop_size_second=hop_size_second, + fs=fs, + fade_edge_frames=fade_edge_frames, + batch_size=batch_size, + ) + + in_fade = torch.linspace(0.0, 1.0, self.overlap_size + 1)[:-1] + out_fade = torch.linspace(1.0, 0.0, self.overlap_size + 1)[1:] + center_ones = torch.ones(self.chunk_size - 2 * self.overlap_size) + inout_ones = torch.ones(self.overlap_size) + + # using nn.Parameters allows lightning to take care of devices for us + self.register_buffer( + "standard_window", + torch.concat([in_fade, center_ones, out_fade]) + ) + + self.fade_edge_frames = fade_edge_frames + self.edge_frame_pad_size = (self.overlap_size, self.overlap_size) + + if not self.fade_edge_frames: + self.first_window = nn.Parameter( + torch.concat([inout_ones, center_ones, out_fade]), + requires_grad=False + ) + self.last_window = nn.Parameter( + torch.concat([in_fade, center_ones, inout_ones]), + requires_grad=False + ) + + +class OverlapAddFader(BaseFader): + def __init__( + self, + window_type: str, + chunk_size_second: float, + hop_size_second: float, + fs: int, + batch_size: int = 1, + ) -> None: + assert (chunk_size_second / hop_size_second) % 2 == 0 + assert int(chunk_size_second * fs) % 2 == 0 + + super().__init__( + chunk_size_second=chunk_size_second, + hop_size_second=hop_size_second, + fs=fs, + fade_edge_frames=True, + batch_size=batch_size, + ) + + self.hop_multiplier = self.chunk_size / (2 * self.hop_size) + # print(f"hop multiplier: {self.hop_multiplier}") + + self.edge_frame_pad_sizes = ( + 2 * self.overlap_size, + 2 * self.overlap_size + ) + + self.register_buffer( + "standard_window", torch.windows.__dict__[window_type]( + self.chunk_size, sym=False, # dtype=torch.float64 + ) / self.hop_multiplier + ) + + +if __name__ == "__main__": + import torchaudio as ta + fs = 44100 + ola = OverlapAddFader( + "hann", + 6.0, + 1.0, + fs, + batch_size=16 + ) + audio_, _ = ta.load( + "$DATA_ROOT/MUSDB18/HQ/canonical/test/BKS - Too " + "Much/vocals.wav" + ) + audio_ = audio_[None, ...] 
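OverlapAddFader registers a periodic window divided by chunk_size / (2 * hop_size); its asserts require an even chunk/hop ratio so that the shifted windows sum to a constant (constant overlap-add). A quick numeric check of that property with toy sizes, independent of the classes above:

```python
# Numeric check (toy sizes) of the constant-overlap-add property OverlapAddFader relies on:
# a periodic window divided by chunk_size / (2 * hop_size) sums to one when shifted by the hop,
# which is why the constructor asserts an even chunk/hop ratio.
import torch

chunk, hop = 16, 4                                  # ratio 4 (even), as the assert requires
window = torch.hann_window(chunk, periodic=True)
hop_multiplier = chunk / (2 * hop)

cola = torch.zeros(chunk * 4)
for start in range(0, len(cola) - chunk + 1, hop):
    cola[start:start + chunk] += window / hop_multiplier

# away from the edges the summed, rescaled window is flat at 1.0
print(torch.allclose(cola[chunk:-chunk], torch.ones(len(cola) - 2 * chunk)))  # True
```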
+ out = ola(audio_, lambda x: {"stem": x})["audio"]["stem"] + print(torch.allclose(out, audio_)) diff --git a/data_pipeline/seperation/models/bandit/model_from_config.py b/data_pipeline/seperation/models/bandit/model_from_config.py new file mode 100644 index 0000000000000000000000000000000000000000..00ea586d7dfdbd6b89d6b7f2f400e6c8d04da5e4 --- /dev/null +++ b/data_pipeline/seperation/models/bandit/model_from_config.py @@ -0,0 +1,31 @@ +import sys +import os.path +import torch + +code_path = os.path.dirname(os.path.abspath(__file__)) + '/' +sys.path.append(code_path) + +import yaml +from ml_collections import ConfigDict + +torch.set_float32_matmul_precision("medium") + + +def get_model( + config_path, + weights_path, + device, +): + from models.bandit.core.model import MultiMaskMultiSourceBandSplitRNNSimple + + f = open(config_path) + config = ConfigDict(yaml.load(f, Loader=yaml.FullLoader)) + f.close() + + model = MultiMaskMultiSourceBandSplitRNNSimple( + **config.model + ) + d = torch.load(code_path + 'model_bandit_plus_dnr_sdr_11.47.chpt') + model.load_state_dict(d) + model.to(device) + return model, config diff --git a/data_pipeline/seperation/models/bs_roformer/__init__.py b/data_pipeline/seperation/models/bs_roformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..980e0afa5b7b4fd66168bce6905a94e7c91c380e --- /dev/null +++ b/data_pipeline/seperation/models/bs_roformer/__init__.py @@ -0,0 +1,2 @@ +from models.bs_roformer.bs_roformer import BSRoformer +from models.bs_roformer.mel_band_roformer import MelBandRoformer diff --git a/data_pipeline/seperation/models/bs_roformer/__pycache__/__init__.cpython-38.pyc b/data_pipeline/seperation/models/bs_roformer/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b4e9d65519e6c8d98b274753148dc1a7a4feb0e Binary files /dev/null and b/data_pipeline/seperation/models/bs_roformer/__pycache__/__init__.cpython-38.pyc differ diff --git a/data_pipeline/seperation/models/bs_roformer/__pycache__/__init__.cpython-39.pyc b/data_pipeline/seperation/models/bs_roformer/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f908fd29523073f9fd57ff6d20e4efc4406ac39 Binary files /dev/null and b/data_pipeline/seperation/models/bs_roformer/__pycache__/__init__.cpython-39.pyc differ diff --git a/data_pipeline/seperation/models/bs_roformer/__pycache__/attend.cpython-38.pyc b/data_pipeline/seperation/models/bs_roformer/__pycache__/attend.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f87eaf354e9f084148823b2f1464294189ca3d7e Binary files /dev/null and b/data_pipeline/seperation/models/bs_roformer/__pycache__/attend.cpython-38.pyc differ diff --git a/data_pipeline/seperation/models/bs_roformer/__pycache__/attend.cpython-39.pyc b/data_pipeline/seperation/models/bs_roformer/__pycache__/attend.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa70ebfdd9bdee03ce49897ea1af55078cc151f Binary files /dev/null and b/data_pipeline/seperation/models/bs_roformer/__pycache__/attend.cpython-39.pyc differ diff --git a/data_pipeline/seperation/models/bs_roformer/__pycache__/bs_roformer.cpython-38.pyc b/data_pipeline/seperation/models/bs_roformer/__pycache__/bs_roformer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4652f34dd5e5a9ac655e12fdbd18bbe87c019394 Binary files /dev/null and 
b/data_pipeline/seperation/models/bs_roformer/__pycache__/bs_roformer.cpython-38.pyc differ diff --git a/data_pipeline/seperation/models/bs_roformer/__pycache__/bs_roformer.cpython-39.pyc b/data_pipeline/seperation/models/bs_roformer/__pycache__/bs_roformer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc2de22a19d8d7879b8e3f86d6f826eda258fa27 Binary files /dev/null and b/data_pipeline/seperation/models/bs_roformer/__pycache__/bs_roformer.cpython-39.pyc differ diff --git a/data_pipeline/seperation/models/bs_roformer/__pycache__/mel_band_roformer.cpython-38.pyc b/data_pipeline/seperation/models/bs_roformer/__pycache__/mel_band_roformer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eb487a35af9541e5a1ca5cf99c63d981cbc2d0d Binary files /dev/null and b/data_pipeline/seperation/models/bs_roformer/__pycache__/mel_band_roformer.cpython-38.pyc differ diff --git a/data_pipeline/seperation/models/bs_roformer/__pycache__/mel_band_roformer.cpython-39.pyc b/data_pipeline/seperation/models/bs_roformer/__pycache__/mel_band_roformer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70a05f4105584401a4accc2e4b480f4300b8038c Binary files /dev/null and b/data_pipeline/seperation/models/bs_roformer/__pycache__/mel_band_roformer.cpython-39.pyc differ diff --git a/data_pipeline/seperation/models/bs_roformer/attend.py b/data_pipeline/seperation/models/bs_roformer/attend.py new file mode 100644 index 0000000000000000000000000000000000000000..34476c181629652e10ca866679abbbe4868927e6 --- /dev/null +++ b/data_pipeline/seperation/models/bs_roformer/attend.py @@ -0,0 +1,120 @@ +from functools import wraps +from packaging import version +from collections import namedtuple + +import torch +from torch import nn, einsum +import torch.nn.functional as F + +from einops import rearrange, reduce + +# constants + +FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) + +# helpers + +def exists(val): + return val is not None + +def default(v, d): + return v if exists(v) else d + +def once(fn): + called = False + @wraps(fn) + def inner(x): + nonlocal called + if called: + return + called = True + return fn(x) + return inner + +print_once = once(print) + +# main class + +class Attend(nn.Module): + def __init__( + self, + dropout = 0., + flash = False, + scale = None + ): + super().__init__() + self.scale = scale + self.dropout = dropout + self.attn_dropout = nn.Dropout(dropout) + + self.flash = flash + assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' + + # determine efficient attention configs for cuda and cpu + + self.cpu_config = FlashAttentionConfig(True, True, True) + self.cuda_config = None + + if not torch.cuda.is_available() or not flash: + return + + device_properties = torch.cuda.get_device_properties(torch.device('cuda')) + + if device_properties.major == 8 and device_properties.minor == 0: + print_once('A100 GPU detected, using flash attention if input tensor is on cuda') + self.cuda_config = FlashAttentionConfig(True, False, False) + else: + print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') + self.cuda_config = FlashAttentionConfig(False, True, True) + + def flash_attn(self, q, k, v): + _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device + + if 
exists(self.scale): + default_scale = q.shape[-1] ** -0.5 + q = q * (self.scale / default_scale) + + # Check if there is a compatible device for flash attention + + config = self.cuda_config if is_cuda else self.cpu_config + + # pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale + + with torch.backends.cuda.sdp_kernel(**config._asdict()): + out = F.scaled_dot_product_attention( + q, k, v, + dropout_p = self.dropout if self.training else 0. + ) + + return out + + def forward(self, q, k, v): + """ + einstein notation + b - batch + h - heads + n, i, j - sequence length (base sequence length, source, target) + d - feature dimension + """ + + q_len, k_len, device = q.shape[-2], k.shape[-2], q.device + + scale = default(self.scale, q.shape[-1] ** -0.5) + + if self.flash: + return self.flash_attn(q, k, v) + + # similarity + + sim = einsum(f"b h i d, b h j d -> b h i j", q, k) * scale + + # attention + + attn = sim.softmax(dim=-1) + attn = self.attn_dropout(attn) + + # aggregate values + + out = einsum(f"b h i j, b h j d -> b h i d", attn, v) + + return out diff --git a/data_pipeline/seperation/models/bs_roformer/bs_roformer.py b/data_pipeline/seperation/models/bs_roformer/bs_roformer.py new file mode 100644 index 0000000000000000000000000000000000000000..0d760a7e6be9734046492418dbaa503ae0197164 --- /dev/null +++ b/data_pipeline/seperation/models/bs_roformer/bs_roformer.py @@ -0,0 +1,577 @@ +from functools import partial + +import torch +from torch import nn, einsum, Tensor +from torch.nn import Module, ModuleList +import torch.nn.functional as F + +from models.bs_roformer.attend import Attend + +from beartype.typing import Tuple, Optional, List, Callable +from beartype import beartype + +from rotary_embedding_torch import RotaryEmbedding + +from einops import rearrange, pack, unpack +from einops.layers.torch import Rearrange + +# helper functions + +def exists(val): + return val is not None + + +def default(v, d): + return v if exists(v) else d + + +def pack_one(t, pattern): + return pack([t], pattern) + + +def unpack_one(t, ps, pattern): + return unpack(t, ps, pattern)[0] + + +# norm + +def l2norm(t): + return F.normalize(t, dim = -1, p = 2) + + +class RMSNorm(Module): + def __init__(self, dim): + super().__init__() + self.scale = dim ** 0.5 + self.gamma = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + return F.normalize(x, dim=-1) * self.scale * self.gamma + + +# attention + +class FeedForward(Module): + def __init__( + self, + dim, + mult=4, + dropout=0. 
+ ): + super().__init__() + dim_inner = int(dim * mult) + self.net = nn.Sequential( + RMSNorm(dim), + nn.Linear(dim, dim_inner), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(dim_inner, dim), + nn.Dropout(dropout) + ) + + def forward(self, x): + return self.net(x) + + +class Attention(Module): + def __init__( + self, + dim, + heads=8, + dim_head=64, + dropout=0., + rotary_embed=None, + flash=True + ): + super().__init__() + self.heads = heads + self.scale = dim_head ** -0.5 + dim_inner = heads * dim_head + + self.rotary_embed = rotary_embed + + self.attend = Attend(flash=flash, dropout=dropout) + + self.norm = RMSNorm(dim) + self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False) + + self.to_gates = nn.Linear(dim, heads) + + self.to_out = nn.Sequential( + nn.Linear(dim_inner, dim, bias=False), + nn.Dropout(dropout) + ) + + def forward(self, x): + x = self.norm(x) + + q, k, v = rearrange(self.to_qkv(x), 'b n (qkv h d) -> qkv b h n d', qkv=3, h=self.heads) + + if exists(self.rotary_embed): + q = self.rotary_embed.rotate_queries_or_keys(q) + k = self.rotary_embed.rotate_queries_or_keys(k) + + out = self.attend(q, k, v) + + gates = self.to_gates(x) + out = out * rearrange(gates, 'b n h -> b h n 1').sigmoid() + + out = rearrange(out, 'b h n d -> b n (h d)') + return self.to_out(out) + + +class LinearAttention(Module): + """ + this flavor of linear attention proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al. + """ + + @beartype + def __init__( + self, + *, + dim, + dim_head=32, + heads=8, + scale=8, + flash=False, + dropout=0. + ): + super().__init__() + dim_inner = dim_head * heads + self.norm = RMSNorm(dim) + + self.to_qkv = nn.Sequential( + nn.Linear(dim, dim_inner * 3, bias=False), + Rearrange('b n (qkv h d) -> qkv b h d n', qkv=3, h=heads) + ) + + self.temperature = nn.Parameter(torch.ones(heads, 1, 1)) + + self.attend = Attend( + scale=scale, + dropout=dropout, + flash=flash + ) + + self.to_out = nn.Sequential( + Rearrange('b h d n -> b n (h d)'), + nn.Linear(dim_inner, dim, bias=False) + ) + + def forward( + self, + x + ): + x = self.norm(x) + + q, k, v = self.to_qkv(x) + + q, k = map(l2norm, (q, k)) + q = q * self.temperature.exp() + + out = self.attend(q, k, v) + + return self.to_out(out) + + +class Transformer(Module): + def __init__( + self, + *, + dim, + depth, + dim_head=64, + heads=8, + attn_dropout=0., + ff_dropout=0., + ff_mult=4, + norm_output=True, + rotary_embed=None, + flash_attn=True, + linear_attn=False + ): + super().__init__() + self.layers = ModuleList([]) + + for _ in range(depth): + if linear_attn: + attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn) + else: + attn = Attention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, + rotary_embed=rotary_embed, flash=flash_attn) + + self.layers.append(ModuleList([ + attn, + FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout) + ])) + + self.norm = RMSNorm(dim) if norm_output else nn.Identity() + + def forward(self, x): + + for attn, ff in self.layers: + x = attn(x) + x + x = ff(x) + x + + return self.norm(x) + + +# bandsplit module + +class BandSplit(Module): + @beartype + def __init__( + self, + dim, + dim_inputs: Tuple[int, ...] 
+ ): + super().__init__() + self.dim_inputs = dim_inputs + self.to_features = ModuleList([]) + + for dim_in in dim_inputs: + net = nn.Sequential( + RMSNorm(dim_in), + nn.Linear(dim_in, dim) + ) + + self.to_features.append(net) + + def forward(self, x): + x = x.split(self.dim_inputs, dim=-1) + + outs = [] + for split_input, to_feature in zip(x, self.to_features): + split_output = to_feature(split_input) + outs.append(split_output) + + return torch.stack(outs, dim=-2) + + +def MLP( + dim_in, + dim_out, + dim_hidden=None, + depth=1, + activation=nn.Tanh +): + dim_hidden = default(dim_hidden, dim_in) + + net = [] + dims = (dim_in, *((dim_hidden,) * (depth - 1)), dim_out) + + for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])): + is_last = ind == (len(dims) - 2) + + net.append(nn.Linear(layer_dim_in, layer_dim_out)) + + if is_last: + continue + + net.append(activation()) + + return nn.Sequential(*net) + + +class MaskEstimator(Module): + @beartype + def __init__( + self, + dim, + dim_inputs: Tuple[int, ...], + depth, + mlp_expansion_factor=4 + ): + super().__init__() + self.dim_inputs = dim_inputs + self.to_freqs = ModuleList([]) + dim_hidden = dim * mlp_expansion_factor + + for dim_in in dim_inputs: + net = [] + + mlp = nn.Sequential( + MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth), + nn.GLU(dim=-1) + ) + + self.to_freqs.append(mlp) + + def forward(self, x): + x = x.unbind(dim=-2) + + outs = [] + + for band_features, mlp in zip(x, self.to_freqs): + freq_out = mlp(band_features) + outs.append(freq_out) + + return torch.cat(outs, dim=-1) + + +# main class + +DEFAULT_FREQS_PER_BANDS = ( + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 12, 12, 12, 12, 12, 12, 12, 12, + 24, 24, 24, 24, 24, 24, 24, 24, + 48, 48, 48, 48, 48, 48, 48, 48, + 128, 129, +) + + +class BSRoformer(Module): + + @beartype + def __init__( + self, + dim, + *, + depth, + stereo=False, + num_stems=1, + time_transformer_depth=2, + freq_transformer_depth=2, + linear_transformer_depth=0, + freqs_per_bands: Tuple[int, ...] = DEFAULT_FREQS_PER_BANDS, + # in the paper, they divide into ~60 bands, test with 1 for starters + dim_head=64, + heads=8, + attn_dropout=0., + ff_dropout=0., + flash_attn=True, + dim_freqs_in=1025, + stft_n_fft=2048, + stft_hop_length=512, + # 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction + stft_win_length=2048, + stft_normalized=False, + stft_window_fn: Optional[Callable] = None, + mask_estimator_depth=2, + multi_stft_resolution_loss_weight=1., + multi_stft_resolutions_window_sizes: Tuple[int, ...] 
= (4096, 2048, 1024, 512, 256), + multi_stft_hop_size=147, + multi_stft_normalized=False, + multi_stft_window_fn: Callable = torch.hann_window + ): + super().__init__() + + self.stereo = stereo + self.audio_channels = 2 if stereo else 1 + self.num_stems = num_stems + + self.layers = ModuleList([]) + + transformer_kwargs = dict( + dim=dim, + heads=heads, + dim_head=dim_head, + attn_dropout=attn_dropout, + ff_dropout=ff_dropout, + flash_attn=flash_attn, + norm_output=False + ) + + time_rotary_embed = RotaryEmbedding(dim=dim_head) + freq_rotary_embed = RotaryEmbedding(dim=dim_head) + + for _ in range(depth): + tran_modules = [] + if linear_transformer_depth > 0: + tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs)) + tran_modules.append( + Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs) + ) + tran_modules.append( + Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs) + ) + self.layers.append(nn.ModuleList(tran_modules)) + + self.final_norm = RMSNorm(dim) + + self.stft_kwargs = dict( + n_fft=stft_n_fft, + hop_length=stft_hop_length, + win_length=stft_win_length, + normalized=stft_normalized + ) + + self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length) + + freqs = torch.stft(torch.randn(1, 4096), **self.stft_kwargs, return_complex=True).shape[1] + + assert len(freqs_per_bands) > 1 + assert sum( + freqs_per_bands) == freqs, f'the number of freqs in the bands must equal {freqs} based on the STFT settings, but got {sum(freqs_per_bands)}' + + freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in freqs_per_bands) + + self.band_split = BandSplit( + dim=dim, + dim_inputs=freqs_per_bands_with_complex + ) + + self.mask_estimators = nn.ModuleList([]) + + for _ in range(num_stems): + mask_estimator = MaskEstimator( + dim=dim, + dim_inputs=freqs_per_bands_with_complex, + depth=mask_estimator_depth + ) + + self.mask_estimators.append(mask_estimator) + + # for the multi-resolution stft loss + + self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight + self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes + self.multi_stft_n_fft = stft_n_fft + self.multi_stft_window_fn = multi_stft_window_fn + + self.multi_stft_kwargs = dict( + hop_length=multi_stft_hop_size, + normalized=multi_stft_normalized + ) + + def forward( + self, + raw_audio, + target=None, + return_loss_breakdown=False + ): + """ + einops + + b - batch + f - freq + t - time + s - audio channel (1 for mono, 2 for stereo) + n - number of 'stems' + c - complex (2) + d - feature dimension + """ + + device = raw_audio.device + + if raw_audio.ndim == 2: + raw_audio = rearrange(raw_audio, 'b t -> b 1 t') + + channels = raw_audio.shape[1] + assert (not self.stereo and channels == 1) or ( + self.stereo and channels == 2), 'stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). 
also need to be False if mono (channel dimension of 1)' + + # to stft + + raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, '* t') + + stft_window = self.stft_window_fn(device=device) + + stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True) + stft_repr = torch.view_as_real(stft_repr) + + stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, '* f t c') + stft_repr = rearrange(stft_repr, + 'b s f t c -> b (f s) t c') # merge stereo / mono into the frequency, with frequency leading dimension, for band splitting + + x = rearrange(stft_repr, 'b f t c -> b t (f c)') + + x = self.band_split(x) + + # axial / hierarchical attention + + for transformer_block in self.layers: + + if len(transformer_block) == 3: + linear_transformer, time_transformer, freq_transformer = transformer_block + + x, ft_ps = pack([x], 'b * d') + x = linear_transformer(x) + x, = unpack(x, ft_ps, 'b * d') + else: + time_transformer, freq_transformer = transformer_block + + x = rearrange(x, 'b t f d -> b f t d') + x, ps = pack([x], '* t d') + + x = time_transformer(x) + + x, = unpack(x, ps, '* t d') + x = rearrange(x, 'b f t d -> b t f d') + x, ps = pack([x], '* f d') + + x = freq_transformer(x) + + x, = unpack(x, ps, '* f d') + + x = self.final_norm(x) + + num_stems = len(self.mask_estimators) + + mask = torch.stack([fn(x) for fn in self.mask_estimators], dim=1) + mask = rearrange(mask, 'b n t (f c) -> b n f t c', c=2) + + # modulate frequency representation + + stft_repr = rearrange(stft_repr, 'b f t c -> b 1 f t c') + + # complex number multiplication + + stft_repr = torch.view_as_complex(stft_repr) + mask = torch.view_as_complex(mask) + + stft_repr = stft_repr * mask + + # istft + + stft_repr = rearrange(stft_repr, 'b n (f s) t -> (b n s) f t', s=self.audio_channels) + + recon_audio = torch.istft(stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False) + + recon_audio = rearrange(recon_audio, '(b n s) t -> b n s t', s=self.audio_channels, n=num_stems) + + if num_stems == 1: + recon_audio = rearrange(recon_audio, 'b 1 s t -> b s t') + + # if a target is passed in, calculate loss for learning + + if not exists(target): + return recon_audio + + if self.num_stems > 1: + assert target.ndim == 4 and target.shape[1] == self.num_stems + + if target.ndim == 2: + target = rearrange(target, '... t -> ... 1 t') + + target = target[..., :recon_audio.shape[-1]] # protect against lost length on istft + + loss = F.l1_loss(recon_audio, target) + + multi_stft_resolution_loss = 0. + + for window_size in self.multi_stft_resolutions_window_sizes: + res_stft_kwargs = dict( + n_fft=max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft + win_length=window_size, + return_complex=True, + window=self.multi_stft_window_fn(window_size, device=device), + **self.multi_stft_kwargs, + ) + + recon_Y = torch.stft(rearrange(recon_audio, '... s t -> (... s) t'), **res_stft_kwargs) + target_Y = torch.stft(rearrange(target, '... s t -> (... 
s) t'), **res_stft_kwargs) + + multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y) + + weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight + + total_loss = loss + weighted_multi_resolution_loss + + if not return_loss_breakdown: + return total_loss + + return total_loss, (loss, multi_stft_resolution_loss) \ No newline at end of file diff --git a/data_pipeline/seperation/models/bs_roformer/mel_band_roformer.py b/data_pipeline/seperation/models/bs_roformer/mel_band_roformer.py new file mode 100644 index 0000000000000000000000000000000000000000..a28fa2fd3f052ce32e83009c66e00073201dc74e --- /dev/null +++ b/data_pipeline/seperation/models/bs_roformer/mel_band_roformer.py @@ -0,0 +1,637 @@ +from functools import partial + +import torch +from torch import nn, einsum, Tensor +from torch.nn import Module, ModuleList +import torch.nn.functional as F + +from models.bs_roformer.attend import Attend + +from beartype.typing import Tuple, Optional, List, Callable +from beartype import beartype + +from rotary_embedding_torch import RotaryEmbedding + +from einops import rearrange, pack, unpack, reduce, repeat +from einops.layers.torch import Rearrange + +from librosa import filters + + +# helper functions + +def exists(val): + return val is not None + + +def default(v, d): + return v if exists(v) else d + + +def pack_one(t, pattern): + return pack([t], pattern) + + +def unpack_one(t, ps, pattern): + return unpack(t, ps, pattern)[0] + + +def pad_at_dim(t, pad, dim=-1, value=0.): + dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1) + zeros = ((0, 0) * dims_from_right) + return F.pad(t, (*zeros, *pad), value=value) + + +def l2norm(t): + return F.normalize(t, dim=-1, p=2) + + +# norm + +class RMSNorm(Module): + def __init__(self, dim): + super().__init__() + self.scale = dim ** 0.5 + self.gamma = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + return F.normalize(x, dim=-1) * self.scale * self.gamma + + +# attention + +class FeedForward(Module): + def __init__( + self, + dim, + mult=4, + dropout=0. 
+ ): + super().__init__() + dim_inner = int(dim * mult) + self.net = nn.Sequential( + RMSNorm(dim), + nn.Linear(dim, dim_inner), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(dim_inner, dim), + nn.Dropout(dropout) + ) + + def forward(self, x): + return self.net(x) + + +class Attention(Module): + def __init__( + self, + dim, + heads=8, + dim_head=64, + dropout=0., + rotary_embed=None, + flash=True + ): + super().__init__() + self.heads = heads + self.scale = dim_head ** -0.5 + dim_inner = heads * dim_head + + self.rotary_embed = rotary_embed + + self.attend = Attend(flash=flash, dropout=dropout) + + self.norm = RMSNorm(dim) + self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False) + + self.to_gates = nn.Linear(dim, heads) + + self.to_out = nn.Sequential( + nn.Linear(dim_inner, dim, bias=False), + nn.Dropout(dropout) + ) + + def forward(self, x): + x = self.norm(x) + + q, k, v = rearrange(self.to_qkv(x), 'b n (qkv h d) -> qkv b h n d', qkv=3, h=self.heads) + + if exists(self.rotary_embed): + q = self.rotary_embed.rotate_queries_or_keys(q) + k = self.rotary_embed.rotate_queries_or_keys(k) + + out = self.attend(q, k, v) + + gates = self.to_gates(x) + out = out * rearrange(gates, 'b n h -> b h n 1').sigmoid() + + out = rearrange(out, 'b h n d -> b n (h d)') + return self.to_out(out) + + +class LinearAttention(Module): + """ + this flavor of linear attention proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al. + """ + + @beartype + def __init__( + self, + *, + dim, + dim_head=32, + heads=8, + scale=8, + flash=False, + dropout=0. + ): + super().__init__() + dim_inner = dim_head * heads + self.norm = RMSNorm(dim) + + self.to_qkv = nn.Sequential( + nn.Linear(dim, dim_inner * 3, bias=False), + Rearrange('b n (qkv h d) -> qkv b h d n', qkv=3, h=heads) + ) + + self.temperature = nn.Parameter(torch.ones(heads, 1, 1)) + + self.attend = Attend( + scale=scale, + dropout=dropout, + flash=flash + ) + + self.to_out = nn.Sequential( + Rearrange('b h d n -> b n (h d)'), + nn.Linear(dim_inner, dim, bias=False) + ) + + def forward( + self, + x + ): + x = self.norm(x) + + q, k, v = self.to_qkv(x) + + q, k = map(l2norm, (q, k)) + q = q * self.temperature.exp() + + out = self.attend(q, k, v) + + return self.to_out(out) + + +class Transformer(Module): + def __init__( + self, + *, + dim, + depth, + dim_head=64, + heads=8, + attn_dropout=0., + ff_dropout=0., + ff_mult=4, + norm_output=True, + rotary_embed=None, + flash_attn=True, + linear_attn=False + ): + super().__init__() + self.layers = ModuleList([]) + + for _ in range(depth): + if linear_attn: + attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn) + else: + attn = Attention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, + rotary_embed=rotary_embed, flash=flash_attn) + + self.layers.append(ModuleList([ + attn, + FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout) + ])) + + self.norm = RMSNorm(dim) if norm_output else nn.Identity() + + def forward(self, x): + + for attn, ff in self.layers: + x = attn(x) + x + x = ff(x) + x + + return self.norm(x) + + +# bandsplit module + +class BandSplit(Module): + @beartype + def __init__( + self, + dim, + dim_inputs: Tuple[int, ...] 
+ ): + super().__init__() + self.dim_inputs = dim_inputs + self.to_features = ModuleList([]) + + for dim_in in dim_inputs: + net = nn.Sequential( + RMSNorm(dim_in), + nn.Linear(dim_in, dim) + ) + + self.to_features.append(net) + + def forward(self, x): + x = x.split(self.dim_inputs, dim=-1) + + outs = [] + for split_input, to_feature in zip(x, self.to_features): + split_output = to_feature(split_input) + outs.append(split_output) + + return torch.stack(outs, dim=-2) + + +def MLP( + dim_in, + dim_out, + dim_hidden=None, + depth=1, + activation=nn.Tanh +): + dim_hidden = default(dim_hidden, dim_in) + + net = [] + dims = (dim_in, *((dim_hidden,) * depth), dim_out) + + for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])): + is_last = ind == (len(dims) - 2) + + net.append(nn.Linear(layer_dim_in, layer_dim_out)) + + if is_last: + continue + + net.append(activation()) + + return nn.Sequential(*net) + + +class MaskEstimator(Module): + @beartype + def __init__( + self, + dim, + dim_inputs: Tuple[int, ...], + depth, + mlp_expansion_factor=4 + ): + super().__init__() + self.dim_inputs = dim_inputs + self.to_freqs = ModuleList([]) + dim_hidden = dim * mlp_expansion_factor + + for dim_in in dim_inputs: + net = [] + + mlp = nn.Sequential( + MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth), + nn.GLU(dim=-1) + ) + + self.to_freqs.append(mlp) + + def forward(self, x): + x = x.unbind(dim=-2) + + outs = [] + + for band_features, mlp in zip(x, self.to_freqs): + freq_out = mlp(band_features) + outs.append(freq_out) + + return torch.cat(outs, dim=-1) + + +# main class + +class MelBandRoformer(Module): + + @beartype + def __init__( + self, + dim, + *, + depth, + stereo=False, + num_stems=1, + time_transformer_depth=2, + freq_transformer_depth=2, + linear_transformer_depth=0, + num_bands=60, + dim_head=64, + heads=8, + attn_dropout=0.1, + ff_dropout=0.1, + flash_attn=True, + dim_freqs_in=1025, + sample_rate=44100, # needed for mel filter bank from librosa + stft_n_fft=2048, + stft_hop_length=512, + # 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction + stft_win_length=2048, + stft_normalized=False, + stft_window_fn: Optional[Callable] = None, + mask_estimator_depth=1, + multi_stft_resolution_loss_weight=1., + multi_stft_resolutions_window_sizes: Tuple[int, ...] 
= (4096, 2048, 1024, 512, 256), + multi_stft_hop_size=147, + multi_stft_normalized=False, + multi_stft_window_fn: Callable = torch.hann_window, + match_input_audio_length=False, # if True, pad output tensor to match length of input tensor + ): + super().__init__() + + self.stereo = stereo + self.audio_channels = 2 if stereo else 1 + self.num_stems = num_stems + + self.layers = ModuleList([]) + + transformer_kwargs = dict( + dim=dim, + heads=heads, + dim_head=dim_head, + attn_dropout=attn_dropout, + ff_dropout=ff_dropout, + flash_attn=flash_attn + ) + + time_rotary_embed = RotaryEmbedding(dim=dim_head) + freq_rotary_embed = RotaryEmbedding(dim=dim_head) + + for _ in range(depth): + tran_modules = [] + if linear_transformer_depth > 0: + tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs)) + tran_modules.append( + Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs) + ) + tran_modules.append( + Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs) + ) + self.layers.append(nn.ModuleList(tran_modules)) + + self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length) + + self.stft_kwargs = dict( + n_fft=stft_n_fft, + hop_length=stft_hop_length, + win_length=stft_win_length, + normalized=stft_normalized + ) + + freqs = torch.stft(torch.randn(1, 4096), **self.stft_kwargs, return_complex=True).shape[1] + + # create mel filter bank + # with librosa.filters.mel as in section 2 of paper + + mel_filter_bank_numpy = filters.mel(sr=sample_rate, n_fft=stft_n_fft, n_mels=num_bands) + + mel_filter_bank = torch.from_numpy(mel_filter_bank_numpy) + + # for some reason, it doesn't include the first freq? just force a value for now + + mel_filter_bank[0][0] = 1. + + # In some systems/envs we get 0.0 instead of ~1.9e-18 in the last position, + # so let's force a positive value + + mel_filter_bank[-1, -1] = 1. 
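The boolean band membership derived next from this mel filter bank is what drives the band split. A standalone look at it (assuming librosa is installed), using the same settings as the constructor defaults shown above: 44.1 kHz, n_fft of 2048, 60 bands, with the DC and Nyquist corners forced on just as in the code:

```python
# Standalone look (assuming librosa is installed) at the band membership derived from the
# mel filter bank above, using the same constructor defaults: 44.1 kHz, n_fft=2048, 60 bands.
import torch
from librosa import filters

sample_rate, n_fft, num_bands = 44100, 2048, 60
mel_fb = torch.from_numpy(filters.mel(sr=sample_rate, n_fft=n_fft, n_mels=num_bands))
mel_fb[0, 0] = 1.0     # force DC bin into the first band, as in the constructor
mel_fb[-1, -1] = 1.0   # force the Nyquist bin into the last band

freqs_per_band = mel_fb > 0                      # (num_bands, n_fft // 2 + 1) boolean membership
print(freqs_per_band.shape)                      # torch.Size([60, 1025])
print(bool(freqs_per_band.any(dim=0).all()))     # True: every STFT bin belongs to some band
print(freqs_per_band.sum(dim=1)[:5])             # number of STFT bins claimed by the first bands
```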
+ + # binary as in paper (then estimated masks are averaged for overlapping regions) + + freqs_per_band = mel_filter_bank > 0 + assert freqs_per_band.any(dim=0).all(), 'all frequencies need to be covered by all bands for now' + + repeated_freq_indices = repeat(torch.arange(freqs), 'f -> b f', b=num_bands) + freq_indices = repeated_freq_indices[freqs_per_band] + + if stereo: + freq_indices = repeat(freq_indices, 'f -> f s', s=2) + freq_indices = freq_indices * 2 + torch.arange(2) + freq_indices = rearrange(freq_indices, 'f s -> (f s)') + + self.register_buffer('freq_indices', freq_indices, persistent=False) + self.register_buffer('freqs_per_band', freqs_per_band, persistent=False) + + num_freqs_per_band = reduce(freqs_per_band, 'b f -> b', 'sum') + num_bands_per_freq = reduce(freqs_per_band, 'b f -> f', 'sum') + + self.register_buffer('num_freqs_per_band', num_freqs_per_band, persistent=False) + self.register_buffer('num_bands_per_freq', num_bands_per_freq, persistent=False) + + # band split and mask estimator + + freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in num_freqs_per_band.tolist()) + + self.band_split = BandSplit( + dim=dim, + dim_inputs=freqs_per_bands_with_complex + ) + + self.mask_estimators = nn.ModuleList([]) + + for _ in range(num_stems): + mask_estimator = MaskEstimator( + dim=dim, + dim_inputs=freqs_per_bands_with_complex, + depth=mask_estimator_depth + ) + + self.mask_estimators.append(mask_estimator) + + # for the multi-resolution stft loss + + self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight + self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes + self.multi_stft_n_fft = stft_n_fft + self.multi_stft_window_fn = multi_stft_window_fn + + self.multi_stft_kwargs = dict( + hop_length=multi_stft_hop_size, + normalized=multi_stft_normalized + ) + + self.match_input_audio_length = match_input_audio_length + + def forward( + self, + raw_audio, + target=None, + return_loss_breakdown=False + ): + """ + einops + + b - batch + f - freq + t - time + s - audio channel (1 for mono, 2 for stereo) + n - number of 'stems' + c - complex (2) + d - feature dimension + """ + + device = raw_audio.device + + if raw_audio.ndim == 2: + raw_audio = rearrange(raw_audio, 'b t -> b 1 t') + + batch, channels, raw_audio_length = raw_audio.shape + + istft_length = raw_audio_length if self.match_input_audio_length else None + + assert (not self.stereo and channels == 1) or ( + self.stereo and channels == 2), 'stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). 
also need to be False if mono (channel dimension of 1)' + + # to stft + + raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, '* t') + + stft_window = self.stft_window_fn(device=device) + + stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True) + stft_repr = torch.view_as_real(stft_repr) + + stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, '* f t c') + stft_repr = rearrange(stft_repr, + 'b s f t c -> b (f s) t c') # merge stereo / mono into the frequency, with frequency leading dimension, for band splitting + + # index out all frequencies for all frequency ranges across bands ascending in one go + + batch_arange = torch.arange(batch, device=device)[..., None] + + # account for stereo + + x = stft_repr[batch_arange, self.freq_indices] + + # fold the complex (real and imag) into the frequencies dimension + + x = rearrange(x, 'b f t c -> b t (f c)') + + x = self.band_split(x) + + # axial / hierarchical attention + + for transformer_block in self.layers: + + if len(transformer_block) == 3: + linear_transformer, time_transformer, freq_transformer = transformer_block + + x, ft_ps = pack([x], 'b * d') + x = linear_transformer(x) + x, = unpack(x, ft_ps, 'b * d') + else: + time_transformer, freq_transformer = transformer_block + + x = rearrange(x, 'b t f d -> b f t d') + x, ps = pack([x], '* t d') + + x = time_transformer(x) + + x, = unpack(x, ps, '* t d') + x = rearrange(x, 'b f t d -> b t f d') + x, ps = pack([x], '* f d') + + x = freq_transformer(x) + + x, = unpack(x, ps, '* f d') + + num_stems = len(self.mask_estimators) + + masks = torch.stack([fn(x) for fn in self.mask_estimators], dim=1) + masks = rearrange(masks, 'b n t (f c) -> b n f t c', c=2) + + # modulate frequency representation + + stft_repr = rearrange(stft_repr, 'b f t c -> b 1 f t c') + + # complex number multiplication + + stft_repr = torch.view_as_complex(stft_repr) + masks = torch.view_as_complex(masks) + + masks = masks.type(stft_repr.dtype) + + # need to average the estimated mask for the overlapped frequencies + + scatter_indices = repeat(self.freq_indices, 'f -> b n f t', b=batch, n=num_stems, t=stft_repr.shape[-1]) + + stft_repr_expanded_stems = repeat(stft_repr, 'b 1 ... -> b n ...', n=num_stems) + masks_summed = torch.zeros_like(stft_repr_expanded_stems).scatter_add_(2, scatter_indices, masks) + + denom = repeat(self.num_bands_per_freq, 'f -> (f r) 1', r=channels) + + masks_averaged = masks_summed / denom.clamp(min=1e-8) + + # modulate stft repr with estimated mask + + stft_repr = stft_repr * masks_averaged + + # istft + + stft_repr = rearrange(stft_repr, 'b n (f s) t -> (b n s) f t', s=self.audio_channels) + + recon_audio = torch.istft(stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False, + length=istft_length) + + recon_audio = rearrange(recon_audio, '(b n s) t -> b n s t', b=batch, s=self.audio_channels, n=num_stems) + + if num_stems == 1: + recon_audio = rearrange(recon_audio, 'b 1 s t -> b s t') + + # if a target is passed in, calculate loss for learning + + if not exists(target): + return recon_audio + + if self.num_stems > 1: + assert target.ndim == 4 and target.shape[1] == self.num_stems + + if target.ndim == 2: + target = rearrange(target, '... t -> ... 1 t') + + target = target[..., :recon_audio.shape[-1]] # protect against lost length on istft + + loss = F.l1_loss(recon_audio, target) + + multi_stft_resolution_loss = 0. 
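Just above, the masks predicted per mel band are scattered back onto STFT bins with scatter_add_ and divided by how many bands cover each bin, so overlapping bands are averaged. A toy sketch of that averaging step, with made-up indices rather than the real freq_indices / num_bands_per_freq buffers:

```python
# Toy sketch of the scatter_add_ averaging used just above: per-band mask values are summed
# back onto their STFT bins and divided by how many bands cover each bin. Indices here are
# made up; the real model uses the freq_indices / num_bands_per_freq buffers.
import torch

n_freqs = 6
freq_indices = torch.tensor([0, 1, 2, 3, 2, 3, 4, 5])                 # bin index per band entry
num_bands_per_freq = torch.bincount(freq_indices, minlength=n_freqs)  # coverage count per bin

band_masks = torch.arange(1.0, float(len(freq_indices)) + 1)          # one mask value per entry
summed = torch.zeros(n_freqs).scatter_add_(0, freq_indices, band_masks)
averaged = summed / num_bands_per_freq.clamp(min=1)

print(num_bands_per_freq)  # tensor([1, 1, 2, 2, 1, 1])
print(averaged)            # overlapping bins (2 and 3) hold the mean of their two contributions
```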
+ + for window_size in self.multi_stft_resolutions_window_sizes: + res_stft_kwargs = dict( + n_fft=max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft + win_length=window_size, + return_complex=True, + window=self.multi_stft_window_fn(window_size, device=device), + **self.multi_stft_kwargs, + ) + + recon_Y = torch.stft(rearrange(recon_audio, '... s t -> (... s) t'), **res_stft_kwargs) + target_Y = torch.stft(rearrange(target, '... s t -> (... s) t'), **res_stft_kwargs) + + multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y) + + weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight + + total_loss = loss + weighted_multi_resolution_loss + + if not return_loss_breakdown: + return total_loss + + return total_loss, (loss, multi_stft_resolution_loss) diff --git a/data_pipeline/seperation/models/demucs4ht.py b/data_pipeline/seperation/models/demucs4ht.py new file mode 100644 index 0000000000000000000000000000000000000000..06c279c31a7ac7e12af4375a5715eb291ad5405c --- /dev/null +++ b/data_pipeline/seperation/models/demucs4ht.py @@ -0,0 +1,713 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +import numpy as np +import torch +import json +from omegaconf import OmegaConf +from demucs.demucs import Demucs +from demucs.hdemucs import HDemucs + +import math +from openunmix.filtering import wiener +from torch import nn +from torch.nn import functional as F +from fractions import Fraction +from einops import rearrange + +from demucs.transformer import CrossTransformerEncoder + +from demucs.demucs import rescale_module +from demucs.states import capture_init +from demucs.spec import spectro, ispectro +from demucs.hdemucs import pad1d, ScaledEmbedding, HEncLayer, MultiWrap, HDecLayer + + +class HTDemucs(nn.Module): + """ + Spectrogram and hybrid Demucs model. + The spectrogram model has the same structure as Demucs, except the first few layers are over the + frequency axis, until there is only 1 frequency, and then it moves to time convolutions. + Frequency layers can still access information across time steps thanks to the DConv residual. + + Hybrid models have a parallel time branch. At some layer, the time branch has the same stride + as the frequency branch and then the two are combined. The opposite happens in the decoder. + + Models can either use naive iSTFT from masking, Wiener filtering ([Uhlich et al. 2017]), + or complex as channels (CaC) [Choi et al. 2020]. Wiener filtering is based on + the Open Unmix implementation [Stöter et al. 2019]. + + The loss is always on the temporal domain, by backpropagating through the above + output methods and iSTFT. This makes it possible to define hybrid models nicely. However, it slightly breaks + Wiener filtering, as doing more iterations at test time will change the spectrogram + contribution without changing the one from the waveform, which will lead to worse performance. + I tried using the residual option in the Open Unmix Wiener implementation, but it didn't improve. + CaC, on the other hand, provides similar performance and works naturally with + hybrid models. + + This model also uses frequency embeddings to improve the efficiency of convolutions + over the freq. axis, following [Isik et al. 2020] (https://arxiv.org/pdf/2008.04470.pdf). + + Unlike classic Demucs, there is no resampling here, and normalization is always applied.
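As a standalone illustration of the complex-as-channels (CaC) representation mentioned in this docstring (arbitrary STFT settings chosen here, not the model's own code): the complex STFT is viewed as two real channels per audio channel, processed as ordinary feature maps, and viewed back as a complex spectrogram before the iSTFT.

```python
# Standalone illustration of "complex as channels" (CaC) with arbitrary STFT settings,
# not the HTDemucs code itself: the complex STFT becomes two real channels per audio
# channel, and the network output is viewed back as a complex spectrogram for the iSTFT.
import torch

x = torch.randn(2, 44100)                        # (channels, time) for one stereo example
window = torch.hann_window(4096)
spec = torch.stft(x, n_fft=4096, hop_length=1024, window=window,
                  return_complex=True)           # (channels, freq, frames), complex
cac = torch.view_as_real(spec)                   # (channels, freq, frames, 2): real/imag last
cac = cac.permute(0, 3, 1, 2).reshape(-1, spec.shape[-2], spec.shape[-1])
print(cac.shape)                                 # torch.Size([4, 2049, 44]): 2 audio ch x (re, im)

# ... a network would process these 2 * channels real feature maps; here we just undo it ...
out = torch.view_as_complex(
    cac.reshape(2, 2, spec.shape[-2], spec.shape[-1]).permute(0, 2, 3, 1).contiguous()
)
wave = torch.istft(out, n_fft=4096, hop_length=1024, window=window, length=44100)
print(torch.allclose(wave, x, atol=1e-5))        # True: the round trip recovers the waveform
```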
+ """ + + @capture_init + def __init__( + self, + sources, + # Channels + audio_channels=2, + channels=48, + channels_time=None, + growth=2, + # STFT + nfft=4096, + num_subbands=1, + wiener_iters=0, + end_iters=0, + wiener_residual=False, + cac=True, + # Main structure + depth=4, + rewrite=True, + # Frequency branch + multi_freqs=None, + multi_freqs_depth=3, + freq_emb=0.2, + emb_scale=10, + emb_smooth=True, + # Convolutions + kernel_size=8, + time_stride=2, + stride=4, + context=1, + context_enc=0, + # Normalization + norm_starts=4, + norm_groups=4, + # DConv residual branch + dconv_mode=1, + dconv_depth=2, + dconv_comp=8, + dconv_init=1e-3, + # Before the Transformer + bottom_channels=0, + # Transformer + t_layers=5, + t_emb="sin", + t_hidden_scale=4.0, + t_heads=8, + t_dropout=0.0, + t_max_positions=10000, + t_norm_in=True, + t_norm_in_group=False, + t_group_norm=False, + t_norm_first=True, + t_norm_out=True, + t_max_period=10000.0, + t_weight_decay=0.0, + t_lr=None, + t_layer_scale=True, + t_gelu=True, + t_weight_pos_embed=1.0, + t_sin_random_shift=0, + t_cape_mean_normalize=True, + t_cape_augment=True, + t_cape_glob_loc_scale=[5000.0, 1.0, 1.4], + t_sparse_self_attn=False, + t_sparse_cross_attn=False, + t_mask_type="diag", + t_mask_random_seed=42, + t_sparse_attn_window=500, + t_global_window=100, + t_sparsity=0.95, + t_auto_sparsity=False, + # ------ Particuliar parameters + t_cross_first=False, + # Weight init + rescale=0.1, + # Metadata + samplerate=44100, + segment=10, + use_train_segment=False, + ): + """ + Args: + sources (list[str]): list of source names. + audio_channels (int): input/output audio channels. + channels (int): initial number of hidden channels. + channels_time: if not None, use a different `channels` value for the time branch. + growth: increase the number of hidden channels by this factor at each layer. + nfft: number of fft bins. Note that changing this require careful computation of + various shape parameters and will not work out of the box for hybrid models. + wiener_iters: when using Wiener filtering, number of iterations at test time. + end_iters: same but at train time. For a hybrid model, must be equal to `wiener_iters`. + wiener_residual: add residual source before wiener filtering. + cac: uses complex as channels, i.e. complex numbers are 2 channels each + in input and output. no further processing is done before ISTFT. + depth (int): number of layers in the encoder and in the decoder. + rewrite (bool): add 1x1 convolution to each layer. + multi_freqs: list of frequency ratios for splitting frequency bands with `MultiWrap`. + multi_freqs_depth: how many layers to wrap with `MultiWrap`. Only the outermost + layers will be wrapped. + freq_emb: add frequency embedding after the first frequency layer if > 0, + the actual value controls the weight of the embedding. + emb_scale: equivalent to scaling the embedding learning rate + emb_smooth: initialize the embedding with a smooth one (with respect to frequencies). + kernel_size: kernel_size for encoder and decoder layers. + stride: stride for encoder and decoder layers. + time_stride: stride for the final time layer, after the merge. + context: context for 1x1 conv in the decoder. + context_enc: context for 1x1 conv in the encoder. + norm_starts: layer at which group norm starts being used. + decoder layers are numbered in reverse order. + norm_groups: number of groups for group norm. + dconv_mode: if 1: dconv in encoder only, 2: decoder only, 3: both. + dconv_depth: depth of residual DConv branch. 
+ dconv_comp: compression of DConv branch. + dconv_attn: adds attention layers in DConv branch starting at this layer. + dconv_lstm: adds a LSTM layer in DConv branch starting at this layer. + dconv_init: initial scale for the DConv branch LayerScale. + bottom_channels: if >0 it adds a linear layer (1x1 Conv) before and after the + transformer in order to change the number of channels + t_layers: number of layers in each branch (waveform and spec) of the transformer + t_emb: "sin", "cape" or "scaled" + t_hidden_scale: the hidden scale of the Feedforward parts of the transformer + for instance if C = 384 (the number of channels in the transformer) and + t_hidden_scale = 4.0 then the intermediate layer of the FFN has dimension + 384 * 4 = 1536 + t_heads: number of heads for the transformer + t_dropout: dropout in the transformer + t_max_positions: max_positions for the "scaled" positional embedding, only + useful if t_emb="scaled" + t_norm_in: (bool) norm before addinf positional embedding and getting into the + transformer layers + t_norm_in_group: (bool) if True while t_norm_in=True, the norm is on all the + timesteps (GroupNorm with group=1) + t_group_norm: (bool) if True, the norms of the Encoder Layers are on all the + timesteps (GroupNorm with group=1) + t_norm_first: (bool) if True the norm is before the attention and before the FFN + t_norm_out: (bool) if True, there is a GroupNorm (group=1) at the end of each layer + t_max_period: (float) denominator in the sinusoidal embedding expression + t_weight_decay: (float) weight decay for the transformer + t_lr: (float) specific learning rate for the transformer + t_layer_scale: (bool) Layer Scale for the transformer + t_gelu: (bool) activations of the transformer are GeLU if True, ReLU else + t_weight_pos_embed: (float) weighting of the positional embedding + t_cape_mean_normalize: (bool) if t_emb="cape", normalisation of positional embeddings + see: https://arxiv.org/abs/2106.03143 + t_cape_augment: (bool) if t_emb="cape", must be True during training and False + during the inference, see: https://arxiv.org/abs/2106.03143 + t_cape_glob_loc_scale: (list of 3 floats) if t_emb="cape", CAPE parameters + see: https://arxiv.org/abs/2106.03143 + t_sparse_self_attn: (bool) if True, the self attentions are sparse + t_sparse_cross_attn: (bool) if True, the cross-attentions are sparse (don't use it + unless you designed really specific masks) + t_mask_type: (str) can be "diag", "jmask", "random", "global" or any combination + with '_' between: i.e. "diag_jmask_random" (note that this is permutation + invariant i.e. "diag_jmask_random" is equivalent to "jmask_random_diag") + t_mask_random_seed: (int) if "random" is in t_mask_type, controls the seed + that generated the random part of the mask + t_sparse_attn_window: (int) if "diag" is in t_mask_type, for a query (i), and + a key (j), the mask is True id |i-j|<=t_sparse_attn_window + t_global_window: (int) if "global" is in t_mask_type, mask[:t_global_window, :] + and mask[:, :t_global_window] will be True + t_sparsity: (float) if "random" is in t_mask_type, t_sparsity is the sparsity + level of the random part of the mask. + t_cross_first: (bool) if True cross attention is the first layer of the + transformer (False seems to be better) + rescale: weight rescaling trick + use_train_segment: (bool) if True, the actual size that is used during the + training is used during inference. 
+ """ + super().__init__() + self.num_subbands = num_subbands + self.cac = cac + self.wiener_residual = wiener_residual + self.audio_channels = audio_channels + self.sources = sources + self.kernel_size = kernel_size + self.context = context + self.stride = stride + self.depth = depth + self.bottom_channels = bottom_channels + self.channels = channels + self.samplerate = samplerate + self.segment = segment + self.use_train_segment = use_train_segment + self.nfft = nfft + self.hop_length = nfft // 4 + self.wiener_iters = wiener_iters + self.end_iters = end_iters + self.freq_emb = None + assert wiener_iters == end_iters + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + self.tencoder = nn.ModuleList() + self.tdecoder = nn.ModuleList() + + chin = audio_channels + chin_z = chin # number of channels for the freq branch + if self.cac: + chin_z *= 2 + if self.num_subbands > 1: + chin_z *= self.num_subbands + chout = channels_time or channels + chout_z = channels + freqs = nfft // 2 + + for index in range(depth): + norm = index >= norm_starts + freq = freqs > 1 + stri = stride + ker = kernel_size + if not freq: + assert freqs == 1 + ker = time_stride * 2 + stri = time_stride + + pad = True + last_freq = False + if freq and freqs <= kernel_size: + ker = freqs + pad = False + last_freq = True + + kw = { + "kernel_size": ker, + "stride": stri, + "freq": freq, + "pad": pad, + "norm": norm, + "rewrite": rewrite, + "norm_groups": norm_groups, + "dconv_kw": { + "depth": dconv_depth, + "compress": dconv_comp, + "init": dconv_init, + "gelu": True, + }, + } + kwt = dict(kw) + kwt["freq"] = 0 + kwt["kernel_size"] = kernel_size + kwt["stride"] = stride + kwt["pad"] = True + kw_dec = dict(kw) + multi = False + if multi_freqs and index < multi_freqs_depth: + multi = True + kw_dec["context_freq"] = False + + if last_freq: + chout_z = max(chout, chout_z) + chout = chout_z + + enc = HEncLayer( + chin_z, chout_z, dconv=dconv_mode & 1, context=context_enc, **kw + ) + if freq: + tenc = HEncLayer( + chin, + chout, + dconv=dconv_mode & 1, + context=context_enc, + empty=last_freq, + **kwt + ) + self.tencoder.append(tenc) + + if multi: + enc = MultiWrap(enc, multi_freqs) + self.encoder.append(enc) + if index == 0: + chin = self.audio_channels * len(self.sources) + chin_z = chin + if self.cac: + chin_z *= 2 + if self.num_subbands > 1: + chin_z *= self.num_subbands + dec = HDecLayer( + chout_z, + chin_z, + dconv=dconv_mode & 2, + last=index == 0, + context=context, + **kw_dec + ) + if multi: + dec = MultiWrap(dec, multi_freqs) + if freq: + tdec = HDecLayer( + chout, + chin, + dconv=dconv_mode & 2, + empty=last_freq, + last=index == 0, + context=context, + **kwt + ) + self.tdecoder.insert(0, tdec) + self.decoder.insert(0, dec) + + chin = chout + chin_z = chout_z + chout = int(growth * chout) + chout_z = int(growth * chout_z) + if freq: + if freqs <= kernel_size: + freqs = 1 + else: + freqs //= stride + if index == 0 and freq_emb: + self.freq_emb = ScaledEmbedding( + freqs, chin_z, smooth=emb_smooth, scale=emb_scale + ) + self.freq_emb_scale = freq_emb + + if rescale: + rescale_module(self, reference=rescale) + + transformer_channels = channels * growth ** (depth - 1) + if bottom_channels: + self.channel_upsampler = nn.Conv1d(transformer_channels, bottom_channels, 1) + self.channel_downsampler = nn.Conv1d( + bottom_channels, transformer_channels, 1 + ) + self.channel_upsampler_t = nn.Conv1d( + transformer_channels, bottom_channels, 1 + ) + self.channel_downsampler_t = nn.Conv1d( + bottom_channels, 
transformer_channels, 1 + ) + + transformer_channels = bottom_channels + + if t_layers > 0: + self.crosstransformer = CrossTransformerEncoder( + dim=transformer_channels, + emb=t_emb, + hidden_scale=t_hidden_scale, + num_heads=t_heads, + num_layers=t_layers, + cross_first=t_cross_first, + dropout=t_dropout, + max_positions=t_max_positions, + norm_in=t_norm_in, + norm_in_group=t_norm_in_group, + group_norm=t_group_norm, + norm_first=t_norm_first, + norm_out=t_norm_out, + max_period=t_max_period, + weight_decay=t_weight_decay, + lr=t_lr, + layer_scale=t_layer_scale, + gelu=t_gelu, + sin_random_shift=t_sin_random_shift, + weight_pos_embed=t_weight_pos_embed, + cape_mean_normalize=t_cape_mean_normalize, + cape_augment=t_cape_augment, + cape_glob_loc_scale=t_cape_glob_loc_scale, + sparse_self_attn=t_sparse_self_attn, + sparse_cross_attn=t_sparse_cross_attn, + mask_type=t_mask_type, + mask_random_seed=t_mask_random_seed, + sparse_attn_window=t_sparse_attn_window, + global_window=t_global_window, + sparsity=t_sparsity, + auto_sparsity=t_auto_sparsity, + ) + else: + self.crosstransformer = None + + def _spec(self, x): + hl = self.hop_length + nfft = self.nfft + x0 = x # noqa + + # We re-pad the signal in order to keep the property + # that the size of the output is exactly the size of the input + # divided by the stride (here hop_length), when divisible. + # This is achieved by padding by 1/4th of the kernel size (here nfft). + # which is not supported by torch.stft. + # Having all convolution operations follow this convention allow to easily + # align the time and frequency branches later on. + assert hl == nfft // 4 + le = int(math.ceil(x.shape[-1] / hl)) + pad = hl // 2 * 3 + x = pad1d(x, (pad, pad + le * hl - x.shape[-1]), mode="reflect") + + z = spectro(x, nfft, hl)[..., :-1, :] + assert z.shape[-1] == le + 4, (z.shape, x.shape, le) + z = z[..., 2: 2 + le] + return z + + def _ispec(self, z, length=None, scale=0): + hl = self.hop_length // (4**scale) + z = F.pad(z, (0, 0, 0, 1)) + z = F.pad(z, (2, 2)) + pad = hl // 2 * 3 + le = hl * int(math.ceil(length / hl)) + 2 * pad + x = ispectro(z, hl, length=le) + x = x[..., pad: pad + length] + return x + + def _magnitude(self, z): + # return the magnitude of the spectrogram, except when cac is True, + # in which case we just move the complex dimension to the channel one. + if self.cac: + B, C, Fr, T = z.shape + m = torch.view_as_real(z).permute(0, 1, 4, 2, 3) + m = m.reshape(B, C * 2, Fr, T) + else: + m = z.abs() + return m + + def _mask(self, z, m): + # Apply masking given the mixture spectrogram `z` and the estimated mask `m`. + # If `cac` is True, `m` is actually a full spectrogram and `z` is ignored. + niters = self.wiener_iters + if self.cac: + B, S, C, Fr, T = m.shape + out = m.view(B, S, -1, 2, Fr, T).permute(0, 1, 2, 4, 5, 3) + out = torch.view_as_complex(out.contiguous()) + return out + if self.training: + niters = self.end_iters + if niters < 0: + z = z[:, None] + return z / (1e-8 + z.abs()) * m + else: + return self._wiener(m, z, niters) + + def _wiener(self, mag_out, mix_stft, niters): + # apply wiener filtering from OpenUnmix. 
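# Editor's note (annotation, not part of this patch): the body below rearranges the magnitude
# estimates and the mixture STFT into the (frames, bins, channels, sources) layout that
# openunmix.filtering.wiener expects, runs the iterative Wiener filter over windows of 300 frames
# to keep memory bounded, optionally models an extra residual source (dropped afterwards), and
# finally restores the original (batch, sources, channels, bins, frames) layout and input dtype.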
+ init = mix_stft.dtype + wiener_win_len = 300 + residual = self.wiener_residual + + B, S, C, Fq, T = mag_out.shape + mag_out = mag_out.permute(0, 4, 3, 2, 1) + mix_stft = torch.view_as_real(mix_stft.permute(0, 3, 2, 1)) + + outs = [] + for sample in range(B): + pos = 0 + out = [] + for pos in range(0, T, wiener_win_len): + frame = slice(pos, pos + wiener_win_len) + z_out = wiener( + mag_out[sample, frame], + mix_stft[sample, frame], + niters, + residual=residual, + ) + out.append(z_out.transpose(-1, -2)) + outs.append(torch.cat(out, dim=0)) + out = torch.view_as_complex(torch.stack(outs, 0)) + out = out.permute(0, 4, 3, 2, 1).contiguous() + if residual: + out = out[:, :-1] + assert list(out.shape) == [B, S, C, Fq, T] + return out.to(init) + + def valid_length(self, length: int): + """ + Return a length that is appropriate for evaluation. + In our case, always return the training length, unless + it is smaller than the given length, in which case this + raises an error. + """ + if not self.use_train_segment: + return length + training_length = int(self.segment * self.samplerate) + if training_length < length: + raise ValueError( + f"Given length {length} is longer than " + f"training length {training_length}") + return training_length + + def cac2cws(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c, k, f // k, t) + x = x.reshape(b, c * k, f // k, t) + return x + + def cws2cac(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c // k, k, f, t) + x = x.reshape(b, c // k, f * k, t) + return x + + def forward(self, mix): + length = mix.shape[-1] + length_pre_pad = None + if self.use_train_segment: + if self.training: + self.segment = Fraction(mix.shape[-1], self.samplerate) + else: + training_length = int(self.segment * self.samplerate) + # print('Training length: {} Segment: {} Sample rate: {}'.format(training_length, self.segment, self.samplerate)) + if mix.shape[-1] < training_length: + length_pre_pad = mix.shape[-1] + mix = F.pad(mix, (0, training_length - length_pre_pad)) + # print("Mix: {}".format(mix.shape)) + # print("Length: {}".format(length)) + z = self._spec(mix) + # print("Z: {} Type: {}".format(z.shape, z.dtype)) + mag = self._magnitude(z) + x = mag + # print("MAG: {} Type: {}".format(x.shape, x.dtype)) + + if self.num_subbands > 1: + x = self.cac2cws(x) + # print("After SUBBANDS: {} Type: {}".format(x.shape, x.dtype)) + + B, C, Fq, T = x.shape + + # unlike previous Demucs, we always normalize because it is easier. + mean = x.mean(dim=(1, 2, 3), keepdim=True) + std = x.std(dim=(1, 2, 3), keepdim=True) + x = (x - mean) / (1e-5 + std) + # x will be the freq. branch input. + + # Prepare the time branch input. + xt = mix + meant = xt.mean(dim=(1, 2), keepdim=True) + stdt = xt.std(dim=(1, 2), keepdim=True) + xt = (xt - meant) / (1e-5 + stdt) + + # print("XT: {}".format(xt.shape)) + + # okay, this is a giant mess I know... + saved = [] # skip connections, freq. + saved_t = [] # skip connections, time. + lengths = [] # saved lengths to properly remove padding, freq branch. + lengths_t = [] # saved lengths for time branch. + for idx, encode in enumerate(self.encoder): + lengths.append(x.shape[-1]) + inject = None + if idx < len(self.tencoder): + # we have not yet merged branches. 
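# Editor's note (annotation, not part of this patch): while idx < len(self.tencoder) the waveform
# branch is encoded in parallel with the spectrogram branch; ordinary layers store xt as a skip
# connection, but the final "empty" tencoder layer applies only its first convolution so that xt
# matches the spectrogram branch's shape and is injected via encode(x, inject) instead of saved.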
+ lengths_t.append(xt.shape[-1]) + tenc = self.tencoder[idx] + xt = tenc(xt) + # print("Encode XT {}: {}".format(idx, xt.shape)) + if not tenc.empty: + # save for skip connection + saved_t.append(xt) + else: + # tenc contains just the first conv., so that now time and freq. + # branches have the same shape and can be merged. + inject = xt + x = encode(x, inject) + # print("Encode X {}: {}".format(idx, x.shape)) + if idx == 0 and self.freq_emb is not None: + # add frequency embedding to allow for non equivariant convolutions + # over the frequency axis. + frs = torch.arange(x.shape[-2], device=x.device) + emb = self.freq_emb(frs).t()[None, :, :, None].expand_as(x) + x = x + self.freq_emb_scale * emb + + saved.append(x) + if self.crosstransformer: + if self.bottom_channels: + b, c, f, t = x.shape + x = rearrange(x, "b c f t-> b c (f t)") + x = self.channel_upsampler(x) + x = rearrange(x, "b c (f t)-> b c f t", f=f) + xt = self.channel_upsampler_t(xt) + + x, xt = self.crosstransformer(x, xt) + # print("Cross Tran X {}, XT: {}".format(x.shape, xt.shape)) + + if self.bottom_channels: + x = rearrange(x, "b c f t-> b c (f t)") + x = self.channel_downsampler(x) + x = rearrange(x, "b c (f t)-> b c f t", f=f) + xt = self.channel_downsampler_t(xt) + + for idx, decode in enumerate(self.decoder): + skip = saved.pop(-1) + x, pre = decode(x, skip, lengths.pop(-1)) + # print('Decode {} X: {}'.format(idx, x.shape)) + # `pre` contains the output just before final transposed convolution, + # which is used when the freq. and time branch separate. + + offset = self.depth - len(self.tdecoder) + if idx >= offset: + tdec = self.tdecoder[idx - offset] + length_t = lengths_t.pop(-1) + if tdec.empty: + assert pre.shape[2] == 1, pre.shape + pre = pre[:, :, 0] + xt, _ = tdec(pre, None, length_t) + else: + skip = saved_t.pop(-1) + xt, _ = tdec(xt, skip, length_t) + # print('Decode {} XT: {}'.format(idx, xt.shape)) + + # Let's make sure we used all stored skip connections. 
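# Editor's note (annotation, not part of this patch): after the sanity checks below, the
# spectrogram-branch output is de-normalized, converted back to a complex spectrogram by _mask
# (a plain CaC reshape here, or Wiener filtering when cac is False), inverse-STFT'd, and summed
# with the de-normalized waveform-branch output; padding added for use_train_segment is trimmed.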
+ assert len(saved) == 0 + assert len(lengths_t) == 0 + assert len(saved_t) == 0 + + S = len(self.sources) + + if self.num_subbands > 1: + x = x.view(B, -1, Fq, T) + # print("X view 1: {}".format(x.shape)) + x = self.cws2cac(x) + # print("X view 2: {}".format(x.shape)) + + x = x.view(B, S, -1, Fq * self.num_subbands, T) + x = x * std[:, None] + mean[:, None] + # print("X returned: {}".format(x.shape)) + + zout = self._mask(z, x) + if self.use_train_segment: + if self.training: + x = self._ispec(zout, length) + else: + x = self._ispec(zout, training_length) + else: + x = self._ispec(zout, length) + + if self.use_train_segment: + if self.training: + xt = xt.view(B, S, -1, length) + else: + xt = xt.view(B, S, -1, training_length) + else: + xt = xt.view(B, S, -1, length) + xt = xt * stdt[:, None] + meant[:, None] + x = xt + x + if length_pre_pad: + x = x[..., :length_pre_pad] + return x + + +def get_model(args): + extra = { + 'sources': list(args.training.instruments), + 'audio_channels': args.training.channels, + 'samplerate': args.training.samplerate, + # 'segment': args.model_segment or 4 * args.dset.segment, + 'segment': args.training.segment, + } + klass = { + 'demucs': Demucs, + 'hdemucs': HDemucs, + 'htdemucs': HTDemucs, + }[args.model] + kw = OmegaConf.to_container(getattr(args, args.model), resolve=True) + model = klass(**extra, **kw) + return model + + diff --git a/data_pipeline/seperation/models/mdx23c_tfc_tdf_v3.py b/data_pipeline/seperation/models/mdx23c_tfc_tdf_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..caa818cfd8db56783743abc3e6cbbe572ab9451f --- /dev/null +++ b/data_pipeline/seperation/models/mdx23c_tfc_tdf_v3.py @@ -0,0 +1,242 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + + +class STFT: + def __init__(self, config): + self.n_fft = config.n_fft + self.hop_length = config.hop_length + self.window = torch.hann_window(window_length=self.n_fft, periodic=True) + self.dim_f = config.dim_f + + def __call__(self, x): + window = self.window.to(x.device) + batch_dims = x.shape[:-2] + c, t = x.shape[-2:] + x = x.reshape([-1, t]) + x = torch.stft( + x, + n_fft=self.n_fft, + hop_length=self.hop_length, + window=window, + center=True, + return_complex=True + ) + x = torch.view_as_real(x) + x = x.permute([0, 3, 1, 2]) + x = x.reshape([*batch_dims, c, 2, -1, x.shape[-1]]).reshape([*batch_dims, c * 2, -1, x.shape[-1]]) + return x[..., :self.dim_f, :] + + def inverse(self, x): + window = self.window.to(x.device) + batch_dims = x.shape[:-3] + c, f, t = x.shape[-3:] + n = self.n_fft // 2 + 1 + f_pad = torch.zeros([*batch_dims, c, n - f, t]).to(x.device) + x = torch.cat([x, f_pad], -2) + x = x.reshape([*batch_dims, c // 2, 2, n, t]).reshape([-1, 2, n, t]) + x = x.permute([0, 2, 3, 1]) + x = x[..., 0] + x[..., 1] * 1.j + x = torch.istft(x, n_fft=self.n_fft, hop_length=self.hop_length, window=window, center=True) + x = x.reshape([*batch_dims, 2, -1]) + return x + + +def get_norm(norm_type): + def norm(c, norm_type): + if norm_type == 'BatchNorm': + return nn.BatchNorm2d(c) + elif norm_type == 'InstanceNorm': + return nn.InstanceNorm2d(c, affine=True) + elif 'GroupNorm' in norm_type: + g = int(norm_type.replace('GroupNorm', '')) + return nn.GroupNorm(num_groups=g, num_channels=c) + else: + return nn.Identity() + + return partial(norm, norm_type=norm_type) + + +def get_act(act_type): + if act_type == 'gelu': + return nn.GELU() + elif act_type == 'relu': + return nn.ReLU() + elif act_type[:3] == 'elu': + alpha = 
float(act_type.replace('elu', '')) + return nn.ELU(alpha) + else: + raise Exception + + +class Upscale(nn.Module): + def __init__(self, in_c, out_c, scale, norm, act): + super().__init__() + self.conv = nn.Sequential( + norm(in_c), + act, + nn.ConvTranspose2d(in_channels=in_c, out_channels=out_c, kernel_size=scale, stride=scale, bias=False) + ) + + def forward(self, x): + return self.conv(x) + + +class Downscale(nn.Module): + def __init__(self, in_c, out_c, scale, norm, act): + super().__init__() + self.conv = nn.Sequential( + norm(in_c), + act, + nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=scale, stride=scale, bias=False) + ) + + def forward(self, x): + return self.conv(x) + + +class TFC_TDF(nn.Module): + def __init__(self, in_c, c, l, f, bn, norm, act): + super().__init__() + + self.blocks = nn.ModuleList() + for i in range(l): + block = nn.Module() + + block.tfc1 = nn.Sequential( + norm(in_c), + act, + nn.Conv2d(in_c, c, 3, 1, 1, bias=False), + ) + block.tdf = nn.Sequential( + norm(c), + act, + nn.Linear(f, f // bn, bias=False), + norm(c), + act, + nn.Linear(f // bn, f, bias=False), + ) + block.tfc2 = nn.Sequential( + norm(c), + act, + nn.Conv2d(c, c, 3, 1, 1, bias=False), + ) + block.shortcut = nn.Conv2d(in_c, c, 1, 1, 0, bias=False) + + self.blocks.append(block) + in_c = c + + def forward(self, x): + for block in self.blocks: + s = block.shortcut(x) + x = block.tfc1(x) + x = x + block.tdf(x) + x = block.tfc2(x) + x = x + s + return x + + +class TFC_TDF_net(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + + norm = get_norm(norm_type=config.model.norm) + act = get_act(act_type=config.model.act) + + self.num_target_instruments = 1 if config.training.target_instrument else len(config.training.instruments) + self.num_subbands = config.model.num_subbands + + dim_c = self.num_subbands * config.audio.num_channels * 2 + n = config.model.num_scales + scale = config.model.scale + l = config.model.num_blocks_per_scale + c = config.model.num_channels + g = config.model.growth + bn = config.model.bottleneck_factor + f = config.audio.dim_f // self.num_subbands + + self.first_conv = nn.Conv2d(dim_c, c, 1, 1, 0, bias=False) + + self.encoder_blocks = nn.ModuleList() + for i in range(n): + block = nn.Module() + block.tfc_tdf = TFC_TDF(c, c, l, f, bn, norm, act) + block.downscale = Downscale(c, c + g, scale, norm, act) + f = f // scale[1] + c += g + self.encoder_blocks.append(block) + + self.bottleneck_block = TFC_TDF(c, c, l, f, bn, norm, act) + + self.decoder_blocks = nn.ModuleList() + for i in range(n): + block = nn.Module() + block.upscale = Upscale(c, c - g, scale, norm, act) + f = f * scale[1] + c -= g + block.tfc_tdf = TFC_TDF(2 * c, c, l, f, bn, norm, act) + self.decoder_blocks.append(block) + + self.final_conv = nn.Sequential( + nn.Conv2d(c + dim_c, c, 1, 1, 0, bias=False), + act, + nn.Conv2d(c, self.num_target_instruments * dim_c, 1, 1, 0, bias=False) + ) + + self.stft = STFT(config.audio) + + def cac2cws(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c, k, f // k, t) + x = x.reshape(b, c * k, f // k, t) + return x + + def cws2cac(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c // k, k, f, t) + x = x.reshape(b, c // k, f * k, t) + return x + + def forward(self, x): + + x = self.stft(x) + + mix = x = self.cac2cws(x) + + first_conv_out = x = self.first_conv(x) + + x = x.transpose(-1, -2) + + encoder_outputs = [] + for block in self.encoder_blocks: + x = block.tfc_tdf(x) + 
encoder_outputs.append(x) + x = block.downscale(x) + + x = self.bottleneck_block(x) + + for block in self.decoder_blocks: + x = block.upscale(x) + x = torch.cat([x, encoder_outputs.pop()], 1) + x = block.tfc_tdf(x) + + x = x.transpose(-1, -2) + + x = x * first_conv_out # reduce artifacts + + x = self.final_conv(torch.cat([mix, x], 1)) + + x = self.cws2cac(x) + + if self.num_target_instruments > 1: + b, c, f, t = x.shape + x = x.reshape(b, self.num_target_instruments, -1, f, t) + + x = self.stft.inverse(x) + + return x diff --git a/data_pipeline/seperation/models/scnet/__init__.py b/data_pipeline/seperation/models/scnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f6ecefede9345237623066dd21ebd8253af1c60 --- /dev/null +++ b/data_pipeline/seperation/models/scnet/__init__.py @@ -0,0 +1 @@ +from .scnet import SCNet diff --git a/data_pipeline/seperation/models/scnet/scnet.py b/data_pipeline/seperation/models/scnet/scnet.py new file mode 100644 index 0000000000000000000000000000000000000000..320ac1a3af3491597e7cb7e8a9533bd0181b7932 --- /dev/null +++ b/data_pipeline/seperation/models/scnet/scnet.py @@ -0,0 +1,373 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from collections import deque +from .separation import SeparationNet +import typing as tp +import math + +class Swish(nn.Module): + def forward(self, x): + return x * x.sigmoid() + + +class ConvolutionModule(nn.Module): + """ + Convolution Module in SD block. + + Args: + channels (int): input/output channels. + depth (int): number of layers in the residual branch. Each layer has its own + compress (float): amount of channel compression. + kernel (int): kernel size for the convolutions. + """ + def __init__(self, channels, depth=2, compress=4, kernel=3): + super().__init__() + assert kernel % 2 == 1 + self.depth = abs(depth) + hidden_size = int(channels / compress) + norm = lambda d: nn.GroupNorm(1, d) + self.layers = nn.ModuleList([]) + for _ in range(self.depth): + padding = (kernel // 2) + mods = [ + norm(channels), + nn.Conv1d(channels, hidden_size*2, kernel, padding = padding), + nn.GLU(1), + nn.Conv1d(hidden_size, hidden_size, kernel, padding = padding, groups = hidden_size), + norm(hidden_size), + Swish(), + nn.Conv1d(hidden_size, channels, 1), + ] + layer = nn.Sequential(*mods) + self.layers.append(layer) + + def forward(self, x): + for layer in self.layers: + x = x + layer(x) + return x + + +class FusionLayer(nn.Module): + """ + A FusionLayer within the decoder. + + Args: + - channels (int): Number of input channels. + - kernel_size (int, optional): Kernel size for the convolutional layer, defaults to 3. + - stride (int, optional): Stride for the convolutional layer, defaults to 1. + - padding (int, optional): Padding for the convolutional layer, defaults to 1. + """ + + def __init__(self, channels, kernel_size=3, stride=1, padding=1): + super(FusionLayer, self).__init__() + self.conv = nn.Conv2d(channels * 2, channels * 2, kernel_size, stride=stride, padding=padding) + + def forward(self, x, skip=None): + if skip is not None: + x += skip + x = x.repeat(1, 2, 1, 1) + x = self.conv(x) + x = F.glu(x, dim=1) + return x + + +class SDlayer(nn.Module): + """ + Implements a Sparse Down-sample Layer for processing different frequency bands separately. + + Args: + - channels_in (int): Input channel count. + - channels_out (int): Output channel count. + - band_configs (dict): A dictionary containing configuration for each frequency band. 
+ Keys are 'low', 'mid', 'high' for each band, and values are + dictionaries with keys 'SR', 'stride', and 'kernel' for proportion, + stride, and kernel size, respectively. + """ + def __init__(self, channels_in, channels_out, band_configs): + super(SDlayer, self).__init__() + + # Initializing convolutional layers for each band + self.convs = nn.ModuleList() + self.strides = [] + self.kernels = [] + for config in band_configs.values(): + self.convs.append(nn.Conv2d(channels_in, channels_out, (config['kernel'], 1), (config['stride'], 1), (0, 0))) + self.strides.append(config['stride']) + self.kernels.append(config['kernel']) + + # Saving rate proportions for determining splits + self.SR_low = band_configs['low']['SR'] + self.SR_mid = band_configs['mid']['SR'] + + def forward(self, x): + B, C, Fr, T = x.shape + # Define splitting points based on sampling rates + splits = [ + (0, math.ceil(Fr * self.SR_low)), + (math.ceil(Fr * self.SR_low), math.ceil(Fr * (self.SR_low + self.SR_mid))), + (math.ceil(Fr * (self.SR_low + self.SR_mid)), Fr) + ] + + # Processing each band with the corresponding convolution + outputs = [] + original_lengths=[] + for conv, stride, kernel, (start, end) in zip(self.convs, self.strides, self.kernels, splits): + extracted = x[:, :, start:end, :] + original_lengths.append(end-start) + current_length = extracted.shape[2] + + # padding + if stride == 1: + total_padding = kernel - stride + else: + total_padding = (stride - current_length % stride) % stride + pad_left = total_padding // 2 + pad_right = total_padding - pad_left + + padded = F.pad(extracted, (0, 0, pad_left, pad_right)) + + output = conv(padded) + outputs.append(output) + + return outputs, original_lengths + + +class SUlayer(nn.Module): + """ + Implements a Sparse Up-sample Layer in decoder. + + Args: + - channels_in: The number of input channels. + - channels_out: The number of output channels. + - convtr_configs: Dictionary containing the configurations for transposed convolutions. + """ + def __init__(self, channels_in, channels_out, band_configs): + super(SUlayer, self).__init__() + + # Initializing convolutional layers for each band + self.convtrs = nn.ModuleList([ + nn.ConvTranspose2d(channels_in, channels_out, [config['kernel'], 1], [config['stride'], 1]) + for _, config in band_configs.items() + ]) + + def forward(self, x, lengths, origin_lengths): + B, C, Fr, T = x.shape + # Define splitting points based on input lengths + splits = [ + (0, lengths[0]), + (lengths[0], lengths[0] + lengths[1]), + (lengths[0] + lengths[1], None) + ] + # Processing each band with the corresponding convolution + outputs = [] + for idx, (convtr, (start, end)) in enumerate(zip(self.convtrs, splits)): + out = convtr(x[:, :, start:end, :]) + # Calculate the distance to trim the output symmetrically to original length + current_Fr_length = out.shape[2] + dist = abs(origin_lengths[idx] - current_Fr_length) // 2 + + # Trim the output to the original length symmetrically + trimmed_out = out[:, :, dist:dist + origin_lengths[idx], :] + + outputs.append(trimmed_out) + + # Concatenate trimmed outputs along the frequency dimension to return the final tensor + x = torch.cat(outputs, dim=2) + + return x + + +class SDblock(nn.Module): + """ + Implements a simplified Sparse Down-sample block in encoder. + + Args: + - channels_in (int): Number of input channels. + - channels_out (int): Number of output channels. + - band_config (dict): Configuration for the SDlayer specifying band splits and convolutions. 
+ - conv_config (dict): Configuration for convolution modules applied to each band. + - depths (list of int): List specifying the convolution depths for low, mid, and high frequency bands. + """ + def __init__(self, channels_in, channels_out, band_configs={}, conv_config={}, depths=[3, 2, 1], kernel_size=3): + super(SDblock, self).__init__() + self.SDlayer = SDlayer(channels_in, channels_out, band_configs) + + # Dynamically create convolution modules for each band based on depths + self.conv_modules = nn.ModuleList([ + ConvolutionModule(channels_out, depth, **conv_config) for depth in depths + ]) + #Set the kernel_size to an odd number. + self.globalconv = nn.Conv2d(channels_out, channels_out, kernel_size, 1, (kernel_size - 1) // 2) + + def forward(self, x): + bands, original_lengths = self.SDlayer(x) + # B, C, f, T = band.shape + bands = [ + F.gelu( + conv(band.permute(0, 2, 1, 3).reshape(-1, band.shape[1], band.shape[3])) + .view(band.shape[0], band.shape[2], band.shape[1], band.shape[3]) + .permute(0, 2, 1, 3) + ) + for conv, band in zip(self.conv_modules, bands) + + ] + lengths = [band.size(-2) for band in bands] + full_band = torch.cat(bands, dim=2) + skip = full_band + + output = self.globalconv(full_band) + + return output, skip, lengths, original_lengths + + +class SCNet(nn.Module): + """ + The implementation of SCNet: Sparse Compression Network for Music Source Separation. Paper: https://arxiv.org/abs/2401.13276.pdf + + Args: + - sources (List[str]): List of sources to be separated. + - audio_channels (int): Number of audio channels. + - nfft (int): Number of FFTs to determine the frequency dimension of the input. + - hop_size (int): Hop size for the STFT. + - win_size (int): Window size for STFT. + - normalized (bool): Whether to normalize the STFT. + - dims (List[int]): List of channel dimensions for each block. + - band_configs (Dict[str, Dict[str, int]]): Configuration for each frequency band, including how to divide the frequency bands, + and the settings for the upsampling/downsampling convolutional layers. + - conv_depths (List[int]): List specifying the number of convolution modules in each SD block. + - compress (int): Compression factor for convolution module. + - conv_kernel (int): Kernel size for convolution layer in convolution module. + - num_dplayer (int): Number of dual-path layers. + - expand (int): Expansion factor in the dual-path RNN, default is 1. 
+ + """ + def __init__(self, + sources = ['drums', 'bass', 'other', 'vocals'], + audio_channels = 2, + # Main structure + dims = [4, 32, 64, 128], # dims = [4, 64, 128, 256] in SCNet-large + # STFT + nfft = 4096, + hop_size = 1024, + win_size = 4096, + normalized = True, + # SD/SU layer + band_configs = { + 'low': { 'SR': .175, 'stride': 1, 'kernel': 3 }, + 'mid': { 'SR': .392, 'stride': 4, 'kernel': 4 }, + 'high': {'SR': .433, 'stride': 16, 'kernel': 16 } + }, + # Convolution Module + conv_depths = [3,2,1], + compress = 4, + conv_kernel = 3, + # Dual-path RNN + num_dplayer = 6, + expand = 1, + # mamba + use_mamba = False, + mamba_config = { + 'd_stat': 16, + 'd_conv': 4, + 'd_expand': 2 + }): + super().__init__() + self.sources = sources + self.audio_channels = audio_channels + self.dims = dims + self.band_configs = band_configs + self.hop_length = hop_size + self.conv_config = { + 'compress': compress, + 'kernel': conv_kernel, + } + + self.stft_config = { + 'n_fft': nfft, + 'hop_length': hop_size, + 'win_length': win_size, + 'center': True, + 'normalized': normalized + } + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + for index in range(len(dims)-1): + enc = SDblock( + channels_in = dims[index], + channels_out = dims[index+1], + band_configs = self.band_configs, + conv_config = self.conv_config, + depths = conv_depths + ) + self.encoder.append(enc) + + dec = nn.Sequential( + FusionLayer(channels = dims[index+1]), + SUlayer( + channels_in = dims[index+1], + channels_out = dims[index] if index != 0 else dims[index] * len(sources), + band_configs = self.band_configs, + ) + ) + self.decoder.insert(0, dec) + + self.separation_net = SeparationNet( + channels = dims[-1], + expand = expand, + num_layers = num_dplayer, + use_mamba = use_mamba, + **mamba_config + ) + + + def forward(self, x): + # B, C, L = x.shape + B = x.shape[0] + # In the initial padding, ensure that the number of frames after the STFT (the length of the T dimension) is even, + # so that the RFFT operation can be used in the separation network. 
+ padding = self.hop_length - x.shape[-1] % self.hop_length + if (x.shape[-1] + padding) // self.hop_length % 2 == 0: + padding += self.hop_length + x = F.pad(x, (0, padding)) + + # STFT + L = x.shape[-1] + x = x.reshape(-1, L) + x = torch.stft(x, **self.stft_config, return_complex=True) + x = torch.view_as_real(x) + x = x.permute(0, 3, 1, 2).reshape(x.shape[0]//self.audio_channels, x.shape[3]*self.audio_channels, x.shape[1], x.shape[2]) + + B, C, Fr, T = x.shape + + save_skip = deque() + save_lengths = deque() + save_original_lengths = deque() + # encoder + for sd_layer in self.encoder: + x, skip, lengths, original_lengths = sd_layer(x) + save_skip.append(skip) + save_lengths.append(lengths) + save_original_lengths.append(original_lengths) + + #separation + x = self.separation_net(x) + + #decoder + for fusion_layer, su_layer in self.decoder: + x = fusion_layer(x, save_skip.pop()) + x = su_layer(x, save_lengths.pop(), save_original_lengths.pop()) + + #output + n = self.dims[0] + x = x.view(B, n, -1, Fr, T) + x = x.reshape(-1, 2, Fr, T).permute(0, 2, 3, 1) + x = torch.view_as_complex(x.contiguous()) + x = torch.istft(x, **self.stft_config) + x = x.reshape(B, len(self.sources), self.audio_channels, -1) + + x = x[:, :, :, :-padding] + + return x \ No newline at end of file diff --git a/data_pipeline/seperation/models/scnet/separation.py b/data_pipeline/seperation/models/scnet/separation.py new file mode 100644 index 0000000000000000000000000000000000000000..180df7b84d69a81c3da13932f000644b442d0272 --- /dev/null +++ b/data_pipeline/seperation/models/scnet/separation.py @@ -0,0 +1,178 @@ +import torch +import torch.nn as nn +from torch.nn.modules.rnn import LSTM +import torch.nn.functional as Func +try: + from mamba_ssm.modules.mamba_simple import Mamba +except Exception as e: + print('No mamba found. Please install mamba_ssm') + +class RMSNorm(nn.Module): + def __init__(self, dim): + super().__init__() + self.scale = dim ** 0.5 + self.gamma = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + return Func.normalize(x, dim=-1) * self.scale * self.gamma + + +class MambaModule(nn.Module): + def __init__(self, d_model, d_state, d_conv, d_expand): + super().__init__() + self.norm = RMSNorm(dim=d_model) + self.mamba = Mamba( + d_model=d_model, + d_state=d_state, + d_conv=d_conv, + expand=d_expand + ) + + def forward(self, x): + x = x + self.mamba(self.norm(x)) + return x + + +class FeatureConversion(nn.Module): + """ + Integrates into the adjacent Dual-Path layer. + + Args: + channels (int): Number of input channels. + inverse (bool): If True, uses ifft; otherwise, uses rfft. + """ + def __init__(self, channels, inverse): + super().__init__() + self.inverse = inverse + self.channels= channels + + def forward(self, x): + # B, C, F, T = x.shape + if self.inverse: + x = x.float() + x_r = x[:, :self.channels//2, :, :] + x_i = x[:, self.channels//2:, :, :] + x = torch.complex(x_r, x_i) + x = torch.fft.irfft(x, dim=3, norm="ortho") + else: + x = x.float() + x = torch.fft.rfft(x, dim=3, norm="ortho") + x_real = x.real + x_imag = x.imag + x = torch.cat([x_real, x_imag], dim=1) + return x + + +class DualPathRNN(nn.Module): + """ + Dual-Path RNN in Separation Network. + + Args: + d_model (int): The number of expected features in the input (input_size). + expand (int): Expansion factor used to calculate the hidden_size of LSTM. + bidirectional (bool): If True, becomes a bidirectional LSTM. 
+ """ + def __init__(self, d_model, expand, bidirectional=True): + super(DualPathRNN, self).__init__() + + self.d_model = d_model + self.hidden_size = d_model * expand + self.bidirectional = bidirectional + # Initialize LSTM layers and normalization layers + self.lstm_layers = nn.ModuleList([self._init_lstm_layer(self.d_model, self.hidden_size) for _ in range(2)]) + self.linear_layers = nn.ModuleList([nn.Linear(self.hidden_size*2, self.d_model) for _ in range(2)]) + self.norm_layers = nn.ModuleList([nn.GroupNorm(1, d_model) for _ in range(2)]) + + def _init_lstm_layer(self, d_model, hidden_size): + return LSTM(d_model, hidden_size, num_layers=1, bidirectional=self.bidirectional, batch_first=True) + + def forward(self, x): + B, C, F, T = x.shape + + # Process dual-path rnn + + original_x = x + # Frequency-path + x = self.norm_layers[0](x) + x = x.transpose(1, 3).contiguous().view(B * T, F, C) + x, _ = self.lstm_layers[0](x) + x = self.linear_layers[0](x) + x = x.view(B, T, F, C).transpose(1, 3) + x = x + original_x + + original_x = x + # Time-path + x = self.norm_layers[1](x) + x = x.transpose(1, 2).contiguous().view(B * F, C, T).transpose(1, 2) + x, _ = self.lstm_layers[1](x) + x = self.linear_layers[1](x) + x = x.transpose(1, 2).contiguous().view(B, F, C, T).transpose(1, 2) + x = x + original_x + + return x + + +class DualPathMamba(nn.Module): + """ + Dual-Path Mamba. + + """ + def __init__(self, d_model, d_stat, d_conv, d_expand): + super(DualPathMamba, self).__init__() + # Initialize mamba layers + self.mamba_layers = nn.ModuleList([MambaModule(d_model, d_stat, d_conv, d_expand) for _ in range(2)]) + + def forward(self, x): + B, C, F, T = x.shape + + # Process dual-path mamba + + # Frequency-path + x = x.transpose(1, 3).contiguous().view(B * T, F, C) + x = self.mamba_layers[0](x) + x = x.view(B, T, F, C).transpose(1, 3) + + # Time-path + x = x.transpose(1, 2).contiguous().view(B * F, C, T).transpose(1, 2) + x = self.mamba_layers[1](x) + x = x.transpose(1, 2).contiguous().view(B, F, C, T).transpose(1, 2) + + return x + + +class SeparationNet(nn.Module): + """ + Implements a simplified Sparse Down-sample block in an encoder architecture. + + Args: + - channels (int): Number input channels. + - expand (int): Expansion factor used to calculate the hidden_size of LSTM. + - num_layers (int): Number of dual-path layers. + - use_mamba (bool): If true, use the Mamba module to replace the RNN. + - d_stat (int), d_conv (int), d_expand (int): These are built-in parameters of the Mamba model. 
+ """ + def __init__(self, channels, expand=1, num_layers=6, use_mamba=True, d_stat=16, d_conv=4, d_expand=2): + super(SeparationNet, self).__init__() + + self.num_layers = num_layers + if use_mamba: + self.dp_modules = nn.ModuleList([ + DualPathMamba(channels * (2 if i % 2 == 1 else 1), d_stat, d_conv, d_expand * (2 if i % 2 == 1 else 1)) for i in range(num_layers) + ]) + else: + self.dp_modules = nn.ModuleList([ + DualPathRNN(channels * (2 if i % 2 == 1 else 1), expand) for i in range(num_layers) + ]) + + self.feature_conversion = nn.ModuleList([ + FeatureConversion(channels * 2 , inverse = False if i % 2 == 0 else True) for i in range(num_layers) + ]) + def forward(self, x): + for i in range(self.num_layers): + x = self.dp_modules[i](x) + x = self.feature_conversion[i](x) + return x + + + + diff --git a/data_pipeline/seperation/models/scnet_unofficial/__init__.py b/data_pipeline/seperation/models/scnet_unofficial/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6d034d38a2ace2e81bd28d63dd8f25feb918f33d --- /dev/null +++ b/data_pipeline/seperation/models/scnet_unofficial/__init__.py @@ -0,0 +1 @@ +from models.scnet_unofficial.scnet import SCNet \ No newline at end of file diff --git a/data_pipeline/seperation/models/scnet_unofficial/modules/__init__.py b/data_pipeline/seperation/models/scnet_unofficial/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..69617bb15044d9bbfd0211fcdfa0fa605b01c048 --- /dev/null +++ b/data_pipeline/seperation/models/scnet_unofficial/modules/__init__.py @@ -0,0 +1,3 @@ +from models.scnet_unofficial.modules.dualpath_rnn import DualPathRNN +from models.scnet_unofficial.modules.sd_encoder import SDBlock +from models.scnet_unofficial.modules.su_decoder import SUBlock diff --git a/data_pipeline/seperation/models/scnet_unofficial/modules/dualpath_rnn.py b/data_pipeline/seperation/models/scnet_unofficial/modules/dualpath_rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..2dfcdbcfc102a6fde5a2ff53a2a06f2d6caae196 --- /dev/null +++ b/data_pipeline/seperation/models/scnet_unofficial/modules/dualpath_rnn.py @@ -0,0 +1,228 @@ +import torch +import torch.nn as nn +import torch.nn.functional as Func + +class RMSNorm(nn.Module): + def __init__(self, dim): + super().__init__() + self.scale = dim ** 0.5 + self.gamma = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + return Func.normalize(x, dim=-1) * self.scale * self.gamma + + +class MambaModule(nn.Module): + def __init__(self, d_model, d_state, d_conv, d_expand): + super().__init__() + self.norm = RMSNorm(dim=d_model) + self.mamba = Mamba( + d_model=d_model, + d_state=d_state, + d_conv=d_conv, + d_expand=d_expand + ) + + def forward(self, x): + x = x + self.mamba(self.norm(x)) + return x + + +class RNNModule(nn.Module): + """ + RNNModule class implements a recurrent neural network module with LSTM cells. + + Args: + - input_dim (int): Dimensionality of the input features. + - hidden_dim (int): Dimensionality of the hidden state of the LSTM. + - bidirectional (bool, optional): If True, uses bidirectional LSTM. Defaults to True. + + Shapes: + - Input: (B, T, D) where + B is batch size, + T is sequence length, + D is input dimensionality. + - Output: (B, T, D) where + B is batch size, + T is sequence length, + D is input dimensionality. + """ + + def __init__(self, input_dim: int, hidden_dim: int, bidirectional: bool = True): + """ + Initializes RNNModule with input dimension, hidden dimension, and bidirectional flag. 
+ """ + super().__init__() + self.groupnorm = nn.GroupNorm(num_groups=1, num_channels=input_dim) + self.rnn = nn.LSTM( + input_dim, hidden_dim, batch_first=True, bidirectional=bidirectional + ) + self.fc = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim, input_dim) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the RNNModule. + + Args: + - x (torch.Tensor): Input tensor of shape (B, T, D). + + Returns: + - torch.Tensor: Output tensor of shape (B, T, D). + """ + x = x.transpose(1, 2) + x = self.groupnorm(x) + x = x.transpose(1, 2) + + x, (hidden, _) = self.rnn(x) + x = self.fc(x) + return x + + +class RFFTModule(nn.Module): + """ + RFFTModule class implements a module for performing real-valued Fast Fourier Transform (FFT) + or its inverse on input tensors. + + Args: + - inverse (bool, optional): If False, performs forward FFT. If True, performs inverse FFT. Defaults to False. + + Shapes: + - Input: (B, F, T, D) where + B is batch size, + F is the number of features, + T is sequence length, + D is input dimensionality. + - Output: (B, F, T // 2 + 1, D * 2) if performing forward FFT. + (B, F, T, D // 2, 2) if performing inverse FFT. + """ + + def __init__(self, inverse: bool = False): + """ + Initializes RFFTModule with inverse flag. + """ + super().__init__() + self.inverse = inverse + + def forward(self, x: torch.Tensor, time_dim: int) -> torch.Tensor: + """ + Performs forward or inverse FFT on the input tensor x. + + Args: + - x (torch.Tensor): Input tensor of shape (B, F, T, D). + - time_dim (int): Input size of time dimension. + + Returns: + - torch.Tensor: Output tensor after FFT or its inverse operation. + """ + dtype = x.dtype + B, F, T, D = x.shape + + # RuntimeError: cuFFT only supports dimensions whose sizes are powers of two when computing in half precision + x = x.float() + + if not self.inverse: + x = torch.fft.rfft(x, dim=2) + x = torch.view_as_real(x) + x = x.reshape(B, F, T // 2 + 1, D * 2) + else: + x = x.reshape(B, F, T, D // 2, 2) + x = torch.view_as_complex(x) + x = torch.fft.irfft(x, n=time_dim, dim=2) + + x = x.to(dtype) + return x + + def extra_repr(self) -> str: + """ + Returns extra representation string with module's configuration. + """ + return f"inverse={self.inverse}" + + +class DualPathRNN(nn.Module): + """ + DualPathRNN class implements a neural network with alternating layers of RNNModule and RFFTModule. + + Args: + - n_layers (int): Number of layers in the network. + - input_dim (int): Dimensionality of the input features. + - hidden_dim (int): Dimensionality of the hidden state of the RNNModule. + + Shapes: + - Input: (B, F, T, D) where + B is batch size, + F is the number of features (frequency dimension), + T is sequence length (time dimension), + D is input dimensionality (channel dimension). + - Output: (B, F, T, D) where + B is batch size, + F is the number of features (frequency dimension), + T is sequence length (time dimension), + D is input dimensionality (channel dimension). + """ + + def __init__( + self, + n_layers: int, + input_dim: int, + hidden_dim: int, + + use_mamba: bool = False, + d_state: int = 16, + d_conv: int = 4, + d_expand: int = 2 + ): + """ + Initializes DualPathRNN with the specified number of layers, input dimension, and hidden dimension. 
+ """ + super().__init__() + + if use_mamba: + from mamba_ssm.modules.mamba_simple import Mamba + net = MambaModule + dkwargs = {"d_model": input_dim, "d_state": d_state, "d_conv": d_conv, "d_expand": d_expand} + ukwargs = {"d_model": input_dim * 2, "d_state": d_state, "d_conv": d_conv, "d_expand": d_expand * 2} + else: + net = RNNModule + dkwargs = {"input_dim": input_dim, "hidden_dim": hidden_dim} + ukwargs = {"input_dim": input_dim * 2, "hidden_dim": hidden_dim * 2} + + self.layers = nn.ModuleList() + for i in range(1, n_layers + 1): + kwargs = dkwargs if i % 2 == 1 else ukwargs + layer = nn.ModuleList([ + net(**kwargs), + net(**kwargs), + RFFTModule(inverse=(i % 2 == 0)), + ]) + self.layers.append(layer) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the DualPathRNN. + + Args: + - x (torch.Tensor): Input tensor of shape (B, F, T, D). + + Returns: + - torch.Tensor: Output tensor of shape (B, F, T, D). + """ + + time_dim = x.shape[2] + + for time_layer, freq_layer, rfft_layer in self.layers: + B, F, T, D = x.shape + + x = x.reshape((B * F), T, D) + x = time_layer(x) + x = x.reshape(B, F, T, D) + x = x.permute(0, 2, 1, 3) + + x = x.reshape((B * T), F, D) + x = freq_layer(x) + x = x.reshape(B, T, F, D) + x = x.permute(0, 2, 1, 3) + + x = rfft_layer(x, time_dim) + + return x diff --git a/data_pipeline/seperation/models/scnet_unofficial/modules/sd_encoder.py b/data_pipeline/seperation/models/scnet_unofficial/modules/sd_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..742577f480693671437dc50358a1a65d251b6e9b --- /dev/null +++ b/data_pipeline/seperation/models/scnet_unofficial/modules/sd_encoder.py @@ -0,0 +1,285 @@ +from typing import List, Tuple + +import torch +import torch.nn as nn + +from models.scnet_unofficial.utils import create_intervals + + +class Downsample(nn.Module): + """ + Downsample class implements a module for downsampling input tensors using 2D convolution. + + Args: + - input_dim (int): Dimensionality of the input channels. + - output_dim (int): Dimensionality of the output channels. + - stride (int): Stride value for the convolution operation. + + Shapes: + - Input: (B, C_in, F, T) where + B is batch size, + C_in is the number of input channels, + F is the frequency dimension, + T is the time dimension. + - Output: (B, C_out, F // stride, T) where + B is batch size, + C_out is the number of output channels, + F // stride is the downsampled frequency dimension. + + """ + + def __init__( + self, + input_dim: int, + output_dim: int, + stride: int, + ): + """ + Initializes Downsample with input dimension, output dimension, and stride. + """ + super().__init__() + self.conv = nn.Conv2d(input_dim, output_dim, 1, (stride, 1)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the Downsample module. + + Args: + - x (torch.Tensor): Input tensor of shape (B, C_in, F, T). + + Returns: + - torch.Tensor: Downsampled tensor of shape (B, C_out, F // stride, T). + """ + return self.conv(x) + + +class ConvolutionModule(nn.Module): + """ + ConvolutionModule class implements a module with a sequence of convolutional layers similar to Conformer. + + Args: + - input_dim (int): Dimensionality of the input features. + - hidden_dim (int): Dimensionality of the hidden features. + - kernel_sizes (List[int]): List of kernel sizes for the convolutional layers. + - bias (bool, optional): If True, adds a learnable bias to the output. Default is False. 
+ + Shapes: + - Input: (B, T, D) where + B is batch size, + T is sequence length, + D is input dimensionality. + - Output: (B, T, D) where + B is batch size, + T is sequence length, + D is input dimensionality. + """ + + def __init__( + self, + input_dim: int, + hidden_dim: int, + kernel_sizes: List[int], + bias: bool = False, + ) -> None: + """ + Initializes ConvolutionModule with input dimension, hidden dimension, kernel sizes, and bias. + """ + super().__init__() + self.sequential = nn.Sequential( + nn.GroupNorm(num_groups=1, num_channels=input_dim), + nn.Conv1d( + input_dim, + 2 * hidden_dim, + kernel_sizes[0], + stride=1, + padding=(kernel_sizes[0] - 1) // 2, + bias=bias, + ), + nn.GLU(dim=1), + nn.Conv1d( + hidden_dim, + hidden_dim, + kernel_sizes[1], + stride=1, + padding=(kernel_sizes[1] - 1) // 2, + groups=hidden_dim, + bias=bias, + ), + nn.GroupNorm(num_groups=1, num_channels=hidden_dim), + nn.SiLU(), + nn.Conv1d( + hidden_dim, + input_dim, + kernel_sizes[2], + stride=1, + padding=(kernel_sizes[2] - 1) // 2, + bias=bias, + ), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the ConvolutionModule. + + Args: + - x (torch.Tensor): Input tensor of shape (B, T, D). + + Returns: + - torch.Tensor: Output tensor of shape (B, T, D). + """ + x = x.transpose(1, 2) + x = x + self.sequential(x) + x = x.transpose(1, 2) + return x + + +class SDLayer(nn.Module): + """ + SDLayer class implements a subband decomposition layer with downsampling and convolutional modules. + + Args: + - subband_interval (Tuple[float, float]): Tuple representing the frequency interval for subband decomposition. + - input_dim (int): Dimensionality of the input channels. + - output_dim (int): Dimensionality of the output channels after downsampling. + - downsample_stride (int): Stride value for the downsampling operation. + - n_conv_modules (int): Number of convolutional modules. + - kernel_sizes (List[int]): List of kernel sizes for the convolutional layers. + - bias (bool, optional): If True, adds a learnable bias to the convolutional layers. Default is True. + + Shapes: + - Input: (B, Fi, T, Ci) where + B is batch size, + Fi is the number of input subbands, + T is sequence length, and + Ci is the number of input channels. + - Output: (B, Fi+1, T, Ci+1) where + B is batch size, + Fi+1 is the number of output subbands, + T is sequence length, + Ci+1 is the number of output channels. + """ + + def __init__( + self, + subband_interval: Tuple[float, float], + input_dim: int, + output_dim: int, + downsample_stride: int, + n_conv_modules: int, + kernel_sizes: List[int], + bias: bool = True, + ): + """ + Initializes SDLayer with subband interval, input dimension, + output dimension, downsample stride, number of convolutional modules, kernel sizes, and bias. + """ + super().__init__() + self.subband_interval = subband_interval + self.downsample = Downsample(input_dim, output_dim, downsample_stride) + self.activation = nn.GELU() + conv_modules = [ + ConvolutionModule( + input_dim=output_dim, + hidden_dim=output_dim // 4, + kernel_sizes=kernel_sizes, + bias=bias, + ) + for _ in range(n_conv_modules) + ] + self.conv_modules = nn.Sequential(*conv_modules) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the SDLayer. + + Args: + - x (torch.Tensor): Input tensor of shape (B, Fi, T, Ci). + + Returns: + - torch.Tensor: Output tensor of shape (B, Fi+1, T, Ci+1). 
+ """ + B, F, T, C = x.shape + x = x[:, int(self.subband_interval[0] * F) : int(self.subband_interval[1] * F)] + x = x.permute(0, 3, 1, 2) + x = self.downsample(x) + x = self.activation(x) + x = x.permute(0, 2, 3, 1) + + B, F, T, C = x.shape + x = x.reshape((B * F), T, C) + x = self.conv_modules(x) + x = x.reshape(B, F, T, C) + + return x + + +class SDBlock(nn.Module): + """ + SDBlock class implements a block with subband decomposition layers and global convolution. + + Args: + - input_dim (int): Dimensionality of the input channels. + - output_dim (int): Dimensionality of the output channels. + - bandsplit_ratios (List[float]): List of ratios for splitting the frequency bands. + - downsample_strides (List[int]): List of stride values for downsampling in each subband layer. + - n_conv_modules (List[int]): List specifying the number of convolutional modules in each subband layer. + - kernel_sizes (List[int], optional): List of kernel sizes for the convolutional layers. Default is None. + + Shapes: + - Input: (B, Fi, T, Ci) where + B is batch size, + Fi is the number of input subbands, + T is sequence length, + Ci is the number of input channels. + - Output: (B, Fi+1, T, Ci+1) where + B is batch size, + Fi+1 is the number of output subbands, + T is sequence length, + Ci+1 is the number of output channels. + """ + + def __init__( + self, + input_dim: int, + output_dim: int, + bandsplit_ratios: List[float], + downsample_strides: List[int], + n_conv_modules: List[int], + kernel_sizes: List[int] = None, + ): + """ + Initializes SDBlock with input dimension, output dimension, band split ratios, downsample strides, number of convolutional modules, and kernel sizes. + """ + super().__init__() + if kernel_sizes is None: + kernel_sizes = [3, 3, 1] + assert sum(bandsplit_ratios) == 1, "The split ratios must sum up to 1." + subband_intervals = create_intervals(bandsplit_ratios) + self.sd_layers = nn.ModuleList( + SDLayer( + input_dim=input_dim, + output_dim=output_dim, + subband_interval=sbi, + downsample_stride=dss, + n_conv_modules=ncm, + kernel_sizes=kernel_sizes, + ) + for sbi, dss, ncm in zip( + subband_intervals, downsample_strides, n_conv_modules + ) + ) + self.global_conv2d = nn.Conv2d(output_dim, output_dim, 1, 1) + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Performs forward pass through the SDBlock. + + Args: + - x (torch.Tensor): Input tensor of shape (B, Fi, T, Ci). + + Returns: + - Tuple[torch.Tensor, torch.Tensor]: Output tensor and skip connection tensor. + """ + x_skip = torch.concat([layer(x) for layer in self.sd_layers], dim=1) + x = self.global_conv2d(x_skip.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + return x, x_skip diff --git a/data_pipeline/seperation/models/scnet_unofficial/modules/su_decoder.py b/data_pipeline/seperation/models/scnet_unofficial/modules/su_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..660c1fa6cbfd9b43bed73204a0bb6593524de272 --- /dev/null +++ b/data_pipeline/seperation/models/scnet_unofficial/modules/su_decoder.py @@ -0,0 +1,241 @@ +from typing import List, Tuple + +import torch +import torch.nn as nn + +from models.scnet_unofficial.utils import get_convtranspose_output_padding + + +class FusionLayer(nn.Module): + """ + FusionLayer class implements a module for fusing two input tensors using convolutional operations. + + Args: + - input_dim (int): Dimensionality of the input channels. + - kernel_size (int, optional): Kernel size for the convolutional layer. Default is 3. 
+ - stride (int, optional): Stride value for the convolutional layer. Default is 1. + - padding (int, optional): Padding value for the convolutional layer. Default is 1. + + Shapes: + - Input: (B, F, T, C) and (B, F, T, C) where + B is batch size, + F is the number of features, + T is sequence length, + C is input dimensionality. + - Output: (B, F, T, C) where + B is batch size, + F is the number of features, + T is sequence length, + C is input dimensionality. + """ + + def __init__( + self, input_dim: int, kernel_size: int = 3, stride: int = 1, padding: int = 1 + ): + """ + Initializes FusionLayer with input dimension, kernel size, stride, and padding. + """ + super().__init__() + self.conv = nn.Conv2d( + input_dim * 2, + input_dim * 2, + kernel_size=(kernel_size, 1), + stride=(stride, 1), + padding=(padding, 0), + ) + self.activation = nn.GLU() + + def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the FusionLayer. + + Args: + - x1 (torch.Tensor): First input tensor of shape (B, F, T, C). + - x2 (torch.Tensor): Second input tensor of shape (B, F, T, C). + + Returns: + - torch.Tensor: Output tensor of shape (B, F, T, C). + """ + x = x1 + x2 + x = x.repeat(1, 1, 1, 2) + x = self.conv(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + x = self.activation(x) + return x + + +class Upsample(nn.Module): + """ + Upsample class implements a module for upsampling input tensors using transposed 2D convolution. + + Args: + - input_dim (int): Dimensionality of the input channels. + - output_dim (int): Dimensionality of the output channels. + - stride (int): Stride value for the transposed convolution operation. + - output_padding (int): Output padding value for the transposed convolution operation. + + Shapes: + - Input: (B, C_in, F, T) where + B is batch size, + C_in is the number of input channels, + F is the frequency dimension, + T is the time dimension. + - Output: (B, C_out, F * stride + output_padding, T) where + B is batch size, + C_out is the number of output channels, + F * stride + output_padding is the upsampled frequency dimension. + """ + + def __init__( + self, input_dim: int, output_dim: int, stride: int, output_padding: int + ): + """ + Initializes Upsample with input dimension, output dimension, stride, and output padding. + """ + super().__init__() + self.conv = nn.ConvTranspose2d( + input_dim, output_dim, 1, (stride, 1), output_padding=(output_padding, 0) + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the Upsample module. + + Args: + - x (torch.Tensor): Input tensor of shape (B, C_in, F, T). + + Returns: + - torch.Tensor: Output tensor of shape (B, C_out, F * stride + output_padding, T). + """ + return self.conv(x) + + +class SULayer(nn.Module): + """ + SULayer class implements a subband upsampling layer using transposed convolution. + + Args: + - input_dim (int): Dimensionality of the input channels. + - output_dim (int): Dimensionality of the output channels. + - upsample_stride (int): Stride value for the upsampling operation. + - subband_shape (int): Shape of the subband. + - sd_interval (Tuple[int, int]): Start and end indices of the subband interval. + + Shapes: + - Input: (B, F, T, C) where + B is batch size, + F is the number of features, + T is sequence length, + C is input dimensionality. + - Output: (B, F, T, C) where + B is batch size, + F is the number of features, + T is sequence length, + C is input dimensionality. 
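# Hedged usage sketch (sizes are arbitrary): FusionLayer above sums the decoder input
# with the skip connection, duplicates the channel axis, and lets the Conv2d + GLU pair
# gate the fused signal, so the output keeps the (B, F, T, C) shape of its inputs.
import torch

fusion = FusionLayer(input_dim=16)
x = torch.randn(2, 32, 50, 16)       # (B, F, T, C)
x_skip = torch.randn(2, 32, 50, 16)  # matching skip tensor from the encoder path
print(fusion(x, x_skip).shape)       # torch.Size([2, 32, 50, 16])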
+ """ + + def __init__( + self, + input_dim: int, + output_dim: int, + upsample_stride: int, + subband_shape: int, + sd_interval: Tuple[int, int], + ): + """ + Initializes SULayer with input dimension, output dimension, upsample stride, subband shape, and subband interval. + """ + super().__init__() + sd_shape = sd_interval[1] - sd_interval[0] + upsample_output_padding = get_convtranspose_output_padding( + input_shape=sd_shape, output_shape=subband_shape, stride=upsample_stride + ) + self.upsample = Upsample( + input_dim=input_dim, + output_dim=output_dim, + stride=upsample_stride, + output_padding=upsample_output_padding, + ) + self.sd_interval = sd_interval + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the SULayer. + + Args: + - x (torch.Tensor): Input tensor of shape (B, F, T, C). + + Returns: + - torch.Tensor: Output tensor of shape (B, F, T, C). + """ + x = x[:, self.sd_interval[0] : self.sd_interval[1]] + x = x.permute(0, 3, 1, 2) + x = self.upsample(x) + x = x.permute(0, 2, 3, 1) + return x + + +class SUBlock(nn.Module): + """ + SUBlock class implements a block with fusion layer and subband upsampling layers. + + Args: + - input_dim (int): Dimensionality of the input channels. + - output_dim (int): Dimensionality of the output channels. + - upsample_strides (List[int]): List of stride values for the upsampling operations. + - subband_shapes (List[int]): List of shapes for the subbands. + - sd_intervals (List[Tuple[int, int]]): List of intervals for subband decomposition. + + Shapes: + - Input: (B, Fi-1, T, Ci-1) and (B, Fi-1, T, Ci-1) where + B is batch size, + Fi-1 is the number of input subbands, + T is sequence length, + Ci-1 is the number of input channels. + - Output: (B, Fi, T, Ci) where + B is batch size, + Fi is the number of output subbands, + T is sequence length, + Ci is the number of output channels. + """ + + def __init__( + self, + input_dim: int, + output_dim: int, + upsample_strides: List[int], + subband_shapes: List[int], + sd_intervals: List[Tuple[int, int]], + ): + """ + Initializes SUBlock with input dimension, output dimension, + upsample strides, subband shapes, and subband intervals. + """ + super().__init__() + self.fusion_layer = FusionLayer(input_dim=input_dim) + self.su_layers = nn.ModuleList( + SULayer( + input_dim=input_dim, + output_dim=output_dim, + upsample_stride=uss, + subband_shape=sbs, + sd_interval=sdi, + ) + for i, (uss, sbs, sdi) in enumerate( + zip(upsample_strides, subband_shapes, sd_intervals) + ) + ) + + def forward(self, x: torch.Tensor, x_skip: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the SUBlock. + + Args: + - x (torch.Tensor): Input tensor of shape (B, Fi-1, T, Ci-1). + - x_skip (torch.Tensor): Input skip connection tensor of shape (B, Fi-1, T, Ci-1). + + Returns: + - torch.Tensor: Output tensor of shape (B, Fi, T, Ci). 
+ """ + x = self.fusion_layer(x, x_skip) + x = torch.concat([layer(x) for layer in self.su_layers], dim=1) + return x diff --git a/data_pipeline/seperation/models/scnet_unofficial/scnet.py b/data_pipeline/seperation/models/scnet_unofficial/scnet.py new file mode 100644 index 0000000000000000000000000000000000000000..d076f85f1d5ce1345dc9a8c56b6a5aef09f2facc --- /dev/null +++ b/data_pipeline/seperation/models/scnet_unofficial/scnet.py @@ -0,0 +1,249 @@ +''' +SCNet - great paper, great implementation +https://arxiv.org/pdf/2401.13276.pdf +https://github.com/amanteur/SCNet-PyTorch +''' + +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchaudio + +from models.scnet_unofficial.modules import DualPathRNN, SDBlock, SUBlock +from models.scnet_unofficial.utils import compute_sd_layer_shapes, compute_gcr + +from einops import rearrange, pack, unpack +from functools import partial + +from beartype.typing import Tuple, Optional, List, Callable +from beartype import beartype + +def exists(val): + return val is not None + + +def default(v, d): + return v if exists(v) else d + + +def pack_one(t, pattern): + return pack([t], pattern) + + +def unpack_one(t, ps, pattern): + return unpack(t, ps, pattern)[0] + + +class RMSNorm(nn.Module): + def __init__(self, dim): + super().__init__() + self.scale = dim ** 0.5 + self.gamma = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + return F.normalize(x, dim=-1) * self.scale * self.gamma + + +class BandSplit(nn.Module): + @beartype + def __init__( + self, + dim, + dim_inputs: Tuple[int, ...] + ): + super().__init__() + self.dim_inputs = dim_inputs + self.to_features = ModuleList([]) + + for dim_in in dim_inputs: + net = nn.Sequential( + RMSNorm(dim_in), + nn.Linear(dim_in, dim) + ) + + self.to_features.append(net) + + def forward(self, x): + x = x.split(self.dim_inputs, dim=-1) + + outs = [] + for split_input, to_feature in zip(x, self.to_features): + split_output = to_feature(split_input) + outs.append(split_output) + + return torch.stack(outs, dim=-2) + + +class SCNet(nn.Module): + """ + SCNet class implements a source separation network, + which explicitly split the spectrogram of the mixture into several subbands + and introduce a sparsity-based encoder to model different frequency bands. + + Paper: "SCNET: SPARSE COMPRESSION NETWORK FOR MUSIC SOURCE SEPARATION" + Authors: Weinan Tong, Jiaxu Zhu et al. + Link: https://arxiv.org/abs/2401.13276.pdf + + Args: + - n_fft (int): Number of FFTs to determine the frequency dimension of the input. + - dims (List[int]): List of channel dimensions for each block. + - bandsplit_ratios (List[float]): List of ratios for splitting the frequency bands. + - downsample_strides (List[int]): List of stride values for downsampling in each block. + - n_conv_modules (List[int]): List specifying the number of convolutional modules in each block. + - n_rnn_layers (int): Number of recurrent layers in the dual path RNN. + - rnn_hidden_dim (int): Dimensionality of the hidden state in the dual path RNN. + - n_sources (int, optional): Number of sources to be separated. Default is 4. + + Shapes: + - Input: (B, C, T) where + B is batch size, + C is channel dim (mono / stereo), + T is time dim + - Output: (B, N, C, T) where + B is batch size, + N is the number of sources. 
+ C is channel dim (mono / stereo), + T is sequence length, + """ + @beartype + def __init__( + self, + n_fft: int, + dims: List[int], + bandsplit_ratios: List[float], + downsample_strides: List[int], + n_conv_modules: List[int], + n_rnn_layers: int, + rnn_hidden_dim: int, + n_sources: int = 4, + hop_length: int = 1024, + win_length: int = 4096, + stft_window_fn: Optional[Callable] = None, + stft_normalized: bool = False, + **kwargs + ): + """ + Initializes SCNet with input parameters. + """ + super().__init__() + self.assert_input_data( + bandsplit_ratios, + downsample_strides, + n_conv_modules, + ) + + n_blocks = len(dims) - 1 + n_freq_bins = n_fft // 2 + 1 + subband_shapes, sd_intervals = compute_sd_layer_shapes( + input_shape=n_freq_bins, + bandsplit_ratios=bandsplit_ratios, + downsample_strides=downsample_strides, + n_layers=n_blocks, + ) + self.sd_blocks = nn.ModuleList( + SDBlock( + input_dim=dims[i], + output_dim=dims[i + 1], + bandsplit_ratios=bandsplit_ratios, + downsample_strides=downsample_strides, + n_conv_modules=n_conv_modules, + ) + for i in range(n_blocks) + ) + self.dualpath_blocks = DualPathRNN( + n_layers=n_rnn_layers, + input_dim=dims[-1], + hidden_dim=rnn_hidden_dim, + **kwargs + ) + self.su_blocks = nn.ModuleList( + SUBlock( + input_dim=dims[i + 1], + output_dim=dims[i] if i != 0 else dims[i] * n_sources, + subband_shapes=subband_shapes[i], + sd_intervals=sd_intervals[i], + upsample_strides=downsample_strides, + ) + for i in reversed(range(n_blocks)) + ) + self.gcr = compute_gcr(subband_shapes) + + self.stft_kwargs = dict( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + normalized=stft_normalized + ) + + self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), win_length) + self.n_sources = n_sources + self.hop_length = hop_length + + @staticmethod + def assert_input_data(*args): + """ + Asserts that the shapes of input features are equal. + """ + for arg1 in args: + for arg2 in args: + if len(arg1) != len(arg2): + raise ValueError( + f"Shapes of input features {arg1} and {arg2} are not equal." + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Performs forward pass through the SCNet. + + Args: + - x (torch.Tensor): Input tensor of shape (B, C, T). + + Returns: + - torch.Tensor: Output tensor of shape (B, N, C, T). 
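# Worked numbers, offered as a hedged sketch (values are examples, not the repo's config):
# with n_fft = 4096 the STFT at the top of forward() yields n_fft // 2 + 1 = 2049 frequency
# bins, and after the `b c f t r -> b f t (c r)` rearrange the last dim is
# num_channels * 2 (real + imaginary parts), which is the channel size the first SD block
# (input_dim = dims[0]) is expected to consume.
n_fft, num_channels = 4096, 2
n_freq_bins = n_fft // 2 + 1        # 2049 frequency bins fed to the first SD block
first_block_in = num_channels * 2   # 4 for stereo complex-as-channels input
print(n_freq_bins, first_block_in)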
+ """ + + device = x.device + stft_window = self.stft_window_fn(device=device) + + if x.ndim == 2: + x = rearrange(x, 'b t -> b 1 t') + + c = x.shape[1] + + stft_pad = self.hop_length - x.shape[-1] % self.hop_length + x = F.pad(x, (0, stft_pad)) + + # stft + x, ps = pack_one(x, '* t') + x = torch.stft(x, **self.stft_kwargs, window=stft_window, return_complex=True) + x = torch.view_as_real(x) + x = unpack_one(x, ps, '* c f t') + x = rearrange(x, 'b c f t r -> b f t (c r)') + + # encoder part + x_skips = [] + for sd_block in self.sd_blocks: + x, x_skip = sd_block(x) + x_skips.append(x_skip) + + # separation part + x = self.dualpath_blocks(x) + + # decoder part + for su_block, x_skip in zip(self.su_blocks, reversed(x_skips)): + x = su_block(x, x_skip) + + # istft + x = rearrange(x, 'b f t (c r n) -> b n c f t r', c=c, n=self.n_sources, r=2) + x = x.contiguous() + + x = torch.view_as_complex(x) + x = rearrange(x, 'b n c f t -> (b n c) f t') + x = torch.istft(x, **self.stft_kwargs, window=stft_window, return_complex=False) + x = rearrange(x, '(b n c) t -> b n c t', c=c, n=self.n_sources) + + x = x[..., :-stft_pad] + + return x diff --git a/data_pipeline/seperation/models/scnet_unofficial/utils.py b/data_pipeline/seperation/models/scnet_unofficial/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aae1afcd52e8088926ea984e52c9b62ca68be65c --- /dev/null +++ b/data_pipeline/seperation/models/scnet_unofficial/utils.py @@ -0,0 +1,135 @@ +''' +SCNet - great paper, great implementation +https://arxiv.org/pdf/2401.13276.pdf +https://github.com/amanteur/SCNet-PyTorch +''' + +from typing import List, Tuple, Union + +import torch + + +def create_intervals( + splits: List[Union[float, int]] +) -> List[Union[Tuple[float, float], Tuple[int, int]]]: + """ + Create intervals based on splits provided. + + Args: + - splits (List[Union[float, int]]): List of floats or integers representing splits. + + Returns: + - List[Union[Tuple[float, float], Tuple[int, int]]]: List of tuples representing intervals. + """ + start = 0 + return [(start, start := start + split) for split in splits] + + +def get_conv_output_shape( + input_shape: int, + kernel_size: int = 1, + padding: int = 0, + dilation: int = 1, + stride: int = 1, +) -> int: + """ + Compute the output shape of a convolutional layer. + + Args: + - input_shape (int): Input shape. + - kernel_size (int, optional): Kernel size of the convolution. Default is 1. + - padding (int, optional): Padding size. Default is 0. + - dilation (int, optional): Dilation factor. Default is 1. + - stride (int, optional): Stride value. Default is 1. + + Returns: + - int: Output shape. + """ + return int( + (input_shape + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1 + ) + + +def get_convtranspose_output_padding( + input_shape: int, + output_shape: int, + kernel_size: int = 1, + padding: int = 0, + dilation: int = 1, + stride: int = 1, +) -> int: + """ + Compute the output padding for a convolution transpose operation. + + Args: + - input_shape (int): Input shape. + - output_shape (int): Desired output shape. + - kernel_size (int, optional): Kernel size of the convolution. Default is 1. + - padding (int, optional): Padding size. Default is 0. + - dilation (int, optional): Dilation factor. Default is 1. + - stride (int, optional): Stride value. Default is 1. + + Returns: + - int: Output padding. 
+ """ + return ( + output_shape + - (input_shape - 1) * stride + + 2 * padding + - dilation * (kernel_size - 1) + - 1 + ) + + +def compute_sd_layer_shapes( + input_shape: int, + bandsplit_ratios: List[float], + downsample_strides: List[int], + n_layers: int, +) -> Tuple[List[List[int]], List[List[Tuple[int, int]]]]: + """ + Compute the shapes for the subband layers. + + Args: + - input_shape (int): Input shape. + - bandsplit_ratios (List[float]): Ratios for splitting the frequency bands. + - downsample_strides (List[int]): Strides for downsampling in each layer. + - n_layers (int): Number of layers. + + Returns: + - Tuple[List[List[int]], List[List[Tuple[int, int]]]]: Tuple containing subband shapes and convolution shapes. + """ + bandsplit_shapes_list = [] + conv2d_shapes_list = [] + for _ in range(n_layers): + bandsplit_intervals = create_intervals(bandsplit_ratios) + bandsplit_shapes = [ + int(right * input_shape) - int(left * input_shape) + for left, right in bandsplit_intervals + ] + conv2d_shapes = [ + get_conv_output_shape(bs, stride=ds) + for bs, ds in zip(bandsplit_shapes, downsample_strides) + ] + input_shape = sum(conv2d_shapes) + bandsplit_shapes_list.append(bandsplit_shapes) + conv2d_shapes_list.append(create_intervals(conv2d_shapes)) + + return bandsplit_shapes_list, conv2d_shapes_list + + +def compute_gcr(subband_shapes: List[List[int]]) -> float: + """ + Compute the global compression ratio. + + Args: + - subband_shapes (List[List[int]]): List of subband shapes. + + Returns: + - float: Global compression ratio. + """ + t = torch.Tensor(subband_shapes) + gcr = torch.stack( + [(1 - t[i + 1] / t[i]).mean() for i in range(0, len(t) - 1)] + ).mean() + return float(gcr) \ No newline at end of file diff --git a/data_pipeline/seperation/models/segm_models.py b/data_pipeline/seperation/models/segm_models.py new file mode 100644 index 0000000000000000000000000000000000000000..b3a91a62956518c4b4e30807ce7ad11c0938a842 --- /dev/null +++ b/data_pipeline/seperation/models/segm_models.py @@ -0,0 +1,263 @@ +if __name__ == '__main__': + import os + + gpu_use = "2" + print('GPU use: {}'.format(gpu_use)) + os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(gpu_use) + + +import torch +import torch.nn as nn +import segmentation_models_pytorch as smp + + +class STFT: + def __init__(self, config): + self.n_fft = config.n_fft + self.hop_length = config.hop_length + self.window = torch.hann_window(window_length=self.n_fft, periodic=True) + self.dim_f = config.dim_f + + def __call__(self, x): + window = self.window.to(x.device) + batch_dims = x.shape[:-2] + c, t = x.shape[-2:] + x = x.reshape([-1, t]) + x = torch.stft( + x, + n_fft=self.n_fft, + hop_length=self.hop_length, + window=window, + center=True, + return_complex=True + ) + x = torch.view_as_real(x) + x = x.permute([0, 3, 1, 2]) + x = x.reshape([*batch_dims, c, 2, -1, x.shape[-1]]).reshape([*batch_dims, c * 2, -1, x.shape[-1]]) + return x[..., :self.dim_f, :] + + def inverse(self, x): + window = self.window.to(x.device) + batch_dims = x.shape[:-3] + c, f, t = x.shape[-3:] + n = self.n_fft // 2 + 1 + f_pad = torch.zeros([*batch_dims, c, n - f, t]).to(x.device) + x = torch.cat([x, f_pad], -2) + x = x.reshape([*batch_dims, c // 2, 2, n, t]).reshape([-1, 2, n, t]) + x = x.permute([0, 2, 3, 1]) + x = x[..., 0] + x[..., 1] * 1.j + x = torch.istft( + x, + n_fft=self.n_fft, + hop_length=self.hop_length, + window=window, + center=True + ) + x = x.reshape([*batch_dims, 2, -1]) + return x + + +def get_act(act_type): + if act_type == 'gelu': + return 
nn.GELU() + elif act_type == 'relu': + return nn.ReLU() + elif act_type[:3] == 'elu': + alpha = float(act_type.replace('elu', '')) + return nn.ELU(alpha) + else: + raise Exception + + +def get_decoder(config, c): + decoder = None + decoder_options = dict() + if config.model.decoder_type == 'unet': + try: + decoder_options = dict(config.decoder_unet) + except: + pass + decoder = smp.Unet( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'fpn': + try: + decoder_options = dict(config.decoder_fpn) + except: + pass + decoder = smp.FPN( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'unet++': + try: + decoder_options = dict(config.decoder_unet_plus_plus) + except: + pass + decoder = smp.UnetPlusPlus( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'manet': + try: + decoder_options = dict(config.decoder_manet) + except: + pass + decoder = smp.MAnet( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'linknet': + try: + decoder_options = dict(config.decoder_linknet) + except: + pass + decoder = smp.Linknet( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'pspnet': + try: + decoder_options = dict(config.decoder_pspnet) + except: + pass + decoder = smp.PSPNet( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'pspnet': + try: + decoder_options = dict(config.decoder_pspnet) + except: + pass + decoder = smp.PSPNet( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'pan': + try: + decoder_options = dict(config.decoder_pan) + except: + pass + decoder = smp.PAN( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'deeplabv3': + try: + decoder_options = dict(config.decoder_deeplabv3) + except: + pass + decoder = smp.DeepLabV3( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + elif config.model.decoder_type == 'deeplabv3plus': + try: + decoder_options = dict(config.decoder_deeplabv3plus) + except: + pass + decoder = smp.DeepLabV3Plus( + encoder_name=config.model.encoder_name, + encoder_weights="imagenet", + in_channels=c, + classes=c, + **decoder_options, + ) + return decoder + + +class Segm_Models_Net(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + + act = get_act(act_type=config.model.act) + + self.num_target_instruments = 1 if config.training.target_instrument else len(config.training.instruments) + self.num_subbands = config.model.num_subbands + + dim_c = self.num_subbands * config.audio.num_channels * 2 + c = config.model.num_channels + f = config.audio.dim_f // self.num_subbands + + self.first_conv = nn.Conv2d(dim_c, c, 1, 1, 0, bias=False) + + self.unet_model = get_decoder(config, c) + + 
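        # Note: get_decoder above wraps segmentation_models_pytorch -- depending on
        # config.model.decoder_type it returns smp.Unet / FPN / UnetPlusPlus / MAnet /
        # Linknet / PSPNet / PAN / DeepLabV3 / DeepLabV3Plus, always built with
        # in_channels == classes == c, so the c-channel features produced by first_conv
        # are processed like an image and come back with the same channel count for
        # final_conv below.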
self.final_conv = nn.Sequential( + nn.Conv2d(c + dim_c, c, 1, 1, 0, bias=False), + act, + nn.Conv2d(c, self.num_target_instruments * dim_c, 1, 1, 0, bias=False) + ) + + self.stft = STFT(config.audio) + + def cac2cws(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c, k, f // k, t) + x = x.reshape(b, c * k, f // k, t) + return x + + def cws2cac(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c // k, k, f, t) + x = x.reshape(b, c // k, f * k, t) + return x + + def forward(self, x): + + x = self.stft(x) + + mix = x = self.cac2cws(x) + + first_conv_out = x = self.first_conv(x) + + x = x.transpose(-1, -2) + + x = self.unet_model(x) + + x = x.transpose(-1, -2) + + x = x * first_conv_out # reduce artifacts + + x = self.final_conv(torch.cat([mix, x], 1)) + + x = self.cws2cac(x) + + if self.num_target_instruments > 1: + b, c, f, t = x.shape + x = x.reshape(b, self.num_target_instruments, -1, f, t) + + x = self.stft.inverse(x) + return x diff --git a/data_pipeline/seperation/models/upernet_swin_transformers.py b/data_pipeline/seperation/models/upernet_swin_transformers.py new file mode 100644 index 0000000000000000000000000000000000000000..d20e289b43b15dd279fa5f3844ba25fb99eaace8 --- /dev/null +++ b/data_pipeline/seperation/models/upernet_swin_transformers.py @@ -0,0 +1,228 @@ +from functools import partial +import torch +import torch.nn as nn +from transformers import UperNetForSemanticSegmentation + + +class STFT: + def __init__(self, config): + self.n_fft = config.n_fft + self.hop_length = config.hop_length + self.window = torch.hann_window(window_length=self.n_fft, periodic=True) + self.dim_f = config.dim_f + + def __call__(self, x): + window = self.window.to(x.device) + batch_dims = x.shape[:-2] + c, t = x.shape[-2:] + x = x.reshape([-1, t]) + x = torch.stft( + x, + n_fft=self.n_fft, + hop_length=self.hop_length, + window=window, + center=True, + return_complex=True + ) + x = torch.view_as_real(x) + x = x.permute([0, 3, 1, 2]) + x = x.reshape([*batch_dims, c, 2, -1, x.shape[-1]]).reshape([*batch_dims, c * 2, -1, x.shape[-1]]) + return x[..., :self.dim_f, :] + + def inverse(self, x): + window = self.window.to(x.device) + batch_dims = x.shape[:-3] + c, f, t = x.shape[-3:] + n = self.n_fft // 2 + 1 + f_pad = torch.zeros([*batch_dims, c, n - f, t]).to(x.device) + x = torch.cat([x, f_pad], -2) + x = x.reshape([*batch_dims, c // 2, 2, n, t]).reshape([-1, 2, n, t]) + x = x.permute([0, 2, 3, 1]) + x = x[..., 0] + x[..., 1] * 1.j + x = torch.istft( + x, + n_fft=self.n_fft, + hop_length=self.hop_length, + window=window, + center=True + ) + x = x.reshape([*batch_dims, 2, -1]) + return x + + +def get_norm(norm_type): + def norm(c, norm_type): + if norm_type == 'BatchNorm': + return nn.BatchNorm2d(c) + elif norm_type == 'InstanceNorm': + return nn.InstanceNorm2d(c, affine=True) + elif 'GroupNorm' in norm_type: + g = int(norm_type.replace('GroupNorm', '')) + return nn.GroupNorm(num_groups=g, num_channels=c) + else: + return nn.Identity() + + return partial(norm, norm_type=norm_type) + + +def get_act(act_type): + if act_type == 'gelu': + return nn.GELU() + elif act_type == 'relu': + return nn.ReLU() + elif act_type[:3] == 'elu': + alpha = float(act_type.replace('elu', '')) + return nn.ELU(alpha) + else: + raise Exception + + +class Upscale(nn.Module): + def __init__(self, in_c, out_c, scale, norm, act): + super().__init__() + self.conv = nn.Sequential( + norm(in_c), + act, + nn.ConvTranspose2d(in_channels=in_c, out_channels=out_c, kernel_size=scale, 
stride=scale, bias=False) + ) + + def forward(self, x): + return self.conv(x) + + +class Downscale(nn.Module): + def __init__(self, in_c, out_c, scale, norm, act): + super().__init__() + self.conv = nn.Sequential( + norm(in_c), + act, + nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=scale, stride=scale, bias=False) + ) + + def forward(self, x): + return self.conv(x) + + +class TFC_TDF(nn.Module): + def __init__(self, in_c, c, l, f, bn, norm, act): + super().__init__() + + self.blocks = nn.ModuleList() + for i in range(l): + block = nn.Module() + + block.tfc1 = nn.Sequential( + norm(in_c), + act, + nn.Conv2d(in_c, c, 3, 1, 1, bias=False), + ) + block.tdf = nn.Sequential( + norm(c), + act, + nn.Linear(f, f // bn, bias=False), + norm(c), + act, + nn.Linear(f // bn, f, bias=False), + ) + block.tfc2 = nn.Sequential( + norm(c), + act, + nn.Conv2d(c, c, 3, 1, 1, bias=False), + ) + block.shortcut = nn.Conv2d(in_c, c, 1, 1, 0, bias=False) + + self.blocks.append(block) + in_c = c + + def forward(self, x): + for block in self.blocks: + s = block.shortcut(x) + x = block.tfc1(x) + x = x + block.tdf(x) + x = block.tfc2(x) + x = x + s + return x + + +class Swin_UperNet_Model(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + + act = get_act(act_type=config.model.act) + + self.num_target_instruments = 1 if config.training.target_instrument else len(config.training.instruments) + self.num_subbands = config.model.num_subbands + + dim_c = self.num_subbands * config.audio.num_channels * 2 + c = config.model.num_channels + f = config.audio.dim_f // self.num_subbands + + self.first_conv = nn.Conv2d(dim_c, c, 1, 1, 0, bias=False) + + self.swin_upernet_model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-large") + + self.swin_upernet_model.auxiliary_head.classifier = nn.Conv2d(256, c, kernel_size=(1, 1), stride=(1, 1)) + self.swin_upernet_model.decode_head.classifier = nn.Conv2d(512, c, kernel_size=(1, 1), stride=(1, 1)) + self.swin_upernet_model.backbone.embeddings.patch_embeddings.projection = nn.Conv2d(c, 192, kernel_size=(4, 4), stride=(4, 4)) + + self.final_conv = nn.Sequential( + nn.Conv2d(c + dim_c, c, 1, 1, 0, bias=False), + act, + nn.Conv2d(c, self.num_target_instruments * dim_c, 1, 1, 0, bias=False) + ) + + self.stft = STFT(config.audio) + + def cac2cws(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c, k, f // k, t) + x = x.reshape(b, c * k, f // k, t) + return x + + def cws2cac(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c // k, k, f, t) + x = x.reshape(b, c // k, f * k, t) + return x + + def forward(self, x): + + x = self.stft(x) + + mix = x = self.cac2cws(x) + + first_conv_out = x = self.first_conv(x) + + x = x.transpose(-1, -2) + + x = self.swin_upernet_model(x).logits + + x = x.transpose(-1, -2) + + x = x * first_conv_out # reduce artifacts + + x = self.final_conv(torch.cat([mix, x], 1)) + + x = self.cws2cac(x) + + if self.num_target_instruments > 1: + b, c, f, t = x.shape + x = x.reshape(b, self.num_target_instruments, -1, f, t) + + x = self.stft.inverse(x) + return x + + +if __name__ == "__main__": + model = UperNetForSemanticSegmentation.from_pretrained("./results/", ignore_mismatched_sizes=True) + print(model) + print(model.auxiliary_head.classifier) + print(model.decode_head.classifier) + + x = torch.zeros((2, 16, 512, 512), dtype=torch.float32) + res = model(x) + print(res.logits.shape) + model.save_pretrained('./results/') \ No newline at end of file diff 
--git a/data_pipeline/seperation/requirements.txt b/data_pipeline/seperation/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f8f9222d4295418c23357ba4620eca9e024276a7 --- /dev/null +++ b/data_pipeline/seperation/requirements.txt @@ -0,0 +1,24 @@ +torch +numpy +pandas +scipy +soundfile +ml_collections +tqdm +segmentation_models_pytorch==0.3.3 +timm==0.9.2 +audiomentations==0.24.0 +pedalboard==0.8.1 +omegaconf==2.2.3 +beartype==0.14.1 +rotary_embedding_torch==0.3.5 +einops==0.6.1 +librosa +demucs==4.0.0 +transformers==4.35.0 +torchmetrics==0.11.4 +spafe==0.3.2 +protobuf==3.20.3 +torch_audiomentations +asteroid==0.7.0 +auraloss diff --git a/data_pipeline/seperation/train.py b/data_pipeline/seperation/train.py new file mode 100644 index 0000000000000000000000000000000000000000..1820006715367c46e409d7e63a4354b2a25e22ca --- /dev/null +++ b/data_pipeline/seperation/train.py @@ -0,0 +1,515 @@ +# coding: utf-8 +__author__ = 'Roman Solovyev (ZFTurbo): https://github.com/ZFTurbo/' +__version__ = '1.0.3' + +import random +import argparse +import time +import copy +from tqdm import tqdm +import sys +import os +import glob +import torch +import soundfile as sf +import numpy as np +import auraloss +import torch.nn as nn +from torch.optim import Adam, AdamW, SGD +from torch.utils.data import DataLoader +from torch.cuda.amp.grad_scaler import GradScaler +from torch.optim.lr_scheduler import ReduceLROnPlateau +import torch.nn.functional as F + +from dataset import MSSDataset +from utils import demix_track, demix_track_demucs, sdr, get_model_from_config + +import warnings + +warnings.filterwarnings("ignore") + + +def masked_loss(y_, y, q, coarse=True): + # shape = [num_sources, batch_size, num_channels, chunk_size] + loss = torch.nn.MSELoss(reduction='none')(y_, y).transpose(0, 1) + if coarse: + loss = torch.mean(loss, dim=(-1, -2)) + loss = loss.reshape(loss.shape[0], -1) + L = loss.detach() + quantile = torch.quantile(L, q, interpolation='linear', dim=1, keepdim=True) + mask = L < quantile + return (loss * mask).mean() + + +def manual_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if multi-GPU + torch.backends.cudnn.deterministic = True + os.environ["PYTHONHASHSEED"] = str(seed) + + +def load_not_compatible_weights(model, weights, verbose=False): + new_model = model.state_dict() + old_model = torch.load(weights) + if 'state' in old_model: + # Fix for htdemucs weights loading + old_model = old_model['state'] + + for el in new_model: + if el in old_model: + if verbose: + print('Match found for {}!'.format(el)) + if new_model[el].shape == old_model[el].shape: + if verbose: + print('Action: Just copy weights!') + new_model[el] = old_model[el] + else: + if len(new_model[el].shape) != len(old_model[el].shape): + if verbose: + print('Action: Different dimension! Too lazy to write the code... 
Skip it') + else: + if verbose: + print('Shape is different: {} != {}'.format(tuple(new_model[el].shape), tuple(old_model[el].shape))) + ln = len(new_model[el].shape) + max_shape = [] + slices_old = [] + slices_new = [] + for i in range(ln): + max_shape.append(max(new_model[el].shape[i], old_model[el].shape[i])) + slices_old.append(slice(0, old_model[el].shape[i])) + slices_new.append(slice(0, new_model[el].shape[i])) + # print(max_shape) + # print(slices_old, slices_new) + slices_old = tuple(slices_old) + slices_new = tuple(slices_new) + max_matrix = np.zeros(max_shape, dtype=np.float32) + for i in range(ln): + max_matrix[slices_old] = old_model[el].cpu().numpy() + max_matrix = torch.from_numpy(max_matrix) + new_model[el] = max_matrix[slices_new] + else: + if verbose: + print('Match not found for {}!'.format(el)) + model.load_state_dict( + new_model + ) + +def valid(model, args, config, device, verbose=False): + # For multiGPU extract single model + if len(args.device_ids) > 1: + model = model.module + + model.eval() + all_mixtures_path = [] + for valid_path in args.valid_path: + part = sorted(glob.glob(valid_path + '/*/mixture.wav')) + if len(part) == 0: + print('No validation data found in: {}'.format(valid_path)) + all_mixtures_path += part + if verbose: + print('Total mixtures: {}'.format(len(all_mixtures_path))) + + instruments = config.training.instruments + if config.training.target_instrument is not None: + instruments = [config.training.target_instrument] + + all_sdr = dict() + for instr in config.training.instruments: + all_sdr[instr] = [] + + if not verbose: + all_mixtures_path = tqdm(all_mixtures_path) + + pbar_dict = {} + for path in all_mixtures_path: + mix, sr = sf.read(path) + folder = os.path.dirname(path) + if verbose: + print('Song: {}'.format(os.path.basename(folder))) + mixture = torch.tensor(mix.T, dtype=torch.float32) + if args.model_type == 'htdemucs': + res = demix_track_demucs(config, model, mixture, device) + else: + res = demix_track(config, model, mixture, device) + for instr in instruments: + if instr != 'other' or config.training.other_fix is False: + track, sr1 = sf.read(folder + '/{}.wav'.format(instr)) + else: + # other is actually instrumental + track, sr1 = sf.read(folder + '/{}.wav'.format('vocals')) + track = mix - track + # sf.write("{}.wav".format(instr), res[instr].T, sr, subtype='FLOAT') + references = np.expand_dims(track, axis=0) + estimates = np.expand_dims(res[instr].T, axis=0) + sdr_val = sdr(references, estimates)[0] + if verbose: + print(instr, res[instr].shape, sdr_val) + all_sdr[instr].append(sdr_val) + pbar_dict['sdr_{}'.format(instr)] = sdr_val + if not verbose: + all_mixtures_path.set_postfix(pbar_dict) + + sdr_avg = 0.0 + for instr in instruments: + sdr_val = np.array(all_sdr[instr]).mean() + print("Instr SDR {}: {:.4f}".format(instr, sdr_val)) + sdr_avg += sdr_val + sdr_avg /= len(instruments) + if len(instruments) > 1: + print('SDR Avg: {:.4f}'.format(sdr_avg)) + return sdr_avg + + +def proc_list_of_files( + mixture_paths, + model, + args, + config, + device, + verbose=False, +): + instruments = config.training.instruments + if config.training.target_instrument is not None: + instruments = [config.training.target_instrument] + + all_sdr = dict() + for instr in config.training.instruments: + all_sdr[instr] = [] + + for path in mixture_paths: + mix, sr = sf.read(path) + folder = os.path.dirname(path) + folder_name = os.path.abspath(folder) + if verbose: + print('Song: {}'.format(folder_name)) + mixture = torch.tensor(mix.T, 
dtype=torch.float32) + if args.model_type == 'htdemucs': + res = demix_track_demucs(config, model, mixture, device) + else: + res = demix_track(config, model, mixture, device) + if 1: + pbar_dict = {} + for instr in instruments: + if instr != 'other' or config.training.other_fix is False: + try: + track, sr1 = sf.read(folder + '/{}.wav'.format(instr)) + except Exception as e: + # print('No data for stem: {}. Skip!'.format(instr)) + continue + else: + # other is actually instrumental + track, sr1 = sf.read(folder + '/{}.wav'.format('vocals')) + track = mix - track + + references = np.expand_dims(track, axis=0) + estimates = np.expand_dims(res[instr].T, axis=0) + sdr_val = sdr(references, estimates)[0] + if verbose: + print(instr, res[instr].shape, sdr_val) + all_sdr[instr].append(sdr_val) + pbar_dict['sdr_{}'.format(instr)] = sdr_val + + try: + mixture_paths.set_postfix(pbar_dict) + except Exception as e: + pass + + return all_sdr + + +def valid_mp(proc_id, queue, all_mixtures_path, model, args, config, device, return_dict): + m1 = model + # m1 = copy.deepcopy(m1) + m1 = m1.eval().to(device) + if proc_id == 0: + progress_bar = tqdm(total=len(all_mixtures_path)) + all_sdr = dict() + for instr in config.training.instruments: + all_sdr[instr] = [] + while True: + current_step, path = queue.get() + if path is None: # check for sentinel value + break + sdr_single = proc_list_of_files([path], m1, args, config, device, False) + pbar_dict = {} + for instr in config.training.instruments: + all_sdr[instr] += sdr_single[instr] + if len(sdr_single[instr]) > 0: + pbar_dict['sdr_{}'.format(instr)] = "{:.4f}".format(sdr_single[instr][0]) + if proc_id == 0: + progress_bar.update(current_step - progress_bar.n) + progress_bar.set_postfix(pbar_dict) + # print(f"Inference on process {proc_id}", all_sdr) + return_dict[proc_id] = all_sdr + return + + +def valid_multi_gpu(model, args, config, verbose=False): + device_ids = args.device_ids + model = model.to('cpu') + + # For multiGPU extract single model + if len(device_ids) > 1: + model = model.module + + all_mixtures_path = [] + for valid_path in args.valid_path: + part = sorted(glob.glob(valid_path + '/*/mixture.wav')) + if len(part) == 0: + print('No validation data found in: {}'.format(valid_path)) + all_mixtures_path += part + + model = model.to('cpu') + torch.cuda.empty_cache() + queue = torch.multiprocessing.Queue() + processes = [] + return_dict = torch.multiprocessing.Manager().dict() + for i, device in enumerate(device_ids): + if torch.cuda.is_available(): + device = 'cuda:{}'.format(device) + else: + device = 'cpu' + p = torch.multiprocessing.Process(target=valid_mp, args=(i, queue, all_mixtures_path, model, args, config, device, return_dict)) + p.start() + processes.append(p) + for i, path in enumerate(all_mixtures_path): + queue.put((i, path)) + for _ in range(len(device_ids)): + queue.put((None, None)) # sentinel value to signal subprocesses to exit + for p in processes: + p.join() # wait for all subprocesses to finish + + all_sdr = dict() + for instr in config.training.instruments: + all_sdr[instr] = [] + for i in range(len(device_ids)): + all_sdr[instr] += return_dict[i][instr] + + instruments = config.training.instruments + if config.training.target_instrument is not None: + instruments = [config.training.target_instrument] + + sdr_avg = 0.0 + for instr in instruments: + sdr_val = np.array(all_sdr[instr]).mean() + print("Instr SDR {}: {:.4f}".format(instr, sdr_val)) + sdr_avg += sdr_val + sdr_avg /= len(instruments) + if len(instruments) > 1: + 
print('SDR Avg: {:.4f}'.format(sdr_avg)) + return sdr_avg + + +def train_model(args): + parser = argparse.ArgumentParser() + parser.add_argument("--model_type", type=str, default='mdx23c', help="One of mdx23c, htdemucs, segm_models, mel_band_roformer, bs_roformer, swin_upernet, bandit") + parser.add_argument("--config_path", type=str, help="path to config file") + parser.add_argument("--start_check_point", type=str, default='', help="Initial checkpoint to start training") + parser.add_argument("--results_path", type=str, help="path to folder where results will be stored (weights, metadata)") + parser.add_argument("--data_path", nargs="+", type=str, help="Dataset data paths. You can provide several folders.") + parser.add_argument("--dataset_type", type=int, default=1, help="Dataset type. Must be one of: 1, 2, 3 or 4. Details here: https://github.com/ZFTurbo/Music-Source-Separation-Training/blob/main/docs/dataset_types.md") + parser.add_argument("--valid_path", nargs="+", type=str, help="validation data paths. You can provide several folders.") + parser.add_argument("--num_workers", type=int, default=0, help="dataloader num_workers") + parser.add_argument("--pin_memory", type=bool, default=False, help="dataloader pin_memory") + parser.add_argument("--seed", type=int, default=0, help="random seed") + parser.add_argument("--device_ids", nargs='+', type=int, default=[0], help='list of gpu ids') + parser.add_argument("--use_multistft_loss", action='store_true', help="Use MultiSTFT Loss (from auraloss package)") + parser.add_argument("--use_mse_loss", action='store_true', help="Use default MSE loss") + parser.add_argument("--use_l1_loss", action='store_true', help="Use L1 loss") + if args is None: + args = parser.parse_args() + else: + args = parser.parse_args(args) + + manual_seed(args.seed + int(time.time())) + torch.backends.cudnn.benchmark = True + torch.backends.cudnn.deterministic = False # Fix possible slow down with dilation convolutions + torch.multiprocessing.set_start_method('spawn') + + model, config = get_model_from_config(args.model_type, args.config_path) + print("Instruments: {}".format(config.training.instruments)) + + if not os.path.isdir(args.results_path): + os.mkdir(args.results_path) + + use_amp = True + try: + use_amp = config.training.use_amp + except: + pass + + device_ids = args.device_ids + batch_size = config.training.batch_size * len(device_ids) + + trainset = MSSDataset( + config, + args.data_path, + batch_size=batch_size, + metadata_path=os.path.join(args.results_path, 'metadata_{}.pkl'.format(args.dataset_type)), + dataset_type=args.dataset_type, + ) + + train_loader = DataLoader( + trainset, + batch_size=batch_size, + shuffle=True, + num_workers=args.num_workers, + pin_memory=args.pin_memory + ) + + if args.start_check_point != '': + print('Start from checkpoint: {}'.format(args.start_check_point)) + if 1: + load_not_compatible_weights(model, args.start_check_point, verbose=False) + else: + model.load_state_dict( + torch.load(args.start_check_point) + ) + + if torch.cuda.is_available(): + if len(device_ids) <= 1: + print('Use single GPU: {}'.format(device_ids)) + device = torch.device(f'cuda:{device_ids[0]}') + model = model.to(device) + else: + print('Use multi GPU: {}'.format(device_ids)) + device = torch.device(f'cuda:{device_ids[0]}') + model = nn.DataParallel(model, device_ids=device_ids).to(device) + else: + device = 'cpu' + print('CUDA is not avilable. Run training on CPU. 
It will be very slow...') + model = model.to(device) + + if 0: + valid_multi_gpu(model, args, config, verbose=True) + + if config.training.optimizer == 'adam': + optimizer = Adam(model.parameters(), lr=config.training.lr) + elif config.training.optimizer == 'adamw': + optimizer = AdamW(model.parameters(), lr=config.training.lr) + elif config.training.optimizer == 'sgd': + print('Use SGD optimizer') + optimizer = SGD(model.parameters(), lr=config.training.lr, momentum=0.999) + else: + print('Unknown optimizer: {}'.format(config.training.optimizer)) + exit() + + gradient_accumulation_steps = 1 + try: + gradient_accumulation_steps = int(config.training.gradient_accumulation_steps) + except: + pass + + print("Patience: {} Reduce factor: {} Batch size: {} Grad accum steps: {} Effective batch size: {}".format( + config.training.patience, + config.training.reduce_factor, + batch_size, + gradient_accumulation_steps, + batch_size * gradient_accumulation_steps, + )) + # Reduce LR if no SDR improvements for several epochs + scheduler = ReduceLROnPlateau(optimizer, 'max', patience=config.training.patience, factor=config.training.reduce_factor) + + if args.use_multistft_loss: + try: + loss_options = dict(config.loss_multistft) + except: + loss_options = dict() + print('Loss options: {}'.format(loss_options)) + loss_multistft = auraloss.freq.MultiResolutionSTFTLoss( + **loss_options + ) + + scaler = GradScaler() + print('Train for: {}'.format(config.training.num_epochs)) + best_sdr = -100 + for epoch in range(config.training.num_epochs): + model.train().to(device) + print('Train epoch: {} Learning rate: {}'.format(epoch, optimizer.param_groups[0]['lr'])) + loss_val = 0. + total = 0 + + # total_loss = None + pbar = tqdm(train_loader) + for i, (batch, mixes) in enumerate(pbar): + y = batch.to(device) + x = mixes.to(device) # mixture + + with torch.cuda.amp.autocast(enabled=use_amp): + if args.model_type in ['mel_band_roformer', 'bs_roformer']: + # loss is computed in forward pass + loss = model(x, y) + if type(device_ids) != int: + # If it's multiple GPUs sum partial loss + loss = loss.mean() + else: + y_ = model(x) + if args.use_multistft_loss: + y1_ = torch.reshape(y_, (y_.shape[0], y_.shape[1] * y_.shape[2], y_.shape[3])) + y1 = torch.reshape(y, (y.shape[0], y.shape[1] * y.shape[2], y.shape[3])) + loss = loss_multistft(y1_, y1) + # We can use many losses at the same time + if args.use_mse_loss: + loss += 1000 * nn.MSELoss()(y1_, y1) + if args.use_l1_loss: + loss += 1000 * F.l1_loss(y1_, y1) + elif args.use_mse_loss: + loss = nn.MSELoss()(y_, y) + elif args.use_l1_loss: + loss = F.l1_loss(y_, y) + else: + loss = masked_loss( + y_, + y, + q=config.training.q, + coarse=config.training.coarse_loss_clip + ) + + loss /= gradient_accumulation_steps + scaler.scale(loss).backward() + if config.training.grad_clip: + nn.utils.clip_grad_norm_(model.parameters(), config.training.grad_clip) + + if ((i + 1) % gradient_accumulation_steps == 0) or (i == len(train_loader) - 1): + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad(set_to_none=True) + + li = loss.item() * gradient_accumulation_steps + loss_val += li + total += 1 + pbar.set_postfix({'loss': 100 * li, 'avg_loss': 100 * loss_val / (i + 1)}) + loss.detach() + + print('Training loss: {:.6f}'.format(loss_val / total)) + + # Save last + store_path = args.results_path + '/last_{}.ckpt'.format(args.model_type) + state_dict = model.state_dict() if len(device_ids) <= 1 else model.module.state_dict() + torch.save( + state_dict, + store_path + ) + + # if 
you have problem with multiproc validation change 0 to 1 + if 0: + sdr_avg = valid(model, args, config, device, verbose=False) + else: + sdr_avg = valid_multi_gpu(model, args, config, verbose=False) + if sdr_avg > best_sdr: + store_path = args.results_path + '/model_{}_ep_{}_sdr_{:.4f}.ckpt'.format(args.model_type, epoch, sdr_avg) + print('Store weights: {}'.format(store_path)) + state_dict = model.state_dict() if len(device_ids) <= 1 else model.module.state_dict() + torch.save( + state_dict, + store_path + ) + best_sdr = sdr_avg + scheduler.step(sdr_avg) + + +if __name__ == "__main__": + train_model(None) diff --git a/data_pipeline/seperation/utils.py b/data_pipeline/seperation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3541deb5dc8abceb418996cdae58bc6f3ec9215c --- /dev/null +++ b/data_pipeline/seperation/utils.py @@ -0,0 +1,200 @@ +# coding: utf-8 +__author__ = 'Roman Solovyev (ZFTurbo): https://github.com/ZFTurbo/' + +import time +import numpy as np +import torch +import torch.nn as nn +import yaml +from ml_collections import ConfigDict +from omegaconf import OmegaConf + + +def get_model_from_config(model_type, config_path): + with open(config_path) as f: + if model_type == 'htdemucs': + config = OmegaConf.load(config_path) + else: + config = ConfigDict(yaml.load(f, Loader=yaml.FullLoader)) + + if model_type == 'mdx23c': + from models.mdx23c_tfc_tdf_v3 import TFC_TDF_net + model = TFC_TDF_net(config) + elif model_type == 'htdemucs': + from models.demucs4ht import get_model + model = get_model(config) + elif model_type == 'segm_models': + from models.segm_models import Segm_Models_Net + model = Segm_Models_Net(config) + elif model_type == 'mel_band_roformer': + from models.bs_roformer import MelBandRoformer + model = MelBandRoformer( + **dict(config.model) + ) + elif model_type == 'bs_roformer': + from models.bs_roformer import BSRoformer + model = BSRoformer( + **dict(config.model) + ) + elif model_type == 'swin_upernet': + from models.upernet_swin_transformers import Swin_UperNet_Model + model = Swin_UperNet_Model(config) + elif model_type == 'bandit': + from models.bandit.core.model import MultiMaskMultiSourceBandSplitRNNSimple + model = MultiMaskMultiSourceBandSplitRNNSimple( + **config.model + ) + elif model_type == 'scnet_unofficial': + from models.scnet_unofficial import SCNet + model = SCNet( + **config.model + ) + elif model_type == 'scnet': + from models.scnet import SCNet + model = SCNet( + **config.model + ) + else: + print('Unknown model: {}'.format(model_type)) + model = None + + return model, config + + +def demix_track(config, model, mix, device): + C = config.audio.chunk_size + N = config.inference.num_overlap + fade_size = C // 10 + step = int(C // N) + border = C - step + batch_size = config.inference.batch_size + + length_init = mix.shape[-1] + + # Do pad from the beginning and end to account floating window results better + if length_init > 2 * border and (border > 0): + mix = nn.functional.pad(mix, (border, border), mode='reflect') + + # Prepare windows arrays (do 1 time for speed up). 
This trick repairs click problems on the edges of segment + window_size = C + fadein = torch.linspace(0, 1, fade_size) + fadeout = torch.linspace(1, 0, fade_size) + window_start = torch.ones(window_size) + window_middle = torch.ones(window_size) + window_finish = torch.ones(window_size) + window_start[-fade_size:] *= fadeout # First audio chunk, no fadein + window_finish[:fade_size] *= fadein # Last audio chunk, no fadeout + window_middle[-fade_size:] *= fadeout + window_middle[:fade_size] *= fadein + + with torch.cuda.amp.autocast(): + with torch.inference_mode(): + if config.training.target_instrument is not None: + req_shape = (1, ) + tuple(mix.shape) + else: + req_shape = (len(config.training.instruments),) + tuple(mix.shape) + + result = torch.zeros(req_shape, dtype=torch.float32) + counter = torch.zeros(req_shape, dtype=torch.float32) + i = 0 + batch_data = [] + batch_locations = [] + while i < mix.shape[1]: + # print(i, i + C, mix.shape[1]) + part = mix[:, i:i + C].to(device) + length = part.shape[-1] + if length < C: + if length > C // 2 + 1: + part = nn.functional.pad(input=part, pad=(0, C - length), mode='reflect') + else: + part = nn.functional.pad(input=part, pad=(0, C - length, 0, 0), mode='constant', value=0) + batch_data.append(part) + batch_locations.append((i, length)) + i += step + + if len(batch_data) >= batch_size or (i >= mix.shape[1]): + arr = torch.stack(batch_data, dim=0) + x = model(arr) + + window = window_middle + if i - step == 0: # First audio chunk, no fadein + window = window_start + elif i >= mix.shape[1]: # Last audio chunk, no fadeout + window = window_finish + + for j in range(len(batch_locations)): + start, l = batch_locations[j] + result[..., start:start+l] += x[j][..., :l].cpu() * window[..., :l] + counter[..., start:start+l] += window[..., :l] + + batch_data = [] + batch_locations = [] + + estimated_sources = result / counter + estimated_sources = estimated_sources.cpu().numpy() + np.nan_to_num(estimated_sources, copy=False, nan=0.0) + + if length_init > 2 * border and (border > 0): + # Remove pad + estimated_sources = estimated_sources[..., border:-border] + + if config.training.target_instrument is None: + return {k: v for k, v in zip(config.training.instruments, estimated_sources)} + else: + return {k: v for k, v in zip([config.training.target_instrument], estimated_sources)} + + +def demix_track_demucs(config, model, mix, device): + S = len(config.training.instruments) + C = config.training.samplerate * config.training.segment + N = config.inference.num_overlap + batch_size = config.inference.batch_size + step = C // N + # print(S, C, N, step, mix.shape, mix.device) + + with torch.cuda.amp.autocast(enabled=config.training.use_amp): + with torch.inference_mode(): + req_shape = (S, ) + tuple(mix.shape) + result = torch.zeros(req_shape, dtype=torch.float32) + counter = torch.zeros(req_shape, dtype=torch.float32) + i = 0 + batch_data = [] + batch_locations = [] + while i < mix.shape[1]: + # print(i, i + C, mix.shape[1]) + part = mix[:, i:i + C].to(device) + length = part.shape[-1] + if length < C: + part = nn.functional.pad(input=part, pad=(0, C - length, 0, 0), mode='constant', value=0) + batch_data.append(part) + batch_locations.append((i, length)) + i += step + + if len(batch_data) >= batch_size or (i >= mix.shape[1]): + arr = torch.stack(batch_data, dim=0) + x = model(arr) + for j in range(len(batch_locations)): + start, l = batch_locations[j] + result[..., start:start+l] += x[j][..., :l].cpu() + counter[..., start:start+l] += 1. 
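                        # Overlap-add bookkeeping (descriptive comment): each chunk's estimate
                        # is summed into `result` over its [start, start+l) span while `counter`
                        # records how many chunks covered each sample, so the later
                        # `result / counter` averages the overlapping passes (no crossfade
                        # window here, unlike demix_track above).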
+ batch_data = [] + batch_locations = [] + + estimated_sources = result / counter + estimated_sources = estimated_sources.cpu().numpy() + np.nan_to_num(estimated_sources, copy=False, nan=0.0) + + if S > 1: + return {k: v for k, v in zip(config.training.instruments, estimated_sources)} + else: + return estimated_sources + + +def sdr(references, estimates): + # compute SDR for one song + delta = 1e-7 # avoid numerical errors + num = np.sum(np.square(references), axis=(1, 2)) + den = np.sum(np.square(references - estimates), axis=(1, 2)) + num += delta + den += delta + return 10 * np.log10(num / den) diff --git a/data_pipeline/seperation/valid.py b/data_pipeline/seperation/valid.py new file mode 100644 index 0000000000000000000000000000000000000000..91912f9ea28860a78a14a19f97d7e01aa4b5c586 --- /dev/null +++ b/data_pipeline/seperation/valid.py @@ -0,0 +1,256 @@ +# coding: utf-8 +__author__ = 'Roman Solovyev (ZFTurbo): https://github.com/ZFTurbo/' + +import argparse +import time +from tqdm import tqdm +import sys +import os +import glob +import copy +import torch +import soundfile as sf +import numpy as np +import torch.nn as nn +import multiprocessing + +import warnings +warnings.filterwarnings("ignore") + +from utils import demix_track, demix_track_demucs, sdr, get_model_from_config + + +def proc_list_of_files( + mixture_paths, + model, + args, + config, + device, + verbose=False, + is_tqdm=True +): + instruments = config.training.instruments + if config.training.target_instrument is not None: + instruments = [config.training.target_instrument] + + if args.store_dir != "": + if not os.path.isdir(args.store_dir): + os.mkdir(args.store_dir) + + all_sdr = dict() + for instr in config.training.instruments: + all_sdr[instr] = [] + + if is_tqdm: + mixture_paths = tqdm(mixture_paths) + + for path in mixture_paths: + mix, sr = sf.read(path) + folder = os.path.dirname(path) + folder_name = os.path.abspath(folder) + if verbose: + print('Song: {}'.format(folder_name)) + mixture = torch.tensor(mix.T, dtype=torch.float32) + if args.model_type == 'htdemucs': + res = demix_track_demucs(config, model, mixture, device) + else: + res = demix_track(config, model, mixture, device) + if 1: + pbar_dict = {} + for instr in instruments: + if instr != 'other' or config.training.other_fix is False: + try: + track, sr1 = sf.read(folder + '/{}.wav'.format(instr)) + except Exception as e: + # print('No data for stem: {}. 
Skip!'.format(instr)) + continue + else: + # other is actually instrumental + track, sr1 = sf.read(folder + '/{}.wav'.format('vocals')) + track = mix - track + + if args.store_dir != "": + sf.write("{}/{}_{}.wav".format(args.store_dir, os.path.basename(folder), instr), res[instr].T, sr, + subtype='FLOAT') + references = np.expand_dims(track, axis=0) + estimates = np.expand_dims(res[instr].T, axis=0) + sdr_val = sdr(references, estimates)[0] + if verbose: + print(instr, res[instr].shape, sdr_val) + all_sdr[instr].append(sdr_val) + pbar_dict['sdr_{}'.format(instr)] = sdr_val + + try: + mixture_paths.set_postfix(pbar_dict) + except Exception as e: + pass + + return all_sdr + + +def valid(model, args, config, device, verbose=False): + start_time = time.time() + model.eval().to(device) + all_mixtures_path = glob.glob(args.valid_path + '/*/mixture.wav') + print('Total mixtures: {}'.format(len(all_mixtures_path))) + print('Overlap: {} Batch size: {}'.format(config.inference.num_overlap, config.inference.batch_size)) + + all_sdr = proc_list_of_files(all_mixtures_path, model, args, config, device, verbose, not verbose) + + instruments = config.training.instruments + if config.training.target_instrument is not None: + instruments = [config.training.target_instrument] + + if args.store_dir != "": + out = open(args.store_dir + '/results.txt', 'w') + out.write(str(args) + "\n") + print("Num overlap: {}".format(config.inference.num_overlap)) + sdr_avg = 0.0 + for instr in instruments: + sdr_val = np.array(all_sdr[instr]).mean() + print("Instr SDR {}: {:.4f}".format(instr, sdr_val)) + if args.store_dir != "": + out.write("Instr SDR {}: {:.4f}".format(instr, sdr_val) + "\n") + sdr_avg += sdr_val + sdr_avg /= len(instruments) + if len(instruments) > 1: + print('SDR Avg: {:.4f}'.format(sdr_avg)) + if args.store_dir != "": + out.write('SDR Avg: {:.4f}'.format(sdr_avg) + "\n") + print("Elapsed time: {:.2f} sec".format(time.time() - start_time)) + if args.store_dir != "": + out.write("Elapsed time: {:.2f} sec".format(time.time() - start_time) + "\n") + out.close() + + return sdr_avg + + +def valid_mp(proc_id, queue, all_mixtures_path, model, args, config, device, return_dict): + m1 = model.eval().to(device) + if proc_id == 0: + progress_bar = tqdm(total=len(all_mixtures_path)) + all_sdr = dict() + for instr in config.training.instruments: + all_sdr[instr] = [] + while True: + current_step, path = queue.get() + if path is None: # check for sentinel value + break + sdr_single = proc_list_of_files([path], m1, args, config, device, False, False) + pbar_dict = {} + for instr in config.training.instruments: + all_sdr[instr] += sdr_single[instr] + if len(sdr_single[instr]) > 0: + pbar_dict['sdr_{}'.format(instr)] = "{:.4f}".format(sdr_single[instr][0]) + if proc_id == 0: + progress_bar.update(current_step - progress_bar.n) + progress_bar.set_postfix(pbar_dict) + # print(f"Inference on process {proc_id}", all_sdr) + return_dict[proc_id] = all_sdr + return + + +def valid_multi_gpu(model, args, config, device_ids, verbose=False): + start_time = time.time() + all_mixtures_path = glob.glob(args.valid_path + '/*/mixture.wav') + print('Total mixtures: {}'.format(len(all_mixtures_path))) + print('Overlap: {} Batch size: {}'.format(config.inference.num_overlap, config.inference.batch_size)) + + model = model.to('cpu') + queue = torch.multiprocessing.Queue() + processes = [] + return_dict = torch.multiprocessing.Manager().dict() + for i, device in enumerate(device_ids): + if torch.cuda.is_available(): + device = 
'cuda:{}'.format(device) + else: + device = 'cpu' + p = torch.multiprocessing.Process(target=valid_mp, args=(i, queue, all_mixtures_path, model, args, config, device, return_dict)) + p.start() + processes.append(p) + for i, path in enumerate(all_mixtures_path): + queue.put((i, path)) + for _ in range(len(device_ids)): + queue.put((None, None)) # sentinel value to signal subprocesses to exit + for p in processes: + p.join() # wait for all subprocesses to finish + + all_sdr = dict() + for instr in config.training.instruments: + all_sdr[instr] = [] + for i in range(len(device_ids)): + all_sdr[instr] += return_dict[i][instr] + + instruments = config.training.instruments + if config.training.target_instrument is not None: + instruments = [config.training.target_instrument] + + if args.store_dir != "": + out = open(args.store_dir + '/results.txt', 'w') + out.write(str(args) + "\n") + print("Num overlap: {}".format(config.inference.num_overlap)) + sdr_avg = 0.0 + for instr in instruments: + sdr_val = np.array(all_sdr[instr]).mean() + print("Instr SDR {}: {:.4f}".format(instr, sdr_val)) + if args.store_dir != "": + out.write("Instr SDR {}: {:.4f}".format(instr, sdr_val) + "\n") + sdr_avg += sdr_val + sdr_avg /= len(instruments) + if len(instruments) > 1: + print('SDR Avg: {:.4f}'.format(sdr_avg)) + if args.store_dir != "": + out.write('SDR Avg: {:.4f}'.format(sdr_avg) + "\n") + print("Elapsed time: {:.2f} sec".format(time.time() - start_time)) + if args.store_dir != "": + out.write("Elapsed time: {:.2f} sec".format(time.time() - start_time) + "\n") + out.close() + + return sdr_avg + + +def check_validation(args): + parser = argparse.ArgumentParser() + parser.add_argument("--model_type", type=str, default='mdx23c', help="One of mdx23c, htdemucs, segm_models, mel_band_roformer, bs_roformer, swin_upernet, bandit") + parser.add_argument("--config_path", type=str, help="path to config file") + parser.add_argument("--start_check_point", type=str, default='', help="Initial checkpoint to valid weights") + parser.add_argument("--valid_path", type=str, help="validate path") + parser.add_argument("--store_dir", default="", type=str, help="path to store results as wav file") + parser.add_argument("--device_ids", nargs='+', type=int, default=0, help='list of gpu ids') + parser.add_argument("--num_workers", type=int, default=0, help="dataloader num_workers") + parser.add_argument("--pin_memory", type=bool, default=False, help="dataloader pin_memory") + if args is None: + args = parser.parse_args() + else: + args = parser.parse_args(args) + + torch.backends.cudnn.benchmark = True + torch.multiprocessing.set_start_method('spawn') + + model, config = get_model_from_config(args.model_type, args.config_path) + if args.start_check_point != '': + print('Start from checkpoint: {}'.format(args.start_check_point)) + state_dict = torch.load(args.start_check_point) + if args.model_type == 'htdemucs': + # Fix for htdemucs pretrained models + if 'state' in state_dict: + state_dict = state_dict['state'] + model.load_state_dict(state_dict) + + print("Instruments: {}".format(config.training.instruments)) + + device_ids = args.device_ids + if torch.cuda.is_available(): + device = torch.device('cuda:0') + else: + device = 'cpu' + print('CUDA is not available. Run validation on CPU. 
It will be very slow...') + + if torch.cuda.is_available() and len(device_ids) > 1: + valid_multi_gpu(model, args, config, device_ids, verbose=False) + else: + valid(model, args, config, device, verbose=False) + + +if __name__ == "__main__": + check_validation(None) diff --git a/data_pipeline/ssl/dump_km_label.py b/data_pipeline/ssl/dump_km_label.py new file mode 100644 index 0000000000000000000000000000000000000000..ea88ec7b195e63e9797fe27d74b1caaecf5eee34 --- /dev/null +++ b/data_pipeline/ssl/dump_km_label.py @@ -0,0 +1,201 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import logging +import os +import sys + +import numpy as np + +import joblib +import torch + +import torchaudio +import glob +import numpy as np +import torch +import torch.multiprocessing as mp +import torchaudio +import joblib +import librosa +import threading +import math +import numpy as np +import itertools +from tqdm import tqdm +from pathlib import Path +import random +import os +import sys + +LOGGING_INTERVAL = 10 +OFFSET = 0 +NUM_THREADS = 16 +BATCH_SIZE = 1 + + +class ApplyKmeans(object): + def __init__(self, km_path): + self.km_model = joblib.load(km_path) + self.C_np = self.km_model.cluster_centers_.transpose() + self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True) + + self.C = torch.from_numpy(self.C_np) + self.Cnorm = torch.from_numpy(self.Cnorm_np) + if torch.cuda.is_available(): + self.C = self.C.cuda() + self.Cnorm = self.Cnorm.cuda() + + def __call__(self, x): + if isinstance(x, torch.Tensor): + dist = ( + x.pow(2).sum(1, keepdim=True) + - 2 * torch.matmul(x, self.C) + + self.Cnorm + ) + return dist.argmin(dim=1).cpu().numpy() + else: + dist = ( + (x ** 2).sum(1, keepdims=True) + - 2 * np.matmul(x, self.C_np) + + self.Cnorm_np + ) + return np.argmin(dist, axis=1) + +INPUT_DIR = sys.argv[1] +FEATURE_OUTPUT_DIR = Path(sys.argv[2]) + +os.makedirs(FEATURE_OUTPUT_DIR, exist_ok=True) + +def inference(rank, queue: mp.Queue): + apply_kmeans = ApplyKmeans("km_xlsr_1024_18l") + + + while True: + paths = queue.get() + if paths is None: + break + file_path = paths[0] + file_name = os.path.basename(file_path) + + try: + feat = np.load(file_path) + km_feat = apply_kmeans(feat) + np.save(FEATURE_OUTPUT_DIR / f"{file_name}", km_feat) # [:length, :]) + + except Exception as e: + print(e) + raise + #print(f"{e} in {paths} with longest length of {max(lengths)}") + + + + + + + + +def setInterval(interval): + def decorator(function): + def wrapper(*args, **kwargs): + stopped = threading.Event() + + def loop(): # executed in another thread + while not stopped.wait(interval): # until stopped + function(*args, **kwargs) + + t = threading.Thread(target=loop) + t.daemon = True # stop if the program exits + t.start() + return stopped + + return wrapper + + return decorator + + +last_batches = None + + +@setInterval(LOGGING_INTERVAL) +def QueueWatcher(queue): + global last_batches + curr_batches = queue.qsize() + print( + f"Remain: {curr_batches} batches [ {(last_batches-curr_batches)/LOGGING_INTERVAL} batches/s ]" + ) + last_batches = curr_batches + + +if __name__ == "__main__": + mp.set_start_method('spawn',force=True) + FEATURE_OUTPUT_DIR.mkdir(exist_ok=True) + + gpu_num = torch.cuda.device_count() + + + print(f"Running with {NUM_THREADS} threads and batchsize {BATCH_SIZE}") + processes = [] + queue = mp.Queue() + for thread_num in range(NUM_THREADS): + + #rank = thread_num % gpu_num + p = 
mp.Process(target=inference, args=(thread_num, queue)) + p.start() + processes.append(p) + + accum = [] + tmp_file = [] + + # path_list = [] + # for scp in glob.glob(os.path.join(INPUT_DIR, '*.lst')): + # tmp = [x.split('\t')[0] for x in open(scp).readlines()] + # print(len(tmp)) + # path_list = list(set(path_list) | set(tmp)) + # print(len(path_list)) + + + + if os.path.isfile(INPUT_DIR): + path_list = [x.strip() for x in open(INPUT_DIR).readlines()] + else: + #path_list = glob.glob(os.path.join(INPUT_DIR, '*.wav')) + path_list = [os.path.join(INPUT_DIR, x) for x in os.listdir(INPUT_DIR)] + + # for file in tqdm(INPUT_DIR.glob("**/*.wav")): + for file in tqdm(path_list): + file = Path(file) + # if not input_guard(file): + # continue + accum.append(file) + if len(accum) == BATCH_SIZE: + queue.put(accum.copy()) + accum.clear() + # tmp_file.append(file.as_posix()+'\n') + + for _ in range(NUM_THREADS): + queue.put(None) + + last_batches = queue.qsize() + queue_watcher = QueueWatcher(queue) + for p in processes: + p.join() + queue_watcher.set() + + #f_w = open(FILE_LIST,'a') + #f_w.writelines(tmp_file) + #f_w.close() + + + + + + + + + + + + diff --git a/data_pipeline/ssl/extract_xlsr.py b/data_pipeline/ssl/extract_xlsr.py new file mode 100644 index 0000000000000000000000000000000000000000..8ebddfa95b12852dc346cc4d1dc91ca8d871ac44 --- /dev/null +++ b/data_pipeline/ssl/extract_xlsr.py @@ -0,0 +1,238 @@ +import torchaudio +import glob +import numpy as np +import torch +import torch.multiprocessing as mp +import torchaudio +import joblib +import librosa +import threading +import math +import numpy as np +import itertools +from tqdm import tqdm +from pathlib import Path +import random +import os +from xlsr300m import WAV2VEC2_XLSR_300M +import sys + +LOGGING_INTERVAL = 10 +OFFSET = 0 +BATCH_SIZE = 1 + + +INPUT_DIR = sys.argv[1] +OUTPUT_DIR = sys.argv[2] +FEATURE_OUTPUT_DIR = os.path.join(OUTPUT_DIR, "xlsr_18l") +KM_OUTPUT_DIR = os.path.join(OUTPUT_DIR, "xlsr_18l_512") +NUM_THREADS = int(sys.argv[3]) + +os.environ["OMP_NUM_THREADS"] = "4" + +os.makedirs(FEATURE_OUTPUT_DIR, exist_ok=True) +os.makedirs(KM_OUTPUT_DIR, exist_ok=True) + +class ApplyKmeans(object): + def __init__(self, km_path): + self.km_model = joblib.load(km_path) + self.C_np = self.km_model.cluster_centers_.transpose() + self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True) + + self.C = torch.from_numpy(self.C_np) + self.Cnorm = torch.from_numpy(self.Cnorm_np) + if torch.cuda.is_available(): + self.C = self.C.cuda() + self.Cnorm = self.Cnorm.cuda() + + def __call__(self, x): + if isinstance(x, torch.Tensor): + dist = ( + x.pow(2).sum(1, keepdim=True) + - 2 * torch.matmul(x, self.C) + + self.Cnorm + ) + return dist.argmin(dim=1).cpu().numpy() + else: + dist = ( + (x ** 2).sum(1, keepdims=True) + - 2 * np.matmul(x, self.C_np) + + self.Cnorm_np + ) + return np.argmin(dist, axis=1) + +def inference(rank, queue: mp.Queue): + ext_token = False + # def get_audio(path): + # sample, sr = torchaudio.load(path) + # sample = torchaudio.functional.resample(sample, sr, 16000).reshape(-1) + # return sample + #apply_kmeans = ApplyKmeans("km_512_youtube1") + apply_kmeans = ApplyKmeans("km_xlsr_512_18l") + + def get_audio(path): + wav, _ = librosa.load(path, sr=16000) + + wav = torch.FloatTensor(wav) + return wav + + # device = torch.device("cuda", OFFSET + rank) + #device = torch.device("cpu") + device = torch.device(f"cuda:{rank}") + + # bundle=torchaudio.pipelines.WAV2VEC2_XLSR_300M + bundle = WAV2VEC2_XLSR_300M + bundle._normalize_waveform=False + 
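+    # Load the XLS-R 300M checkpoint onto this worker's GPU with gradients
+    # disabled. For each batch of 16 kHz waveforms the layer-18 hidden states
+    # are extracted (one frame per 320 input samples, i.e. a 20 ms hop),
+    # saved as .npy, and additionally quantized to cluster ids with the
+    # km_xlsr_512_18l k-means codebook.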
xlsr=bundle.get_model(dl_kwargs={'model_dir':'.','map_location':'cpu'}) + #xlsr=bundle.get_model(dl_kwargs={'model_dir':'/datablob/v-ziqianning/ckpts','map_location':'cpu'}) + xlsr = xlsr.eval() + xlsr = xlsr.requires_grad_(False) + xlsr = xlsr.to(device) + + + while True: + paths = queue.get() + if paths is None: + break + + #try: + # if os.path.exists(FEATURE_OUTPUT_DIR / f"{file_names[0]}.npy"): + # _ = np.load(FEATURE_OUTPUT_DIR / f"{file_names[0]}.npy") + # continue + #except: + # pass + + try: + file_names = [path.stem for path in paths] + if os.path.exists(os.path.join(FEATURE_OUTPUT_DIR, f"{file_names[0]}.npy"))\ + and os.path.exists(os.path.join(KM_OUTPUT_DIR, f"{file_names[0]}.npy")): + continue + samples = [get_audio(path) for path in paths] + lengths = [math.ceil(sample.shape[-1] / 320) for sample in samples] + batched_samples = torch.nn.utils.rnn.pad_sequence( + samples, batch_first=True + ).to(device) + + + #features = xlsr.extract_features(batched_samples,lengths=None,num_layers=6)[0][-1] + features = xlsr.extract_features(batched_samples,lengths=None,num_layers=18)[0][-1] + # [batch, frame, dim] of layer + + b, t, d = features.shape + + for feature, file_name, length in zip( + features.cpu().numpy(), file_names, lengths + ): + np.save(os.path.join(FEATURE_OUTPUT_DIR, f"{file_name}.npy"), feature) # [:length, :]) + km_feat = apply_kmeans(feature) + np.save(os.path.join(KM_OUTPUT_DIR, f"{file_name}.npy"), km_feat) # [:length, :]) + + except Exception as e: + print(f"{e} in {paths} with longest length of {max(lengths)}") + + + + + + + + +def setInterval(interval): + def decorator(function): + def wrapper(*args, **kwargs): + stopped = threading.Event() + + def loop(): # executed in another thread + while not stopped.wait(interval): # until stopped + function(*args, **kwargs) + + t = threading.Thread(target=loop) + t.daemon = True # stop if the program exits + t.start() + return stopped + + return wrapper + + return decorator + + +last_batches = None + + +@setInterval(LOGGING_INTERVAL) +def QueueWatcher(queue, bar): + global last_batches + curr_batches = queue.qsize() + bar.update(last_batches-curr_batches) + last_batches = curr_batches + + +if __name__ == "__main__": + mp.set_start_method('spawn',force=True) + + gpu_num = torch.cuda.device_count() + + + print(f"Running with {NUM_THREADS} threads and batchsize {BATCH_SIZE}") + processes = [] + queue = mp.Queue() + for thread_num in range(NUM_THREADS): + + rank = thread_num % gpu_num + p = mp.Process(target=inference, args=(rank, queue)) + p.start() + processes.append(p) + + accum = [] + tmp_file = [] + + # path_list = [] + # for scp in glob.glob(os.path.join(INPUT_DIR, '*.lst')): + # tmp = [x.split('\t')[0] for x in open(scp).readlines()] + # print(len(tmp)) + # path_list = list(set(path_list) | set(tmp)) + # print(len(path_list)) + + + + if os.path.isfile(INPUT_DIR): + path_list = [x.strip() for x in open(INPUT_DIR).readlines()] + else: + path_list = glob.glob(os.path.join(INPUT_DIR, '*.wav')) + + # for file in tqdm(INPUT_DIR.glob("**/*.wav")): + for file in tqdm(path_list): + file = Path(file) + # if not input_guard(file): + # continue + accum.append(file) + if len(accum) == BATCH_SIZE: + queue.put(accum.copy()) + accum.clear() + # tmp_file.append(file.as_posix()+'\n') + + for _ in range(NUM_THREADS): + queue.put(None) + + last_batches = queue.qsize() + bar = tqdm(total=last_batches, desc="ssl") + queue_watcher = QueueWatcher(queue, bar) + for p in processes: + p.join() + queue_watcher.set() + + #f_w = open(FILE_LIST,'a') + 
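+    # Typical invocation (hypothetical paths), matching the positional
+    # arguments read above: INPUT_DIR (a filelist or a directory of .wav
+    # files), OUTPUT_DIR, and NUM_THREADS, with worker processes assigned to
+    # the available GPUs round-robin:
+    #
+    #   python extract_xlsr.py vocals_16k.lst features/ 8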
#f_w.writelines(tmp_file) + #f_w.close() + + + + + + + + + + + + diff --git a/data_pipeline/ssl/extract_xlsr_6l.py b/data_pipeline/ssl/extract_xlsr_6l.py new file mode 100644 index 0000000000000000000000000000000000000000..f5f2c80d2e3cf4c418b6a09336cd4665bd85cef3 --- /dev/null +++ b/data_pipeline/ssl/extract_xlsr_6l.py @@ -0,0 +1,228 @@ +import torchaudio +import glob +import numpy as np +import torch +import torch.multiprocessing as mp +import torchaudio +import joblib +import librosa +import threading +import math +import numpy as np +import itertools +from tqdm import tqdm +from pathlib import Path +import random +import os +from xlsr300m import WAV2VEC2_XLSR_300M +import sys + +LOGGING_INTERVAL = 10 +OFFSET = 0 +BATCH_SIZE = 1 + + +INPUT_DIR = sys.argv[1] +OUTPUT_DIR = sys.argv[2] +FEATURE_OUTPUT_DIR = os.path.join(OUTPUT_DIR, "xlsr_bgm_6l") +#KM_OUTPUT_DIR = os.path.join(OUTPUT_DIR, "xlsr_24l_512") +NUM_THREADS = int(sys.argv[3]) + +os.environ["OMP_NUM_THREADS"] = "4" + +os.makedirs(FEATURE_OUTPUT_DIR, exist_ok=True) +#os.makedirs(KM_OUTPUT_DIR, exist_ok=True) + +class ApplyKmeans(object): + def __init__(self, km_path): + self.km_model = joblib.load(km_path) + self.C_np = self.km_model.cluster_centers_.transpose() + self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True) + + self.C = torch.from_numpy(self.C_np) + self.Cnorm = torch.from_numpy(self.Cnorm_np) + if torch.cuda.is_available(): + self.C = self.C.cuda() + self.Cnorm = self.Cnorm.cuda() + + def __call__(self, x): + if isinstance(x, torch.Tensor): + dist = ( + x.pow(2).sum(1, keepdim=True) + - 2 * torch.matmul(x, self.C) + + self.Cnorm + ) + return dist.argmin(dim=1).cpu().numpy() + else: + dist = ( + (x ** 2).sum(1, keepdims=True) + - 2 * np.matmul(x, self.C_np) + + self.Cnorm_np + ) + return np.argmin(dist, axis=1) + +def inference(rank, queue: mp.Queue): + + def get_audio(path): + wav, _ = librosa.load(path, sr=16000) + + wav = torch.FloatTensor(wav) + return wav + + # device = torch.device("cuda", OFFSET + rank) + #device = torch.device("cpu") + device = torch.device(f"cuda:{rank}") + + # bundle=torchaudio.pipelines.WAV2VEC2_XLSR_300M + bundle = WAV2VEC2_XLSR_300M + bundle._normalize_waveform=False + xlsr=bundle.get_model(dl_kwargs={'model_dir':'.','map_location':'cpu'}) + #xlsr=bundle.get_model(dl_kwargs={'model_dir':'/datablob/v-ziqianning/ckpts','map_location':'cpu'}) + xlsr = xlsr.eval() + xlsr = xlsr.requires_grad_(False) + xlsr = xlsr.to(device) + + + while True: + paths = queue.get() + if paths is None: + break + + #try: + # if os.path.exists(FEATURE_OUTPUT_DIR / f"{file_names[0]}.npy"): + # _ = np.load(FEATURE_OUTPUT_DIR / f"{file_names[0]}.npy") + # continue + #except: + # pass + + try: + file_names = [path.stem for path in paths] + samples = [get_audio(path) for path in paths] + lengths = [math.ceil(sample.shape[-1] / 320) for sample in samples] + batched_samples = torch.nn.utils.rnn.pad_sequence( + samples, batch_first=True + ).to(device) + + + features = xlsr.extract_features(batched_samples,lengths=None,num_layers=6)[0][-1] + #features = xlsr.extract_features(batched_samples,lengths=None,num_layers=24)[0][-1] + # [batch, frame, dim] of layer + + b, t, d = features.shape + + for feature, file_name, length in zip( + features.cpu().numpy(), file_names, lengths + ): + np.save(os.path.join(FEATURE_OUTPUT_DIR, f"{file_name}.npy"), feature) # [:length, :]) + #km_feat = apply_kmeans(feature) + #np.save(os.path.join(KM_OUTPUT_DIR, f"{file_name}.npy"), km_feat) # [:length, :]) + + except Exception as e: + print(f"{e} in 
{paths} with longest length of {max(lengths)}") + + + + + + + + +def setInterval(interval): + def decorator(function): + def wrapper(*args, **kwargs): + stopped = threading.Event() + + def loop(): # executed in another thread + while not stopped.wait(interval): # until stopped + function(*args, **kwargs) + + t = threading.Thread(target=loop) + t.daemon = True # stop if the program exits + t.start() + return stopped + + return wrapper + + return decorator + + +last_batches = None + + +@setInterval(LOGGING_INTERVAL) +def QueueWatcher(queue, bar): + global last_batches + curr_batches = queue.qsize() + bar.update(last_batches-curr_batches) + last_batches = curr_batches + + +if __name__ == "__main__": + mp.set_start_method('spawn',force=True) + + gpu_num = torch.cuda.device_count() + + + print(f"Running with {NUM_THREADS} threads and batchsize {BATCH_SIZE}") + processes = [] + queue = mp.Queue() + for thread_num in range(NUM_THREADS): + + rank = thread_num % gpu_num + p = mp.Process(target=inference, args=(rank, queue)) + p.start() + processes.append(p) + + accum = [] + tmp_file = [] + + # path_list = [] + # for scp in glob.glob(os.path.join(INPUT_DIR, '*.lst')): + # tmp = [x.split('\t')[0] for x in open(scp).readlines()] + # print(len(tmp)) + # path_list = list(set(path_list) | set(tmp)) + # print(len(path_list)) + + + + if os.path.isfile(INPUT_DIR): + path_list = [x.strip() for x in open(INPUT_DIR).readlines()] + else: + path_list = glob.glob(os.path.join(INPUT_DIR, '*.wav')) + + # for file in tqdm(INPUT_DIR.glob("**/*.wav")): + for file in tqdm(path_list): + file = Path(file) + # if not input_guard(file): + # continue + accum.append(file) + if len(accum) == BATCH_SIZE: + queue.put(accum.copy()) + accum.clear() + # tmp_file.append(file.as_posix()+'\n') + + for _ in range(NUM_THREADS): + queue.put(None) + + last_batches = queue.qsize() + bar = tqdm(total=last_batches, desc="ssl") + queue_watcher = QueueWatcher(queue, bar) + for p in processes: + p.join() + queue_watcher.set() + + #f_w = open(FILE_LIST,'a') + #f_w.writelines(tmp_file) + #f_w.close() + + + + + + + + + + + + diff --git a/data_pipeline/ssl/km_xlsr_1024_18l b/data_pipeline/ssl/km_xlsr_1024_18l new file mode 100644 index 0000000000000000000000000000000000000000..d8c3f3daa185dfb8499cfe5a63b62d9a77330bff --- /dev/null +++ b/data_pipeline/ssl/km_xlsr_1024_18l @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06f62547ca7c1a25974ca21f582dad49c46c879d8456b51fd7c1ac05f91438d6 +size 4199258 diff --git a/data_pipeline/ssl/km_xlsr_512_18l b/data_pipeline/ssl/km_xlsr_512_18l new file mode 100644 index 0000000000000000000000000000000000000000..848579f4929f761324e55797f354e0ce4f87c0d3 --- /dev/null +++ b/data_pipeline/ssl/km_xlsr_512_18l @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35b2728c3745a1b797bc836d6a7dd0ca16804560b8735868db747606d3052b72 +size 2100058 diff --git a/data_pipeline/ssl/learn_kmeans.py b/data_pipeline/ssl/learn_kmeans.py new file mode 100644 index 0000000000000000000000000000000000000000..8401748eea6d37229ebd886a8f0f98eb0f424e14 --- /dev/null +++ b/data_pipeline/ssl/learn_kmeans.py @@ -0,0 +1,145 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
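+# Hypothetical end-to-end example (file names are placeholders): pack the
+# per-utterance XLS-R features written by extract_xlsr.py into one
+# mmap-able array with pack_npy.py, then fit a 1024-way MiniBatchKMeans
+# codebook on it:
+#
+#   python pack_npy.py features/xlsr_18l feat.npy feat.len 0.1
+#   python learn_kmeans.py feat.npy feat.len km_xlsr_1024_18l 1024 --percent -1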
+ +import logging +import os +import sys + +import numpy as np +from sklearn.cluster import MiniBatchKMeans +from tqdm import tqdm + +import joblib + +logging.basicConfig( + format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + level=os.environ.get("LOGLEVEL", "INFO").upper(), + stream=sys.stdout, +) +logger = logging.getLogger("learn_kmeans") + + +def get_km_model( + n_clusters, + init, + max_iter, + batch_size, + tol, + max_no_improvement, + n_init, + reassignment_ratio, +): + return MiniBatchKMeans( + n_clusters=n_clusters, + init=init, + max_iter=max_iter, + batch_size=batch_size, + verbose=1, + compute_labels=False, + tol=tol, + max_no_improvement=max_no_improvement, + init_size=None, + n_init=n_init, + reassignment_ratio=reassignment_ratio, + ) + + +def load_feature(feat_path, leng_path, percent): + with open(leng_path, "r") as f: + lengs = [int(line.rstrip()) for line in f] + offsets = [0] + np.cumsum(lengs[:-1]).tolist() + + if percent <= 0: + print(f"{len(feat)} frames ") + return np.load(feat_path, mmap_mode="r") + else: + nsample = int(np.ceil(len(lengs) * percent)) + indices = np.random.choice(len(lengs), nsample, replace=False) + print(len(lengs), nsample, len(indices)) + feat = np.load(feat_path, mmap_mode="r") + sampled_feat = np.concatenate( + [feat[offsets[i]: offsets[i] + lengs[i]] for i in tqdm(indices)], axis=0 + ) + print(f"sampled {nsample} utterances, {len(sampled_feat)} frames ") + logger.info( + ( + f"sampled {nsample} utterances, {len(sampled_feat)} frames " + ) + ) + return sampled_feat + + +#def load_feature(feat_path, leng_path, percent): +# assert percent <= 1.0 +# feat = np.concatenate( +# [ +# load_feature_shard(feat_path, leng_path, percent) +# for r in range(nshard) +# ], +# axis=0, +# ) +# logging.info(f"loaded feature with dimension {feat.shape}") +# return feat + + +def learn_kmeans( + feat_path, + leng_path, + km_path, + n_clusters, + seed, + percent, + init, + max_iter, + batch_size, + tol, + n_init, + reassignment_ratio, + max_no_improvement, +): + np.random.seed(seed) + feat = load_feature(feat_path, leng_path, percent) + km_model = get_km_model( + n_clusters, + init, + max_iter, + batch_size, + tol, + max_no_improvement, + n_init, + reassignment_ratio, + ) + km_model.fit(feat) + joblib.dump(km_model, km_path) + + inertia = -km_model.score(feat) / len(feat) + logger.info("total intertia: %.5f", inertia) + logger.info("finished successfully") + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("feat_path", type=str) + parser.add_argument("leng_path", type=str) + parser.add_argument("km_path", type=str) + parser.add_argument("n_clusters", type=int) + parser.add_argument("--seed", default=0, type=int) + parser.add_argument( + "--percent", default=-1, type=float, help="sample a subset; -1 for all" + ) + parser.add_argument("--init", default="k-means++") + parser.add_argument("--max_iter", default=100, type=int) + parser.add_argument("--batch_size", default=10000, type=int) + parser.add_argument("--tol", default=0.0, type=float) + parser.add_argument("--max_no_improvement", default=100, type=int) + parser.add_argument("--n_init", default=20, type=int) + parser.add_argument("--reassignment_ratio", default=0.0, type=float) + args = parser.parse_args() + logging.info(str(args)) + + learn_kmeans(**vars(args)) diff --git a/data_pipeline/ssl/modules.py b/data_pipeline/ssl/modules.py new file mode 100644 index 
0000000000000000000000000000000000000000..1dcfc6f061cc189ca51fc90107116f38e2e48daf --- /dev/null +++ b/data_pipeline/ssl/modules.py @@ -0,0 +1,827 @@ +# -------------------------------------------------------- +# WavLM: Large-Scale Self-Supervised Pre-training for Full Stack Speech Processing (https://arxiv.org/abs/2110.13900.pdf) +# Github source: https://github.com/microsoft/unilm/tree/master/wavlm +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Based on fairseq code bases +# https://github.com/pytorch/fairseq +# -------------------------------------------------------- + +import math +import warnings +from typing import Dict, Optional, Tuple +import torch +from torch import Tensor, nn +from torch.nn import Parameter +import torch.nn.functional as F + + +class TransposeLast(nn.Module): + def __init__(self, deconstruct_idx=None): + super().__init__() + self.deconstruct_idx = deconstruct_idx + + def forward(self, x): + if self.deconstruct_idx is not None: + x = x[self.deconstruct_idx] + return x.transpose(-2, -1) + + +class Fp32LayerNorm(nn.LayerNorm): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, input): + output = F.layer_norm( + input.float(), + self.normalized_shape, + self.weight.float() if self.weight is not None else None, + self.bias.float() if self.bias is not None else None, + self.eps, + ) + return output.type_as(input) + + +class Fp32GroupNorm(nn.GroupNorm): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, input): + output = F.group_norm( + input.float(), + self.num_groups, + self.weight.float() if self.weight is not None else None, + self.bias.float() if self.bias is not None else None, + self.eps, + ) + return output.type_as(input) + + +class GradMultiply(torch.autograd.Function): + @staticmethod + def forward(ctx, x, scale): + ctx.scale = scale + res = x.new(x) + return res + + @staticmethod + def backward(ctx, grad): + return grad * ctx.scale, None + + +class SamePad(nn.Module): + def __init__(self, kernel_size, causal=False): + super().__init__() + if causal: + self.remove = kernel_size - 1 + else: + self.remove = 1 if kernel_size % 2 == 0 else 0 + + def forward(self, x): + if self.remove > 0: + x = x[:, :, : -self.remove] + return x + + +class Swish(nn.Module): + """Swish function + """ + + def __init__(self): + """Construct an MultiHeadedAttention object.""" + super(Swish, self).__init__() + self.act = torch.nn.Sigmoid() + + def forward(self, x): + return x * self.act(x) + + +class GLU_Linear(nn.Module): + def __init__(self, input_dim, output_dim, glu_type="sigmoid", bias_in_glu=True): + super(GLU_Linear, self).__init__() + + self.glu_type = glu_type + self.output_dim = output_dim + + if glu_type == "sigmoid": + self.glu_act = torch.nn.Sigmoid() + elif glu_type == "swish": + self.glu_act = Swish() + elif glu_type == "relu": + self.glu_act = torch.nn.ReLU() + elif glu_type == "gelu": + self.glu_act = torch.nn.GELU() + + if bias_in_glu: + self.linear = nn.Linear(input_dim, output_dim * 2, True) + else: + self.linear = nn.Linear(input_dim, output_dim * 2, False) + + def forward(self, x): + # to be consistent with GLU_Linear, we assume the input always has the #channel (#dim) in the last dimension of the tensor, so need to switch the dimension first for 1D-Conv case + x = self.linear(x) + + if self.glu_type == "bilinear": + x = (x[:, :, 0:self.output_dim] * x[:, :, self.output_dim:self.output_dim * 2]) + else: + x = (x[:, :, 
0:self.output_dim] * self.glu_act(x[:, :, self.output_dim:self.output_dim * 2])) + + return x + + +def gelu_accurate(x): + if not hasattr(gelu_accurate, "_a"): + gelu_accurate._a = math.sqrt(2 / math.pi) + return ( + 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) + ) + + +def gelu(x: torch.Tensor) -> torch.Tensor: + return torch.nn.functional.gelu(x.float()).type_as(x) + + +def get_activation_fn(activation: str): + """Returns the activation function corresponding to `activation`""" + + if activation == "relu": + return F.relu + elif activation == "gelu": + return gelu + elif activation == "gelu_fast": + warnings.warn( + "--activation-fn=gelu_fast has been renamed to gelu_accurate" + ) + return gelu_accurate + elif activation == "gelu_accurate": + return gelu_accurate + elif activation == "tanh": + return torch.tanh + elif activation == "linear": + return lambda x: x + elif activation == "glu": + return lambda x: x + else: + raise RuntimeError("--activation-fn {} not supported".format(activation)) + + +def init_bert_params(module): + """ + Initialize the weights specific to the BERT Model. + This overrides the default initializations depending on the specified arguments. + 1. If normal_init_linear_weights is set then weights of linear + layer will be initialized using the normal distribution and + bais will be set to the specified value. + 2. If normal_init_embed_weights is set then weights of embedding + layer will be initialized using the normal distribution. + 3. If normal_init_proj_weights is set then weights of + in_project_weight for MultiHeadAttention initialized using + the normal distribution (to be validated). + """ + + def normal_(data): + # with FSDP, module params will be on CUDA, so we cast them back to CPU + # so that the RNG is consistent with and without FSDP + data.copy_( + data.cpu().normal_(mean=0.0, std=0.02).to(data.device) + ) + + if isinstance(module, nn.Linear): + normal_(module.weight.data) + if module.bias is not None: + module.bias.data.zero_() + if isinstance(module, nn.Embedding): + normal_(module.weight.data) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + if isinstance(module, MultiheadAttention): + normal_(module.q_proj.weight.data) + normal_(module.k_proj.weight.data) + normal_(module.v_proj.weight.data) + + +def quant_noise(module, p, block_size): + """ + Wraps modules and applies quantization noise to the weights for + subsequent quantization with Iterative Product Quantization as + described in "Training with Quantization Noise for Extreme Model Compression" + + Args: + - module: nn.Module + - p: amount of Quantization Noise + - block_size: size of the blocks for subsequent quantization with iPQ + + Remarks: + - Module weights must have the right sizes wrt the block size + - Only Linear, Embedding and Conv2d modules are supported for the moment + - For more detail on how to quantize by blocks with convolutional weights, + see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" + - We implement the simplest form of noise here as stated in the paper + which consists in randomly dropping blocks + """ + + # if no quantization noise, don't register hook + if p <= 0: + return module + + # supported modules + assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)) + + # test whether module.weight has the right sizes wrt block_size + is_conv = module.weight.ndim == 4 + + # 2D matrix + if not is_conv: + assert ( + module.weight.size(1) % block_size == 0 + ), 
"Input features must be a multiple of block sizes" + + # 4D matrix + else: + # 1x1 convolutions + if module.kernel_size == (1, 1): + assert ( + module.in_channels % block_size == 0 + ), "Input channels must be a multiple of block sizes" + # regular convolutions + else: + k = module.kernel_size[0] * module.kernel_size[1] + assert k % block_size == 0, "Kernel size must be a multiple of block size" + + def _forward_pre_hook(mod, input): + # no noise for evaluation + if mod.training: + if not is_conv: + # gather weight and sizes + weight = mod.weight + in_features = weight.size(1) + out_features = weight.size(0) + + # split weight matrix into blocks and randomly drop selected blocks + mask = torch.zeros( + in_features // block_size * out_features, device=weight.device + ) + mask.bernoulli_(p) + mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) + + else: + # gather weight and sizes + weight = mod.weight + in_channels = mod.in_channels + out_channels = mod.out_channels + + # split weight matrix into blocks and randomly drop selected blocks + if mod.kernel_size == (1, 1): + mask = torch.zeros( + int(in_channels // block_size * out_channels), + device=weight.device, + ) + mask.bernoulli_(p) + mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) + else: + mask = torch.zeros( + weight.size(0), weight.size(1), device=weight.device + ) + mask.bernoulli_(p) + mask = ( + mask.unsqueeze(2) + .unsqueeze(3) + .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) + ) + + # scale weights and apply mask + mask = mask.to( + torch.bool + ) # x.bool() is not currently supported in TorchScript + s = 1 / (1 - p) + mod.weight.data = s * weight.masked_fill(mask, 0) + + module.register_forward_pre_hook(_forward_pre_hook) + return module + + +class MultiheadAttention(nn.Module): + """Multi-headed attention. + + See "Attention Is All You Need" for more details. 
+ """ + + def __init__( + self, + embed_dim, + num_heads, + kdim=None, + vdim=None, + dropout=0.0, + bias=True, + add_bias_kv=False, + add_zero_attn=False, + self_attention=False, + encoder_decoder_attention=False, + q_noise=0.0, + qn_block_size=8, + has_relative_attention_bias=False, + num_buckets=32, + max_distance=128, + gru_rel_pos=False, + rescale_init=False, + ): + super().__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout_module = nn.Dropout(dropout) + + self.has_relative_attention_bias = has_relative_attention_bias + self.num_buckets = num_buckets + self.max_distance = max_distance + if self.has_relative_attention_bias: + self.relative_attention_bias = nn.Embedding(num_buckets, num_heads) + + self.head_dim = embed_dim // num_heads + self.q_head_dim = self.head_dim + self.k_head_dim = self.head_dim + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" + self.scaling = self.head_dim ** -0.5 + + self.self_attention = self_attention + self.encoder_decoder_attention = encoder_decoder_attention + + assert not self.self_attention or self.qkv_same_dim, ( + "Self-attention requires query, key and " "value to be of the same size" + ) + + k_bias = True + if rescale_init: + k_bias = False + + k_embed_dim = embed_dim + q_embed_dim = embed_dim + + self.k_proj = quant_noise( + nn.Linear(self.kdim, k_embed_dim, bias=k_bias), q_noise, qn_block_size + ) + self.v_proj = quant_noise( + nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size + ) + self.q_proj = quant_noise( + nn.Linear(embed_dim, q_embed_dim, bias=bias), q_noise, qn_block_size + ) + + self.out_proj = quant_noise( + nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size + ) + + if add_bias_kv: + self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) + self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self.gru_rel_pos = gru_rel_pos + if self.gru_rel_pos: + self.grep_linear = nn.Linear(self.q_head_dim, 8) + self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1)) + + self.reset_parameters() + + def reset_parameters(self): + if self.qkv_same_dim: + # Empirically observed the convergence to be much better with + # the scaled initialization + nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) + nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) + nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) + else: + nn.init.xavier_uniform_(self.k_proj.weight) + nn.init.xavier_uniform_(self.v_proj.weight) + nn.init.xavier_uniform_(self.q_proj.weight) + + nn.init.xavier_uniform_(self.out_proj.weight) + if self.out_proj.bias is not None: + nn.init.constant_(self.out_proj.bias, 0.0) + if self.bias_k is not None: + nn.init.xavier_normal_(self.bias_k) + if self.bias_v is not None: + nn.init.xavier_normal_(self.bias_v) + if self.has_relative_attention_bias: + nn.init.xavier_normal_(self.relative_attention_bias.weight) + + def _relative_positions_bucket(self, relative_positions, bidirectional=True): + num_buckets = self.num_buckets + max_distance = self.max_distance + relative_buckets = 0 + + if bidirectional: + num_buckets = num_buckets // 2 + relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets + relative_positions 
= torch.abs(relative_positions) + else: + relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions)) + + max_exact = num_buckets // 2 + is_small = relative_positions < max_exact + + relative_postion_if_large = max_exact + ( + torch.log(relative_positions.float() / max_exact) + / math.log(max_distance / max_exact) + * (num_buckets - max_exact) + ).to(torch.long) + relative_postion_if_large = torch.min( + relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1) + ) + + relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large) + return relative_buckets + + def compute_bias(self, query_length, key_length): + context_position = torch.arange(query_length, dtype=torch.long)[:, None] + memory_position = torch.arange(key_length, dtype=torch.long)[None, :] + relative_position = memory_position - context_position + relative_position_bucket = self._relative_positions_bucket( + relative_position, + bidirectional=True + ) + relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device) + values = self.relative_attention_bias(relative_position_bucket) + values = values.permute([2, 0, 1]) + return values + + def forward( + self, + query, + key: Optional[Tensor], + value: Optional[Tensor], + key_padding_mask: Optional[Tensor] = None, + incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, + need_weights: bool = True, + static_kv: bool = False, + attn_mask: Optional[Tensor] = None, + before_softmax: bool = False, + need_head_weights: bool = False, + position_bias: Optional[Tensor] = None + ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: + """Input shape: Time x Batch x Channel + + Args: + key_padding_mask (ByteTensor, optional): mask to exclude + keys that are pads, of shape `(batch, src_len)`, where + padding elements are indicated by 1s. + need_weights (bool, optional): return the attention weights, + averaged over heads (default: False). + attn_mask (ByteTensor, optional): typically used to + implement causal attention, where the mask prevents the + attention from looking forward in time (default: None). + before_softmax (bool, optional): return the raw attention + weights and values before the attention softmax. + need_head_weights (bool, optional): return the attention + weights for each head. Implies *need_weights*. Default: + return the average attention weights over all heads. + """ + if need_head_weights: + need_weights = True + + is_tpu = query.device.type == "xla" + + tgt_len, bsz, embed_dim = query.size() + src_len = tgt_len + assert embed_dim == self.embed_dim + assert list(query.size()) == [tgt_len, bsz, embed_dim] + if key is not None: + src_len, key_bsz, _ = key.size() + if not torch.jit.is_scripting(): + assert key_bsz == bsz + assert value is not None + assert src_len, bsz == value.shape[:2] + + if self.has_relative_attention_bias and position_bias is None: + position_bias = self.compute_bias(tgt_len, src_len) + position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len) + + if ( + not is_tpu # don't use PyTorch version on TPUs + and incremental_state is None + and not static_kv + # A workaround for quantization to work. Otherwise JIT compilation + # treats bias in linear module as method. 
+ and not torch.jit.is_scripting() + and self.q_head_dim == self.head_dim + ): + assert key is not None and value is not None + assert attn_mask is None + + attn_mask_rel_pos = None + if position_bias is not None: + attn_mask_rel_pos = position_bias + if self.gru_rel_pos: + query_layer = query.transpose(0, 1) + new_x_shape = query_layer.size()[:-1] + (self.num_heads, -1) + query_layer = query_layer.view(*new_x_shape) + query_layer = query_layer.permute(0, 2, 1, 3) + _B, _H, _L, __ = query_layer.size() + + gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view( + _B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1) + gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0 + attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, -1, 1) * position_bias + + attn_mask_rel_pos = attn_mask_rel_pos.view((-1, tgt_len, tgt_len)) + k_proj_bias = self.k_proj.bias + if k_proj_bias is None: + k_proj_bias = torch.zeros_like(self.q_proj.bias) + + x, attn = F.multi_head_attention_forward( + query, + key, + value, + self.embed_dim, + self.num_heads, + torch.empty([0]), + torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), + self.bias_k, + self.bias_v, + self.add_zero_attn, + self.dropout_module.p, + self.out_proj.weight, + self.out_proj.bias, + self.training, + # self.training or self.dropout_module.apply_during_inference, + key_padding_mask, + need_weights, + attn_mask_rel_pos, + use_separate_proj_weight=True, + q_proj_weight=self.q_proj.weight, + k_proj_weight=self.k_proj.weight, + v_proj_weight=self.v_proj.weight, + ) + return x, attn, position_bias + + if incremental_state is not None: + saved_state = self._get_input_buffer(incremental_state) + if saved_state is not None and "prev_key" in saved_state: + # previous time steps are cached - no need to recompute + # key and value if they are static + if static_kv: + assert self.encoder_decoder_attention and not self.self_attention + key = value = None + else: + saved_state = None + + if self.self_attention: + q = self.q_proj(query) + k = self.k_proj(query) + v = self.v_proj(query) + elif self.encoder_decoder_attention: + # encoder-decoder attention + q = self.q_proj(query) + if key is None: + assert value is None + k = v = None + else: + k = self.k_proj(key) + v = self.v_proj(key) + + else: + assert key is not None and value is not None + q = self.q_proj(query) + k = self.k_proj(key) + v = self.v_proj(value) + q *= self.scaling + + if self.bias_k is not None: + assert self.bias_v is not None + k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = torch.cat( + [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 + ) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [ + key_padding_mask, + key_padding_mask.new_zeros(key_padding_mask.size(0), 1), + ], + dim=1, + ) + + q = ( + q.contiguous() + .view(tgt_len, bsz * self.num_heads, self.q_head_dim) + .transpose(0, 1) + ) + if k is not None: + k = ( + k.contiguous() + .view(-1, bsz * self.num_heads, self.k_head_dim) + .transpose(0, 1) + ) + if v is not None: + v = ( + v.contiguous() + .view(-1, bsz * self.num_heads, self.head_dim) + .transpose(0, 1) + ) + + if saved_state is not None: + # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) + if "prev_key" in saved_state: + _prev_key = saved_state["prev_key"] + assert _prev_key is not None + prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) + if static_kv: + k = prev_key + else: + 
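+                        # Incremental decoding: append the freshly projected keys to the
+                        # cached prev_key so the effective source length grows with each
+                        # decoding step (the values are extended the same way below).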
assert k is not None + k = torch.cat([prev_key, k], dim=1) + src_len = k.size(1) + if "prev_value" in saved_state: + _prev_value = saved_state["prev_value"] + assert _prev_value is not None + prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) + if static_kv: + v = prev_value + else: + assert v is not None + v = torch.cat([prev_value, v], dim=1) + prev_key_padding_mask: Optional[Tensor] = None + if "prev_key_padding_mask" in saved_state: + prev_key_padding_mask = saved_state["prev_key_padding_mask"] + assert k is not None and v is not None + key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( + key_padding_mask=key_padding_mask, + prev_key_padding_mask=prev_key_padding_mask, + batch_size=bsz, + src_len=k.size(1), + static_kv=static_kv, + ) + + saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) + saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) + saved_state["prev_key_padding_mask"] = key_padding_mask + # In this branch incremental_state is never None + assert incremental_state is not None + incremental_state = self._set_input_buffer(incremental_state, saved_state) + assert k is not None + assert k.size(1) == src_len + + # This is part of a workaround to get around fork/join parallelism + # not supporting Optional types. + if key_padding_mask is not None and key_padding_mask.dim() == 0: + key_padding_mask = None + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if self.add_zero_attn: + assert v is not None + src_len += 1 + k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) + v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) + if attn_mask is not None: + attn_mask = torch.cat( + [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 + ) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [ + key_padding_mask, + torch.zeros(key_padding_mask.size(0), 1).type_as( + key_padding_mask + ), + ], + dim=1, + ) + + attn_weights = torch.bmm(q, k.transpose(1, 2)) + attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) + + assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] + + if attn_mask is not None: + attn_mask = attn_mask.unsqueeze(0) + attn_weights += attn_mask + + if key_padding_mask is not None: + # don't attend to padding symbols + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + if not is_tpu: + attn_weights = attn_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), + float("-inf"), + ) + else: + attn_weights = attn_weights.transpose(0, 2) + attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) + attn_weights = attn_weights.transpose(0, 2) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if before_softmax: + return attn_weights, v, position_bias + + if position_bias is not None: + if self.gru_rel_pos == 1: + query_layer = q.view(bsz, self.num_heads, tgt_len, self.q_head_dim) + _B, _H, _L, __ = query_layer.size() + gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view( + _B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1) + gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0 + position_bias = gate_a_1.view(bsz * self.num_heads, -1, 1) * position_bias + + position_bias = position_bias.view(attn_weights.size()) + + attn_weights = attn_weights + position_bias + + attn_weights_float = F.softmax( + attn_weights, 
dim=-1 + ) + attn_weights = attn_weights_float.type_as(attn_weights) + attn_probs = self.dropout_module(attn_weights) + + assert v is not None + attn = torch.bmm(attn_probs, v) + assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] + attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn = self.out_proj(attn) + attn_weights: Optional[Tensor] = None + if need_weights: + attn_weights = attn_weights_float.view( + bsz, self.num_heads, tgt_len, src_len + ).transpose(1, 0) + if not need_head_weights: + # average attention weights over heads + attn_weights = attn_weights.mean(dim=0) + + return attn, attn_weights, position_bias + + @staticmethod + def _append_prev_key_padding_mask( + key_padding_mask: Optional[Tensor], + prev_key_padding_mask: Optional[Tensor], + batch_size: int, + src_len: int, + static_kv: bool, + ) -> Optional[Tensor]: + # saved key padding masks have shape (bsz, seq_len) + if prev_key_padding_mask is not None and static_kv: + new_key_padding_mask = prev_key_padding_mask + elif prev_key_padding_mask is not None and key_padding_mask is not None: + new_key_padding_mask = torch.cat( + [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 + ) + # During incremental decoding, as the padding token enters and + # leaves the frame, there will be a time when prev or current + # is None + elif prev_key_padding_mask is not None: + if src_len > prev_key_padding_mask.size(1): + filler = torch.zeros( + (batch_size, src_len - prev_key_padding_mask.size(1)), + device=prev_key_padding_mask.device, + ) + new_key_padding_mask = torch.cat( + [prev_key_padding_mask.float(), filler.float()], dim=1 + ) + else: + new_key_padding_mask = prev_key_padding_mask.float() + elif key_padding_mask is not None: + if src_len > key_padding_mask.size(1): + filler = torch.zeros( + (batch_size, src_len - key_padding_mask.size(1)), + device=key_padding_mask.device, + ) + new_key_padding_mask = torch.cat( + [filler.float(), key_padding_mask.float()], dim=1 + ) + else: + new_key_padding_mask = key_padding_mask.float() + else: + new_key_padding_mask = prev_key_padding_mask + return new_key_padding_mask + + def _get_input_buffer( + self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] + ) -> Dict[str, Optional[Tensor]]: + result = self.get_incremental_state(incremental_state, "attn_state") + if result is not None: + return result + else: + empty_result: Dict[str, Optional[Tensor]] = {} + return empty_result + + def _set_input_buffer( + self, + incremental_state: Dict[str, Dict[str, Optional[Tensor]]], + buffer: Dict[str, Optional[Tensor]], + ): + return self.set_incremental_state(incremental_state, "attn_state", buffer) + + def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): + return attn_weights diff --git a/data_pipeline/ssl/pack_npy.py b/data_pipeline/ssl/pack_npy.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d36d43ce31774ab88aa90782f3a8b8db6f9ffd --- /dev/null +++ b/data_pipeline/ssl/pack_npy.py @@ -0,0 +1,38 @@ +from npy_append_array import NpyAppendArray +import sys +import os +import numpy as np +from tqdm import tqdm +import random + +in_dir = sys.argv[1] +out_file = sys.argv[2] +out_len_file = sys.argv[3] +percentage = float(sys.argv[4]) + + +out_len_file = open(out_len_file, 'a') + + +#with NpyAppendArray(out_file, delete_if_exists=True) as npaa: +cnt = 0 +with NpyAppendArray(out_file, delete_if_exists=True) as npaa: + for file in tqdm(os.listdir(in_dir)): + try: + + #if 
percentage > 0 and random.random() > percentage: + # continue + arr = np.load(os.path.join(in_dir, file)) + if percentage > 0: + indices = np.random.choice(arr.shape[0], int(arr.shape[0] * percentage), replace=False) + arr = arr[indices] + npaa.append(arr) + out_len_file.write(f"{str(arr.shape[0])}\n") + cnt += 1 + except: + continue + +data = np.load(out_file, mmap_mode="r") + +print(data.shape) +print(len(os.listdir(in_dir)), cnt) diff --git a/data_pipeline/ssl/utils.py b/data_pipeline/ssl/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8917e8d609a530c78b2ae3761e257d4807e1a920 --- /dev/null +++ b/data_pipeline/ssl/utils.py @@ -0,0 +1,345 @@ +from typing import List, Optional, Tuple + +import torch +from torch import nn, Tensor + +from torchaudio._internal import load_state_dict_from_url +from torchaudio.models import wav2vec2_model, Wav2Vec2Model#, wavlm_model + + +def _get_model(type_, params): + factories = { + "Wav2Vec2": wav2vec2_model + } + if type_ not in factories: + raise ValueError(f"Supported model types are {tuple(factories.keys())}. Found: {type_}") + factory = factories[type_] + return factory(**params) + + +class _Wav2Vec2Model(nn.Module): + """Wrapper class for :py:class:`~torchaudio.models.Wav2Vec2Model`. + + This is used for layer normalization at the input + """ + + def __init__(self, model: Wav2Vec2Model, normalize_waveform: bool, apply_log_softmax: bool, append_star: bool): + super().__init__() + self.model = model + self.normalize_waveform = normalize_waveform + self.apply_log_softmax = apply_log_softmax + self.append_star = append_star + + def forward(self, waveforms: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]: + if self.normalize_waveform: + waveforms = nn.functional.layer_norm(waveforms, waveforms.shape) + output, output_lengths = self.model(waveforms, lengths) + if self.apply_log_softmax: + output = torch.nn.functional.log_softmax(output, dim=-1) + if self.append_star: + star_dim = torch.zeros((1, output.size(1), 1), dtype=output.dtype, device=output.device) + output = torch.cat((output, star_dim), dim=-1) + return output, output_lengths + + @torch.jit.export + def extract_features( + self, + waveforms: Tensor, + lengths: Optional[Tensor] = None, + num_layers: Optional[int] = None, + ) -> Tuple[List[Tensor], Optional[Tensor]]: + if self.normalize_waveform: + waveforms = nn.functional.layer_norm(waveforms, waveforms.shape) + return self.model.extract_features(waveforms, lengths, num_layers) + + +def _extend_model(module, normalize_waveform, apply_log_softmax=False, append_star=False): + """Add extra transformations to the model""" + return _Wav2Vec2Model(module, normalize_waveform, apply_log_softmax, append_star) + + +def _remove_aux_axes(state_dict, axes): + # Remove the seemingly unnecessary axis + # For ASR task, the pretrained weights originated from fairseq has unrelated dimensions at index 1, 2, 3 + # It's originated from the Dictionary implementation of fairseq, which was intended for NLP tasks, + # but not used during the ASR training. + # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/data/dictionary.py#L21-L37 + # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/criterions/ctc.py#L126-L129 + # + # Also, some pretrained weights originated from voxpopuli has an extra dimensions that almost never used and + # that resembles mistake. 
+ # The label `1` shows up in the training dataset of German (1 out of 16M), + # English (1 / 28M), Spanish (1 / 9.4M), Romanian (1 / 4.7M) and Polish (6 / 5.8M) + for key in ["aux.weight", "aux.bias"]: + mat = state_dict[key] + state_dict[key] = torch.stack([mat[i] for i in range(mat.size(0)) if i not in axes]) + + +def _get_state_dict(url, dl_kwargs, remove_axes=None): + if not url.startswith("https"): + url = f"https://download.pytorch.org/torchaudio/models/{url}" + dl_kwargs = {} if dl_kwargs is None else dl_kwargs + state_dict = load_state_dict_from_url(url, **dl_kwargs) + if remove_axes: + _remove_aux_axes(state_dict, remove_axes) + return state_dict + + +def _get_en_labels(): + return ( + "|", + "E", + "T", + "A", + "O", + "N", + "I", + "H", + "S", + "R", + "D", + "L", + "U", + "M", + "W", + "C", + "F", + "G", + "Y", + "P", + "B", + "V", + "K", + "'", + "X", + "J", + "Q", + "Z", + ) + + +def _get_de_labels(): + return ( + "|", + "e", + "n", + "i", + "r", + "s", + "t", + "a", + "d", + "h", + "u", + "l", + "g", + "c", + "m", + "o", + "b", + "w", + "f", + "k", + "z", + "p", + "v", + "ü", + "ä", + "ö", + "j", + "ß", + "y", + "x", + "q", + ) + + +def _get_vp_en_labels(): + return ( + "|", + "e", + "t", + "o", + "i", + "a", + "n", + "s", + "r", + "h", + "l", + "d", + "c", + "u", + "m", + "p", + "f", + "g", + "w", + "y", + "b", + "v", + "k", + "x", + "j", + "q", + "z", + ) + + +def _get_es_labels(): + return ( + "|", + "e", + "a", + "o", + "s", + "n", + "r", + "i", + "l", + "d", + "c", + "t", + "u", + "p", + "m", + "b", + "q", + "y", + "g", + "v", + "h", + "ó", + "f", + "í", + "á", + "j", + "z", + "ñ", + "é", + "x", + "ú", + "k", + "w", + "ü", + ) + + +def _get_fr_labels(): + return ( + "|", + "e", + "s", + "n", + "i", + "t", + "r", + "a", + "o", + "u", + "l", + "d", + "c", + "p", + "m", + "é", + "v", + "q", + "f", + "g", + "b", + "h", + "x", + "à", + "j", + "è", + "y", + "ê", + "z", + "ô", + "k", + "ç", + "œ", + "û", + "ù", + "î", + "â", + "w", + "ï", + "ë", + "ü", + "æ", + ) + + +def _get_it_labels(): + return ( + "|", + "e", + "i", + "a", + "o", + "n", + "t", + "r", + "l", + "s", + "c", + "d", + "u", + "p", + "m", + "g", + "v", + "h", + "z", + "f", + "b", + "q", + "à", + "è", + "ù", + "é", + "ò", + "ì", + "k", + "y", + "x", + "w", + "j", + "ó", + "í", + "ï", + ) + + +def _get_mms_labels(): + return ( + "a", + "i", + "e", + "n", + "o", + "u", + "t", + "s", + "r", + "m", + "k", + "l", + "d", + "g", + "h", + "y", + "b", + "p", + "w", + "c", + "v", + "j", + "z", + "f", + "'", + "q", + "x", + ) diff --git a/data_pipeline/ssl/xlsr300m.py b/data_pipeline/ssl/xlsr300m.py new file mode 100644 index 0000000000000000000000000000000000000000..2d8a1862f5a20bc6476167c49823457ca736ae91 --- /dev/null +++ b/data_pipeline/ssl/xlsr300m.py @@ -0,0 +1,149 @@ + +from dataclasses import dataclass +from typing import Any, Dict, Optional, Tuple +from torch.nn import Module +# from torchaudio.pipelines._wav2vec2 import utils +#from . import utils +import utils + + + + +@dataclass +class Wav2Vec2Bundle: + """Data class that bundles associated information to use pretrained :py:class:`~torchaudio.models.Wav2Vec2Model`. + + This class provides interfaces for instantiating the pretrained model along with + the information necessary to retrieve pretrained weights and additional data + to be used with the model. + + Torchaudio library instantiates objects of this class, each of which represents + a different pretrained model. Client code should access pretrained models via these + instances. 
+ + Please see below for the usage and the available values. + + Example - Feature Extraction + >>> import torchaudio + >>> + >>> bundle = torchaudio.pipelines.HUBERT_BASE + >>> + >>> # Build the model and load pretrained weight. + >>> model = bundle.get_model() + Downloading: + 100%|███████████████████████████████| 360M/360M [00:06<00:00, 60.6MB/s] + >>> + >>> # Resample audio to the expected sampling rate + >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate) + >>> + >>> # Extract acoustic features + >>> features, _ = model.extract_features(waveform) + """ # noqa: E501 + + _path: str + _params: Dict[str, Any] + _sample_rate: float + _normalize_waveform: bool + _model_type: str + + @property + def sample_rate(self) -> float: + """Sample rate of the audio that the model is trained on. + + :type: float + """ + return self._sample_rate + + def _get_state_dict(self, dl_kwargs): + # Note: This method is overridden in ASR bundle + return utils._get_state_dict(self._path, dl_kwargs) + + def get_model(self, *, dl_kwargs=None) -> Module: + """Construct the model and load the pretrained weight. + + The weight file is downloaded from the internet and cached with + :func:`torch.hub.load_state_dict_from_url` + + Args: + dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`. + + Returns: + Variation of :py:class:`~torchaudio.models.Wav2Vec2Model`. + + For the models listed below, an additional layer normalization is performed on the input. + + For all other models, a :py:class:`~torchaudio.models.Wav2Vec2Model` instance is returned. + + - WAV2VEC2_LARGE_LV60K + - WAV2VEC2_ASR_LARGE_LV60K_10M + - WAV2VEC2_ASR_LARGE_LV60K_100H + - WAV2VEC2_ASR_LARGE_LV60K_960H + - WAV2VEC2_XLSR53 + - WAV2VEC2_XLSR_300M + - WAV2VEC2_XLSR_1B + - WAV2VEC2_XLSR_2B + - HUBERT_LARGE + - HUBERT_XLARGE + - HUBERT_ASR_LARGE + - HUBERT_ASR_XLARGE + - WAVLM_LARGE + """ + model = utils._get_model(self._model_type, self._params) + state_dict = self._get_state_dict(dl_kwargs) + model.load_state_dict(state_dict) + if self._normalize_waveform: + model = utils._extend_model(model, normalize_waveform=True) + model.eval() + return model + + + + +WAV2VEC2_XLSR_300M = Wav2Vec2Bundle( + "wav2vec2_xlsr_300m.pth", + { + "extractor_mode": "layer_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": True, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.0, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.0, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": True, + "encoder_layer_drop": 0.0, + "aux_num_out": None, + }, + _model_type="Wav2Vec2", + _sample_rate=16000, + _normalize_waveform=True, +) +WAV2VEC2_XLSR_300M.__doc__ = """XLS-R model with 300 million parameters, +pre-trained on 436,000 hours of unlabeled audio from multiple datasets ( +*Multilingual LibriSpeech* :cite:`Pratap_2020`, +*CommonVoice* :cite:`ardila2020common`, +*VoxLingua107* :cite:`valk2021voxlingua107`, +*BABEL* :cite:`Gales2014SpeechRA`, and +*VoxPopuli* :cite:`voxpopuli`) in 128 languages, +not fine-tuned. + +Originally published by the authors of *XLS-R* :cite:`babu2021xls` under MIT License and +redistributed with the same license. 
+[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for usage details. +""" # noqa: E501 \ No newline at end of file diff --git a/data_pipeline/vad/vad_tool.py b/data_pipeline/vad/vad_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..082a031c1c26728ee5fb529fa4be6c7ed35f2e0f --- /dev/null +++ b/data_pipeline/vad/vad_tool.py @@ -0,0 +1,446 @@ +import collections +import sys +import librosa +import numpy +import random +import time +import torchaudio +from scipy.io.wavfile import read + +MU = 1800 + +def read_wave_to_frames_withbgm(path, bgmpath, sr=16000, save_sr=44100, frame_duration=10): + orig_sr, orig_wav = read(path) + if orig_wav.dtype == numpy.int16: + orig_wav = orig_wav / 32768. + if len(orig_wav.shape) > 1: + orig_wav = numpy.mean(orig_wav, -1) + wav = librosa.resample(orig_wav, orig_sr=orig_sr, target_sr=sr, res_type='polyphase') + wav = (wav * 2**15).astype(numpy.int16) + wav_bytes = wav.tobytes() + frames = frame_generator(frame_duration, wav_bytes, sr) + + + if save_sr != orig_sr: + vocal_wav = librosa.resample(orig_wav, orig_sr=orig_sr, target_sr=sr, res_type='polyphase') + else: + vocal_wav = orig_wav + orig_sr, bgm_wav = read(bgmpath) + if bgm_wav.dtype == numpy.int16: + bgm_wav = bgm_wav / 32768. + if len(bgm_wav.shape) > 1: + bgm_wav = numpy.mean(bgm_wav, -1) + + return frames, wav, vocal_wav, bgm_wav + +def read_wave_to_frames(path, sr=16000, frame_duration=10): + #start_time = time.time() + #wav, orig_sr = librosa.load(path, sr=None, mono=True) + orig_sr, wav = read(path) + if wav.dtype == numpy.int16: + wav = wav / 32768. + if len(wav.shape) > 1: + wav = numpy.mean(wav, -1) + #print("load", time.time() - start_time) + #start_time = time.time() + wav = librosa.resample(wav, orig_sr=orig_sr, target_sr=sr, res_type='polyphase') + #wav = librosa.resample(wav, orig_sr=orig_sr, target_sr=sr, res_type='soxr_qq') + #wav, orig_sr = torchaudio.load(path) + #wav = torchaudio.functional.resample(wav, orig_sr, sr) + #wav = wav.numpy() + #print("resample", time.time() - start_time) + wav = (wav * 2**15).astype(numpy.int16) + wav_bytes = wav.tobytes() + frames = frame_generator(frame_duration, wav_bytes, sr) + return frames, wav + + +class Frame(object): + """Represents a "frame" of audio data.""" + def __init__(self, bytes, timestamp, duration): + self.bytes = bytes + self.timestamp = timestamp + self.duration = duration + + +def frame_generator(frame_duration_ms, audio, sample_rate): + """Generates audio frames from PCM audio data. + + Takes the desired frame duration in milliseconds, the PCM data, and + the sample rate. + + Yields Frames of the requested duration. + """ + n = int(sample_rate * (frame_duration_ms / 1000.0) * 2) + offset = 0 + timestamp = 0.0 + duration = (float(n) / sample_rate) / 2.0 + while offset + n < len(audio): + yield Frame(audio[offset:offset + n], timestamp, duration) + timestamp += duration + offset += n + + +def vad_generator(frames, sr, vad): + vad_info = [] + for frame in frames: + vad_info.append(vad.is_speech(frame.bytes, sr)) + return vad_info + + +def vad_collector(sample_rate, frame_duration_ms, + padding_duration_ms, vad, frames): + """Filters out non-voiced audio frames. + + Given a webrtcvad.Vad and a source of audio frames, yields only + the voiced audio. + + Uses a padded, sliding window algorithm over the audio frames. + When more than 90% of the frames in the window are voiced (as + reported by the VAD), the collector triggers and begins yielding + audio frames. 
Then the collector waits until 90% of the frames in + the window are unvoiced to detrigger. + + The window is padded at the front and back to provide a small + amount of silence or the beginnings/endings of speech around the + voiced frames. + + Arguments: + + sample_rate - The audio sample rate, in Hz. + frame_duration_ms - The frame duration in milliseconds. + padding_duration_ms - The amount to pad the window, in milliseconds. + vad - An instance of webrtcvad.Vad. + frames - a source of audio frames (sequence or generator). + + Returns: A generator that yields PCM audio data. + """ + num_padding_frames = int(padding_duration_ms / frame_duration_ms) + # We use a deque for our sliding window/ring buffer. + ring_buffer = collections.deque(maxlen=num_padding_frames) + # We have two states: TRIGGERED and NOTTRIGGERED. We start in the + # NOTTRIGGERED state. + triggered = False + + voiced_frames = [] + for frame in frames: + is_speech = vad.is_speech(frame.bytes, sample_rate) + + sys.stdout.write('1' if is_speech else '0') + if not triggered: + ring_buffer.append((frame, is_speech)) + num_voiced = len([f for f, speech in ring_buffer if speech]) + # If we're NOTTRIGGERED and more than 90% of the frames in + # the ring buffer are voiced frames, then enter the + # TRIGGERED state. + if num_voiced > 0.9 * ring_buffer.maxlen: + triggered = True + sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,)) + # We want to yield all the audio we see from now until + # we are NOTTRIGGERED, but we have to start with the + # audio that's already in the ring buffer. + for f, s in ring_buffer: + voiced_frames.append(f) + ring_buffer.clear() + else: + # We're in the TRIGGERED state, so collect the audio data + # and add it to the ring buffer. + voiced_frames.append(frame) + ring_buffer.append((frame, is_speech)) + num_unvoiced = len([f for f, speech in ring_buffer if not speech]) + # If more than 90% of the frames in the ring buffer are + # unvoiced, then enter NOTTRIGGERED and yield whatever + # audio we've collected. + if num_unvoiced > 0.9 * ring_buffer.maxlen: + sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration)) + triggered = False + yield b''.join([f.bytes for f in voiced_frames]) + ring_buffer.clear() + voiced_frames = [] + if triggered: + sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration)) + sys.stdout.write('\n') + # If we have any leftover voiced audio when we run out of input, + # yield it. 
+ if voiced_frames: + yield b''.join([f.bytes for f in voiced_frames]) + + +class ActivateInfo: + def __init__(self, active, duration, start_pos, end_pos, keep=True): + self.active = active + self.duration = duration + self.start_pos = start_pos + self.end_pos = end_pos + self.keep = keep + + def __add__(self, x): + return x + self.duration + + def __repr__(self) -> str: + return f"{self.active} {self.start_pos}, {self.end_pos}" + + +class SegmentInfo: + def __init__(self, type="raw", duration=0, start_pos=0, end_pos=0, frame_duration=10): + self.type = type + self.duration = duration + self.start_pos = start_pos + self.end_pos = end_pos + self.frame_duration = frame_duration + + def get_wav_seg(self, wav: numpy.array, sr: int, frame_duration: int=None): + fd = frame_duration if frame_duration is not None else self.frame_duration + sample_pre_frame = fd*sr/1000 + if self.type == "pad": + return numpy.zeros((int(sample_pre_frame*self.duration), ), dtype=numpy.int16) + return wav[int(self.start_pos*sample_pre_frame):int((self.end_pos*sample_pre_frame))] + + def __repr__(self) -> str: + if self.type == "raw": + text = f"{self.start_pos*self.frame_duration}:{self.end_pos*self.frame_duration}" + else: + text = f"[{self.duration*self.frame_duration}]" + return text + + +def get_sil_segments(active_info: ActivateInfo, sil_frame: int, attach_pos: str="mid") -> list: + if active_info.duration >= sil_frame: + if attach_pos == "tail": + seg = [SegmentInfo(start_pos=active_info.start_pos, end_pos=active_info.start_pos+sil_frame)] + elif attach_pos == "head": + seg = [SegmentInfo(start_pos=active_info.end_pos-sil_frame, end_pos=active_info.end_pos)] + elif attach_pos == "mid": + seg = [ + SegmentInfo(start_pos=active_info.start_pos, end_pos=active_info.start_pos+sil_frame // 2-1), + SegmentInfo(start_pos=active_info.end_pos-sil_frame // 2+1, end_pos=active_info.end_pos), + ] + else: + raise NotImplementedError + else: + if attach_pos == "tail": + seg = [ + SegmentInfo(start_pos=active_info.start_pos, end_pos=active_info.end_pos), + SegmentInfo(type="pad", duration=sil_frame-active_info.duration), + ] + elif attach_pos == "head": + seg = [ + SegmentInfo(type="pad", duration=sil_frame-active_info.duration), + SegmentInfo(start_pos=active_info.start_pos, end_pos=active_info.end_pos), + ] + elif attach_pos == "mid": + seg = [ + SegmentInfo(start_pos=active_info.start_pos, end_pos=active_info.end_pos), + ] + else: + raise NotImplementedError + return seg + + +def merge_segment(segment: list) -> list: + new_segment = [] + last_s = None + for s in segment: + s: SegmentInfo + if s.type == "pad": + if last_s is not None: + new_segment.append(last_s) + last_s = None + new_segment.append(s) + continue + if last_s is None: + last_s = s + else: + if last_s.end_pos+1 == s.start_pos: + last_s.end_pos = s.end_pos + else: + new_segment.append(last_s) + last_s = s + if last_s is not None: + new_segment.append(last_s) + return new_segment + + +def random_frame(min_frame, max_frame): + #return random.randint(min_frame, max_frame) + #mu = (max_frame + max_frame + min_frame) / 3 + mu = MU + #sigma = (max_frame - mu) / 3 + sigma = (mu - min_frame) / 3 + length = random.gauss(mu, sigma) + length = int(min(max(length, min_frame), max_frame)) + #print(length) + return length + + +def cut_points_generator( + vad_info, + min_active_frame=20, + sil_frame=50, + sil_mid_frame=100, + cut_min_frame=8 * 100, + cut_max_frame=20 * 100, + is_random_min_frame=False, + ): + curr_min_frame = cut_min_frame + last_active_frame = 0 + 
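    # Rough outline of the packing below (frame counts assume the 10 ms VAD frames used by
+    # this pipeline's defaults, so e.g. cut_max_frame = 20 * 100 frames is roughly 20 s):
+    #   1. voiced runs no longer than min_active_frame are re-labelled as silence;
+    #   2. vad_info is collapsed into alternating voiced/unvoiced ActivateInfo runs;
+    #   3. voiced runs are packed into segments of roughly curr_min_frame..cut_max_frame frames,
+    #      with sil_frame of real or zero-padded silence attached at the head and tail; internal
+    #      silences longer than sil_mid_frame close the current segment.
+    # When is_random_min_frame is set, curr_min_frame is re-drawn after each emitted segment.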
+    is_last_active = False
+    for i, is_curr_active in enumerate(vad_info):
+        if is_curr_active and not is_last_active:
+            last_active_frame = i
+        elif not is_curr_active and is_last_active and i - last_active_frame <= min_active_frame:
+            for j in range(last_active_frame, i):
+                vad_info[j] = False
+        is_last_active = is_curr_active
+
+    start_pos = 0
+    end_pos = 0
+    duration = 0
+    is_active = vad_info[0]
+    activate_info = []
+    for pos, vi in enumerate(vad_info):
+        if is_active == vi:
+            duration += 1
+        else:
+            activate_info.append(ActivateInfo(is_active, duration, start_pos, pos-1))
+            is_active = vi
+            start_pos = pos
+            duration = 1
+    activate_info.append(ActivateInfo(is_active, duration, start_pos, end_pos))
+    # print(activate_info)
+    segment_info = []
+    curr_segment = []
+    curr_segment_duration = 0
+    max_active_block = len(activate_info)
+    # Note that activate_info always alternates between voiced and unvoiced runs.
+    for i in range(max_active_block):
+        curr_ai = activate_info[i]
+        # print("start", curr_segment_duration, curr_ai.duration)
+        if curr_ai.active:
+            # If the first run in this segment is voiced, prepend silence taken from before it.
+            if curr_segment_duration == 0:
+                if i == 0:
+                    curr_segment.append(SegmentInfo("pad", sil_frame))
+                else:
+                    sil_seg = activate_info[i-1]
+                    raw_sil_duration = min(sil_frame, sil_seg.duration // 2)
+                    end_pos = sil_seg.end_pos
+                    curr_segment = get_sil_segments(
+                        ActivateInfo(
+                            True,
+                            duration=raw_sil_duration,
+                            start_pos=sil_seg.end_pos-raw_sil_duration,
+                            end_pos=sil_seg.end_pos
+                        ),
+                        sil_frame=sil_frame,
+                        attach_pos="head"
+                    )
+                curr_segment_duration += sil_frame
+            # Then check how long the segment would become after appending this voiced run.
+            next_duration = curr_segment_duration + curr_ai.duration
+            curr_ai_seg = SegmentInfo(start_pos=curr_ai.start_pos, end_pos=curr_ai.end_pos)
+            # print(next_duration)
+            if next_duration > cut_max_frame:
+                # If appending this run would exceed the maximum length, drop the runs collected
+                # so far and keep only the current one.
+                # Implicit condition: if a segment contains more than one voiced run, their total must
+                # be shorter than the minimum length, while the current run is no shorter than
+                # cut_max_frame - curr_min_frame.
+                if curr_ai.duration > curr_segment_duration:
+                    new_segment = get_sil_segments(activate_info[i-1], sil_frame, "head")
+                    new_segment.append(curr_ai_seg)
+                    if i < max_active_block - 1:
+                        new_segment.extend(get_sil_segments(activate_info[i+1], sil_frame, "tail"))
+                    else:
+                        new_segment.append(SegmentInfo(type="pad", duration=sil_frame))
+                    # print("1", len(segment_info), curr_segment)
+                    segment_info.append(merge_segment(new_segment))
+                    if is_random_min_frame:
+                        curr_min_frame = random_frame(cut_min_frame, cut_max_frame)
+                    curr_segment = []
+                    curr_segment_duration = 0
+                else:
+                    # print("2", len(segment_info), curr_segment)
+                    if curr_segment_duration > 10 * 100:
+                        segment_info.append(merge_segment(curr_segment))
+                        if is_random_min_frame:
+                            curr_min_frame = random_frame(cut_min_frame, cut_max_frame)
+                    curr_segment = get_sil_segments(activate_info[i-1], sil_frame, "head")
+                    curr_segment.append(curr_ai_seg)
+                    curr_segment_duration = sil_frame + curr_ai.duration
+            elif next_duration > curr_min_frame:
+                # Once the segment is long enough, append trailing silence, save it, and start a new segment.
+                curr_segment.append(curr_ai_seg)
+                if i < max_active_block - 1:
+                    # print(activate_info[i+1])
+                    curr_segment.extend(get_sil_segments(activate_info[i+1], sil_frame, "tail"))
+                else:
+                    curr_segment.append(SegmentInfo(type="pad", duration=sil_frame))
+                # print("3", len(segment_info), curr_segment)
+                segment_info.append(merge_segment(curr_segment))
+                if is_random_min_frame:
+                    curr_min_frame = random_frame(cut_min_frame, cut_max_frame)
+                curr_segment = []
+                curr_segment_duration = 0
+            else:
+                # Not long enough yet: append the run and keep accumulating.
+                curr_segment.append(curr_ai_seg)
+                curr_segment_duration += curr_ai.duration
+        else:
+            # Handle an unvoiced (silence) run.
+            if curr_segment_duration == 0:
+                raw_sil_duration = min(sil_frame, curr_ai.duration // 2)
+                end_pos = curr_ai.end_pos
+                curr_segment = get_sil_segments(
+                    ActivateInfo(
+                        True,
+                        duration=raw_sil_duration,
+                        start_pos=curr_ai.end_pos-raw_sil_duration,
+                        end_pos=curr_ai.end_pos
+                    ),
+                    sil_frame=sil_frame,
+                    attach_pos="head"
+                )
+                curr_segment_duration += sil_frame
+            else:
+                # For internal silence runs, trim them down to at most sil_mid_frame.
+                #curr_segment.extend(get_sil_segments(curr_ai, sil_mid_frame, attach_pos="mid"))
+                #curr_segment_duration += min(sil_mid_frame, curr_ai.duration)
+                if curr_ai.duration > sil_mid_frame:
+                    curr_segment.extend(get_sil_segments(curr_ai, sil_frame, "tail"))
+                    segment_info.append(merge_segment(curr_segment))
+                    if is_random_min_frame:
+                        curr_min_frame = random_frame(cut_min_frame, cut_max_frame)
+                    curr_segment = []
+                    curr_segment_duration = 0
+                else:
+                    # For internal silence runs, trim them down to at most sil_mid_frame.
+                    curr_segment.extend(get_sil_segments(curr_ai, sil_mid_frame+1, attach_pos="mid"))
+                    curr_segment_duration += min(sil_mid_frame, curr_ai.duration)
+    # print(curr_segment_duration, curr_segment)
+    if len(curr_segment) > 3 and curr_segment_duration > 7 * 100:
+        if activate_info[-1].active:
+            curr_segment.append(SegmentInfo(type="pad", duration=sil_frame))
+        segment_info.append(merge_segment(curr_segment))
+    return segment_info
+
+
+def cut_points_storage_generator(raw_vad_info, cut_points: list, frame_duration=10) -> tuple:
+    raw_vad_content = " ".join(["1" if i else "0" for i in raw_vad_info])
+    content = []
+    for cut_point in cut_points:
+        line = []
+        for s in cut_point:
+            s.frame_duration = frame_duration
+            line.append(str(s))
+        content.append("|".join(line))
+    return raw_vad_content, "\n".join(content)
+
+
+def wavs_generator(raw_wav: numpy.ndarray, cut_points: list, filename: str, sr: int, frame_duration: int) -> list:
+    wavs = []
+    for idx, cp in enumerate(cut_points):
+        clip = numpy.concatenate(
+            [s.get_wav_seg(raw_wav, sr, frame_duration) for s in cp],
+            axis=0
+        )
+        wavs.append((clip, f"{filename}_{idx}_{int(clip.shape[0]/sr*1000)}.wav"))
+    return wavs
+ 
\ No newline at end of file
diff --git a/data_pipeline/vad/vad_webrtcvad.py b/data_pipeline/vad/vad_webrtcvad.py
new file mode 100644
index 0000000000000000000000000000000000000000..92ec62a3447f1046f7c99a5c23a5cb395b921b67
--- /dev/null
+++ b/data_pipeline/vad/vad_webrtcvad.py
@@ -0,0 +1,186 @@
+import webrtcvad
+import torch.multiprocessing as mp
+import os
+import threading
+from tqdm import tqdm
+import sys
+from scipy.io.wavfile import write
+import traceback
+import librosa
+import argparse
+import glob
+import time
+import random
+
+vocal_file_lock = threading.Lock()
+bgm_file_lock = threading.Lock()
+
+from vad_tool import read_wave_to_frames, read_wave_to_frames_withbgm, vad_generator, cut_points_generator, cut_points_storage_generator, wavs_generator
+
+LOGGING_INTERVAL = 3
+#SAMPLE_RATE = 44100
+#SAMPLE_RATE = 16000
+SAMPLE_RATE = 48000
+SAVE_SAMPLE_RATE = 44100
+FRAME_DURATION = 10
+
+SAVE_SAMPLE_PER_FRAME = int(FRAME_DURATION * SAVE_SAMPLE_RATE / 1000)
+
+MIN_ACTIVE_TIME_MS = 200
+SIL_HEAD_TAIL_MS = 500
+#SIL_HEAD_TAIL_MS = 3000
+SIL_MID_MS = 3000
+CUT_MIN_MS = 3000
+CUT_MAX_MS = 30000
+
+MIN_ACTIVE_FRAME = MIN_ACTIVE_TIME_MS // FRAME_DURATION
+SIL_FRAME = SIL_HEAD_TAIL_MS // FRAME_DURATION
+SIL_MID_FRAME = SIL_MID_MS // FRAME_DURATION
+CUT_MIN_FRAME = CUT_MIN_MS // FRAME_DURATION
+CUT_MAX_FRAME = CUT_MAX_MS // FRAME_DURATION
+RANDOM_MIN_FRAME = True
+
+import torch
+
+def gpu_holder(rank, a):
+    device = f'cuda:{rank}'
+    conv = torch.nn.Conv1d(1024, 1024, 9, padding=4)
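+    # Presumably a keep-alive: the endless convolution loop below keeps each GPU busy so that a
+    # shared-cluster scheduler or idle watchdog does not reclaim it while the CPU-bound VAD
+    # workers run; the process is started with daemon=True, so it exits with the parent.
+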
conv.to(device) + while True: + x = torch.rand((8, 1024, 128), device=device) + y = conv(x) + + + + +def inference(rank, out_dir, filelist_name, queue: mp.Queue): + vocal_out_dir = os.path.join(out_dir, "vocal_cut") + bgm_out_dir = os.path.join(out_dir, "bgm_cut") + info_dir = os.path.join(out_dir, "vad_info") + os.makedirs(vocal_out_dir, exist_ok=True) + os.makedirs(bgm_out_dir, exist_ok=True) + os.makedirs(info_dir, exist_ok=True) + + def write_to_file(file_path, data, file_lock): + with file_lock: + with open(file_path, 'a') as f: + f.write(data) + while True: + input_path = queue.get() + if input_path is None: + break + try: + vad_tools = webrtcvad.Vad(3) # create a new vad each time to avoid some bugs + vocal_path, bgm_path = input_path[0] + filename = os.path.basename(vocal_path).replace(".wav", "") + #frames, wav = read_wave_to_frames(vocal_path, SAMPLE_RATE, FRAME_DURATION) + frames, wav, vocal_wav, bgm_wav = read_wave_to_frames_withbgm(vocal_path, bgm_path, SAMPLE_RATE, SAVE_SAMPLE_RATE, FRAME_DURATION) + vad_info = vad_generator(frames, SAMPLE_RATE, vad_tools) + + cut_points = cut_points_generator(vad_info, MIN_ACTIVE_FRAME, SIL_FRAME, SIL_MID_FRAME, CUT_MIN_FRAME, CUT_MAX_FRAME, RANDOM_MIN_FRAME) + raw_vad_content, file_content = cut_points_storage_generator(vad_info, cut_points, FRAME_DURATION) + + with open(os.path.join(info_dir, filename+".raw_info.txt"), "w") as f: + f.write(raw_vad_content) + with open(os.path.join(info_dir, filename+".txt"), "w") as f: + f.write(file_content) + + wavs = wavs_generator(vocal_wav, cut_points, filename, SAVE_SAMPLE_RATE, FRAME_DURATION) + bgm_wavs = wavs_generator(bgm_wav, cut_points, filename, SAVE_SAMPLE_RATE, FRAME_DURATION) + for ((wav_seg, name), (bgm_wav_seg, _)) in zip(wavs, bgm_wavs): + if wav_seg.shape[-1] < SAVE_SAMPLE_RATE * CUT_MIN_MS / 1000: + continue + write(os.path.join(vocal_out_dir, name), SAVE_SAMPLE_RATE, wav_seg) + write(os.path.join(bgm_out_dir, name), SAVE_SAMPLE_RATE, bgm_wav_seg) + + except Exception as e: + traceback.print_exc() + print(e) + +def setInterval(interval): + def decorator(function): + def wrapper(*args, **kwargs): + stopped = threading.Event() + + def loop(): # executed in another thread + while not stopped.wait(interval): # until stopped + function(*args, **kwargs) + + t = threading.Thread(target=loop) + t.daemon = True # stop if the program exits + t.start() + return stopped + + return wrapper + + return decorator + + +last_batches = None + + +@setInterval(LOGGING_INTERVAL) +def QueueWatcher(queue, bar): + global last_batches + curr_batches = queue.qsize() + bar.update(last_batches-curr_batches) + last_batches = curr_batches + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--filelist_or_dir", type=str, required=True, help="Path to save checkpoints") + parser.add_argument("--out_dir", type=str, required=True, help="Path to save checkpoints") + parser.add_argument("--jobs", type=int, required=False, default=2, help="Path to save checkpoints") + parser.add_argument("--log_dir", type=str, required=False, default="large-v3", help="Path to save checkpoints") + parser.add_argument("--model_dir", type=str, required=False, default="large-v3", help="Path to save checkpoints") + args = parser.parse_args() + + filelist_or_dir = args.filelist_or_dir + out_dir = args.out_dir + NUM_THREADS = args.jobs + + if os.path.isfile(filelist_or_dir): + filelist_name = filelist_or_dir.split('/')[-1].split('.')[0] + generator = [os.path.basename(x) for x in 
open(filelist_or_dir).read().splitlines()] + else: + filelist_name = "single" + generator = [(os.path.join(os.path.dirname(os.path.dirname(x)), "vocal", os.path.basename(x)), os.path.join(os.path.dirname(os.path.dirname(x)), "bgm", os.path.basename(x))) for x in glob.glob(f"{filelist_or_dir}/*.wav")] + + #mp.set_start_method('spawn',force=True) + + print(f"Running with {NUM_THREADS} threads and batchsize 1") + processes = [] + queue = mp.Queue() + for rank in range(NUM_THREADS): + p = mp.Process(target=inference, args=(rank, out_dir, filelist_name, queue), daemon=True) + p.start() + processes.append(p) + + for i in range(4): + rank = i % torch.cuda.device_count() + p = mp.Process(target=gpu_holder, args=(rank, 0), daemon=True) + p.start() + #processes.append(p) + + accum = [] + tmp_file = [] + + + for filename in tqdm(generator): + #accum.append((os.path.join(out_dir, "vocal", filename), os.path.join(out_dir, "bgm", filename))) + accum.append(filename) + if len(accum) == 1: + queue.put(accum.copy()) + accum.clear() + + + for _ in range(NUM_THREADS): + queue.put(None) + + last_batches = queue.qsize() + bar = tqdm(total=last_batches) + queue_watcher = QueueWatcher(queue, bar) + for p in processes: + p.join() + queue_watcher.set() diff --git a/rap_songs.csv b/rap_songs.csv new file mode 100644 index 0000000000000000000000000000000000000000..22ec3b73770388b6f971c022c98eb435df02e5ee --- /dev/null +++ b/rap_songs.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:076f81872c055c092c3e6dd20e304e28e71a7283e92de4b76359fe0f84e09dcd +size 13222429