nicolaus625 committed
Commit 659e74f · verified · 1 Parent(s): 6cdf0ac

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/01-黄炳强-海瑞背纤:我自从到淳安正印执掌_2.flac +3 -0
  2. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/05-黄炳强-朱痕记:听我妻赵锦棠言讲一遍_1.flac +3 -0
  3. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/05-黄炳强-朱痕记:听我妻赵锦棠言讲一遍_2.flac +3 -0
  4. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/06-黄炳强-朱痕记:见坟台不由人泪流满面_4.flac +3 -0
  5. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/07-黄炳强-洪羊洞:为国家哪何曾半日闲空_1.flac +3 -0
  6. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/08-黄炳强-洪羊洞:叹杨家投宋主心血用尽_1.flac +3 -0
  7. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/10-黄炳强-文昭关:一轮明月照窗前_2.flac +3 -0
  8. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/10-黄炳强-文昭关:一轮明月照窗前_3.flac +3 -0
  9. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/11-黄炳强-文昭关:心中有事难合眼_2.flac +3 -0
  10. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/14-黄炳强-鱼肠剑:站在街头用目觑_1.flac +3 -0
  11. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/14-黄炳强-鱼肠剑:站在街头用目觑_5.flac +3 -0
  12. 128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/15-黄炳强-鱼肠剑:富贵穷通不由己_2.flac +3 -0
  13. Beat-Transformer/README.md +74 -0
  14. Beat-Transformer/ballroom_1train.jsonl +0 -0
  15. Beat-Transformer/carnetic_1train.jsonl +0 -0
  16. Beat-Transformer/code/DilatedTransformer.py +167 -0
  17. Beat-Transformer/code/DilatedTransformerLayer.py +183 -0
  18. Beat-Transformer/code/__pycache__/spectrogram_dataset.cpython-39.pyc +0 -0
  19. Beat-Transformer/code/ablation_models/DilatedTransformerLayer.py +183 -0
  20. Beat-Transformer/code/ablation_models/music_transformer.py +145 -0
  21. Beat-Transformer/code/ablation_models/non_demix_model.py +261 -0
  22. Beat-Transformer/code/ablation_models/non_demix_spectrogram_dataset.py +393 -0
  23. Beat-Transformer/code/ablation_models/tcn.py +121 -0
  24. Beat-Transformer/code/ablation_models/tcn_demix_model.py +187 -0
  25. Beat-Transformer/code/ablation_models/utils.py +301 -0
  26. Beat-Transformer/code/eight_fold_test.py +403 -0
  27. Beat-Transformer/code/optimizer.py +100 -0
  28. Beat-Transformer/code/spectrogram_dataset.py +427 -0
  29. Beat-Transformer/code/train.py +396 -0
  30. Beat-Transformer/code/train.sh +9 -0
  31. Beat-Transformer/code/utils.py +301 -0
  32. Beat-Transformer/code/visiualize_attention.py +150 -0
  33. Beat-Transformer/data/SMC/.DS_Store +0 -0
  34. Beat-Transformer/data/SMC/HolzapfelEtAl12-taslp.pdf +0 -0
  35. Beat-Transformer/data/SMC/SMC_MIREX_Readme.txt +58 -0
  36. Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_048.tag +3 -0
  37. Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_133.tag +8 -0
  38. Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_158.tag +5 -0
  39. Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_208.tag +5 -0
  40. Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_217.tag +5 -0
  41. Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_261.tag +5 -0
  42. Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_279.tag +2 -0
  43. Beat-Transformer/data/demix_spectrogram_data.txt +3 -0
  44. Beat-Transformer/gtzan_test.jsonl +0 -0
  45. Beat-Transformer/hainsworth_1train.jsonl +0 -0
  46. Beat-Transformer/sft.py +170 -0
  47. Beat-Transformer/smc_1train.jsonl +0 -0
  48. MSD/1/0/1004941.clip.wav +3 -0
  49. MSD/1/0/1026549.clip.wav +3 -0
  50. MSD/1/0/1036772.clip.wav +3 -0
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/01-黄炳强-海瑞背纤:我自从到淳安正印执掌_2.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa0fd96245c3a6d57f0f1f15e4a29cc4e528d575c2e6ec9ad9273dd537882efe
+ size 2871501
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/05-黄炳强-朱痕记:听我妻赵锦棠言讲一遍_1.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad2adb9943f5dba4214de799a0659a187daf36960599dcde235ef2d866f434cf
+ size 2867895
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/05-黄炳强-朱痕记:听我妻赵锦棠言讲一遍_2.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d26d82a173038228ba0d63a6aff489fd3457540d2befbcf16c9df616432621fb
+ size 2911303
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/06-黄炳强-朱痕记:见坟台不由人泪流满面_4.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d26f72ca6f078c964675258f709516fd760ae35af4da73be195e9a01397c541c
+ size 2767808
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/07-黄炳强-洪羊洞:为国家哪何曾半日闲空_1.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25a8ed3b0148dd492ce0031dfe2fe0fa4f05382b71d21d84d43bc1b118c4d3e0
+ size 2712232
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/08-黄炳强-洪羊洞:叹杨家投宋主心血用尽_1.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38462edbc217e89ec03c1540a76a5e36d5c900aee8f20077abdfef01e029ba95
+ size 2622302
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/10-黄炳强-文昭关:一轮明月照窗前_2.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:522dcae832b65012ebb6fd5b80315274ffed276a4354949ac0f2b23cd702bc39
+ size 2854300
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/10-黄炳强-文昭关:一轮明月照窗前_3.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5522fe2373869bf3cb29d8f0c0d6931bcc640249909fc3d83d59085ab101649
+ size 2757299
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/11-黄炳强-文昭关:心中有事难合眼_2.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:498975c1af2022443da437f7a121f79addb1bc0e1ce04b65f99df028c195a34b
+ size 2954643
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/14-黄炳强-鱼肠剑:站在街头用目觑_1.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c642a5b3cca2843c18112cc5fbf4c0dfaa2af6bde2514bcc5df7c1e806d4c24
+ size 2924549
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/14-黄炳强-鱼肠剑:站在街头用目觑_5.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:243f81a76b4bf19fcf49b17736be15ad74ea5e4d2de16d752b8f60e9e6da16dc
+ size 2940410
128_Compmusic-Jingju_Audio_Recordings_Collection_english_version-music_captions/Jingju_Audio_Recordings_Collection_cut/京剧之星:黄炳强专辑/15-黄炳强-鱼肠剑:富贵穷通不由己_2.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b38a65d2b38d97c15ff55d42c6709ad0877bed0ce57c701fb46d9cdc96be5fa
+ size 3279926
Beat-Transformer/README.md ADDED
@@ -0,0 +1,74 @@
+ # Beat Transformer
+ <a href="https://colab.research.google.com/drive/1IdrpMO1AivWmy-Bm8ktmMy14ED9jllux?usp=sharing" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" style="max-width: 100%;"></a>
+
+ Repository for the paper: [Beat Transformer: Demixed Beat and Downbeat Tracking with Dilated Self-Attention](https://arxiv.org/abs/2209.07140), in Proceedings of the 23rd International Society for Music Information Retrieval Conference (ISMIR 2022), Bengaluru, India.
+
+ You are welcome to test our model on your own music in our [Google Colab](https://colab.research.google.com/drive/1IdrpMO1AivWmy-Bm8ktmMy14ED9jllux?usp=sharing).
+
+
+ ## Code and File Directory
+
+ This repository is organized as follows:
+
+ ```
+ root
+ │
+ └───checkpoint                        PyTorch model checkpoints
+ │       ···
+ │
+ └───code
+ │       └───ablation_models           ablation models
+ │       │       ···
+ │       DilatedTransformer.py         Beat Transformer model
+ │       DilatedTransformerLayer.py    Dilated Self-Attention
+ │       spectrogram_dataset.py        data loader
+ │       train.py                      training script
+ │       ...                           code for other utilities
+ │
+ └───data
+ │       └───audio_lists               Order info of pieces in each dataset
+ │       │       ···
+ │       demix_spectrogram_data.npz    demixed spectrogram data (33GB, to be downloaded)
+ │       full_beat_annotation.npz      beat/downbeat annotation
+ │
+ └───preprocessing                     code for data pre-processing
+ │       ···
+ │
+ └───save                              training log and more
+ │       ···
+ ```
+
+
+ ## How to run
+ * To quickly reproduce the accuracy reported in our paper, simply run `./code/eight_fold_test.py`.
+ * To quickly test our model on your own music, head to our [Google Colab](https://colab.research.google.com/drive/1IdrpMO1AivWmy-Bm8ktmMy14ED9jllux?usp=sharing).
+ * If you wish to train our model from scratch, first download our [processed dataset](https://drive.google.com/file/d/1LamSAEY5QsnY57cF6qH_0niesGGKkHtI/view?usp=sharing) (33GB in total, including demixed spectrogram data of Ballroom, Hainsworth, Carnatic, Harmonix, SMC, and GTZAN); a minimal inspection sketch is given right after this README.
+ * Executing `./code/train.sh` will train our model with 8-fold cross-validation. If you wish to train a single fold, you can run `./code/train.py` after specifying `DEBUG_MODE`, `FOLD`, and `GPU`. When `DEBUG_MODE=1`, it loads a small portion of the data for a quick run-through with a smaller batch size.
+ * We also release our ablation model architectures in `./code/ablation_models`, as well as our data processing script `./preprocessing/demixing.py`, which calls [Spleeter](https://github.com/deezer/spleeter) to demix each piece and save the demixed spectrogram.
+
+ ## Audio Data
+ We use a total of 7 datasets for model training and testing. If you wish to acquire the audio data, you can follow the guidelines below:
+ * Ballroom Dataset (audio) is available [here](http://mtg.upf.edu/ismir2004/contest/tempoContest/node5.html). There are 13 duplicated pieces, which I discarded in my experiments. For more information, see [here](https://github.com/CPJKU/BallroomAnnotations/blob/master/README.md).
+
+ * Hainsworth Dataset (audio) is no longer accessible via the original link. Since Hainsworth is a well-known public dataset, I guess it's okay to share my copy. You can download Hainsworth [here](https://drive.google.com/file/d/1ctMDHAoeTBG5LSbtQIQBIv4vTI0oB0u1/view).
+
+ * GTZAN Dataset (audio) is available on [Kaggle](https://www.kaggle.com/datasets/andradaolteanu/gtzan-dataset-music-genre-classification). You need a registered Kaggle account to download it.
+
+ * SMC Dataset (audio) is available [here](https://joserzapata.github.io/publication/selective-sampling-beat-tracking/).
+
+ * Carnatic Dataset (audio) is on [Zenodo](https://zenodo.org/record/1264394). You can download it by request.
+
+ * Harmonix Dataset (mel-spectrogram) is available [here](https://github.com/urinieto/harmonixset). I used the Griffin-Lim algorithm in Librosa to convert the mel-spectrograms back to audio, which is, however, lossy. My conversion code is [here](https://github.com/zhaojw1998/Beat-Transformer/blob/main/preprocessing/harmonix_mel2wav.py).
+
+ * RWC POP (audio) does not seem to be royalty-free, so I'm afraid I cannot share the audio. For more info about this dataset, you can go to its [official webpage](https://staff.aist.go.jp/m.goto/RWC-MDB/).
+
+ For the beat/downbeat annotations of Ballroom, GTZAN, SMC, and Hainsworth, I used the annotations released by Sebastian Böck [here](https://github.com/superbock/ISMIR2019).
+
+
+ ## Contact
+ Jingwei Zhao (PhD student in Data Science at NUS)
+
+ jzhao@u.nus.edu
+
+ Nov. 24, 2022
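A minimal sketch for inspecting the processed dataset referenced in the README above (assumptions: the two archives have been placed in `./data/` as the training code in this commit expects, and each entry is a pickled object, hence `allow_pickle=True`):

```
import numpy as np

# Quick, hypothetical look at the downloaded archives; the exact nesting of
# each entry (folds, songs, demixed stems) is handled by code/spectrogram_dataset.py.
data = np.load('./data/demix_spectrogram_data.npz', allow_pickle=True)
annotation = np.load('./data/full_beat_annotation.npz', allow_pickle=True)
print(list(data.keys()))         # expected: one key per dataset, e.g. 'ballroom', 'gtzan', ...
print(list(annotation.keys()))
```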
Beat-Transformer/ballroom_1train.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
Beat-Transformer/carnetic_1train.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
Beat-Transformer/code/DilatedTransformer.py ADDED
@@ -0,0 +1,167 @@
1
+ import torch
2
+ from torch import nn
3
+ from DilatedTransformerLayer import DilatedTransformerLayer
4
+ from torch.nn import TransformerEncoderLayer as torchTransformerEncoderLayer
5
+
6
+
7
+ class Demixed_DilatedTransformerModel(nn.Module):
8
+ def __init__(self, attn_len=5, instr=5, ntoken=2, dmodel=128, nhead=2, d_hid=512, nlayers=9, norm_first=True, dropout=.1):
9
+ super(Demixed_DilatedTransformerModel, self).__init__()
10
+ self.nhead = nhead
11
+ self.nlayers = nlayers
12
+ self.attn_len = attn_len
13
+ self.head_dim = dmodel // nhead
14
+ self.dmodel = dmodel
15
+ assert self.head_dim * nhead == dmodel, "embed_dim must be divisible by num_heads"
16
+
17
+ self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(5, 3), stride=1, padding=(2, 0))#126
18
+ self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#42
19
+ self.dropout1 = nn.Dropout(p=dropout)
20
+
21
+ self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(1, 12), stride=1, padding=(0, 0))#31
22
+ self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#10
23
+ self.dropout2 = nn.Dropout(p=dropout)
24
+
25
+ self.conv3 = nn.Conv2d(in_channels=64, out_channels=dmodel, kernel_size=(3, 6), stride=1, padding=(1, 0))#5
26
+ self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#1
27
+ self.dropout3 = nn.Dropout(p=dropout)
28
+
29
+ self.Transformer_layers = nn.ModuleDict({})
30
+ for idx in range(nlayers):
31
+ self.Transformer_layers[f'time_attention_{idx}'] = DilatedTransformerLayer(dmodel, nhead, d_hid, dropout, Er_provided=False, attn_len=attn_len, norm_first=norm_first)
32
+ if (idx >= 3) and (idx <= 5):
33
+ self.Transformer_layers[f'instr_attention_{idx}'] = torchTransformerEncoderLayer(dmodel, nhead, d_hid, dropout, batch_first=True, norm_first=norm_first)
34
+
35
+ self.out_linear = nn.Linear(dmodel, ntoken)
36
+
37
+ self.dropout_t = nn.Dropout(p=.5)
38
+ self.out_linear_t = nn.Linear(dmodel, 300)
39
+
40
+
41
+ def forward(self, x):
42
+ #x: (batch, instr, time, dmodel), FloatTensor
43
+ batch, instr, time, melbin = x.shape
44
+ x = x.reshape(-1, 1, time, melbin)
45
+ x = self.conv1(x)
46
+ x = self.maxpool1(x)
47
+ x = torch.relu(x)
48
+ x = self.dropout1(x)
49
+
50
+ x = self.conv2(x)
51
+ x = self.maxpool2(x)
52
+ x = torch.relu(x)
53
+ x = self.dropout2(x)
54
+
55
+ x = self.conv3(x)
56
+ x = self.maxpool3(x)
57
+ x = torch.relu(x)
58
+ x = self.dropout3(x) #(batch*instr, channel, time, 1)
59
+
60
+ x = x.reshape(-1, self.dmodel, time).transpose(1, 2) #(batch*instr, time, channel=dmodel)
61
+ t = []
62
+
63
+ for layer in range(self.nlayers):
64
+ x, skip = self.Transformer_layers[f'time_attention_{layer}'](x, layer=layer)
65
+ skip = skip.reshape(batch, instr, time, self.dmodel)
66
+ t.append(skip.mean(1))
67
+
68
+ if (layer >= 3) and (layer <= 5):
69
+ x = x.reshape(batch, instr, time, self.dmodel)
70
+ x = x.permute(0, 2, 1, 3)
71
+ x = x.reshape(-1, instr, self.dmodel)
72
+
73
+ x = self.Transformer_layers[f'instr_attention_{layer}'](x)
74
+
75
+ x = x.reshape(batch, time, instr, self.dmodel)
76
+ x = x.permute(0, 2, 1, 3)
77
+ x = x.reshape(-1, time, self.dmodel)
78
+
79
+ x = torch.relu(x)
80
+ x = x.reshape(batch, instr, time, self.dmodel)
81
+ x = x.mean(1)
82
+ x = self.out_linear(x)
83
+
84
+ t = torch.stack(t, axis=-1).sum(dim=-1)
85
+ t = torch.relu(t)
86
+ t = self.dropout_t(t)
87
+ t = t.mean(dim=1) #(batch, dmodel)
88
+ t = self.out_linear_t(t)
89
+
90
+ return x, t
91
+
92
+ def inference(self, x):
93
+ #x: (batch, instr, time, dmodel), FloatTensor
94
+ #This inference method also outputs the cumulative attention matrix
95
+ batch, instr, time, melbin = x.shape
96
+ x = x.reshape(-1, 1, time, melbin)
97
+ x = self.conv1(x)
98
+ x = self.maxpool1(x)
99
+ x = torch.relu(x)
100
+ x = self.dropout1(x)
101
+
102
+ x = self.conv2(x)
103
+ x = self.maxpool2(x)
104
+ x = torch.relu(x)
105
+ x = self.dropout2(x)
106
+
107
+ x = self.conv3(x)
108
+ x = self.maxpool3(x)
109
+ x = torch.relu(x)
110
+ x = self.dropout3(x) #(batch*instr, channel, time, 1)
111
+
112
+ x = x.reshape(-1, self.dmodel, time).transpose(1, 2) #(batch*instr, time, channel=dmodel)
113
+ t = []
114
+
115
+ attn = [torch.eye(time, device=x.device).repeat(batch, self.nhead, 1, 1)]
116
+
117
+ for layer in range(self.nlayers):
118
+ x, skip, layer_attn = self.Transformer_layers[f'time_attention_{layer}'].inference(x, layer=layer)
119
+ skip = skip.reshape(batch, instr, time, self.dmodel)
120
+ t.append(skip.mean(1))
121
+
122
+ attn.append(torch.matmul(attn[-1], layer_attn.transpose(-2, -1)))
123
+
124
+ if (layer >= 3) and (layer <= 5):
125
+ x = x.reshape(batch, instr, time, self.dmodel)
126
+ x = x.permute(0, 2, 1, 3)
127
+ x = x.reshape(-1, instr, self.dmodel)
128
+
129
+ x = self.Transformer_layers[f'instr_attention_{layer}'](x)
130
+
131
+ x = x.reshape(batch, time, instr, self.dmodel)
132
+ x = x.permute(0, 2, 1, 3)
133
+ x = x.reshape(-1, time, self.dmodel)
134
+
135
+ x = torch.relu(x)
136
+ x = x.reshape(batch, instr, time, self.dmodel)
137
+ x = x.mean(1)
138
+ x = self.out_linear(x)
139
+
140
+ t = torch.stack(t, axis=-1).sum(dim=-1)
141
+ t = torch.relu(t)
142
+ t = self.dropout_t(t)
143
+ t = t.mean(dim=1) #(batch, dmodel)
144
+ t = self.out_linear_t(t)
145
+
146
+ return x, t, attn
147
+
148
+
149
+
150
+
151
+ if __name__ == '__main__':
152
+ from spectrogram_dataset import audioDataset
153
+ from torch.utils.data import DataLoader
154
+
155
+ DEVICE = 'cuda:0'
156
+ model = Demixed_DilatedTransformerModel(attn_len=5, instr=5, ntoken=2, dmodel=256, nhead=8, d_hid=1024, nlayers=9, norm_first=True, dropout=.1)
157
+ model.to(DEVICE)
158
+ model.eval()
159
+
160
+ for name, param in model.state_dict().items():
161
+ print(name, param.shape)
162
+ # name: str
163
+ # param: Tensor
164
+
165
+ total = sum([param.nelement() for param in model.parameters()])
166
+ print(total)
167
+ #print("Number of parameter: %.2fM" % (total/1e6))
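The model defined above uses a small convolutional front end to collapse each 128-bin mel spectrogram into one `dmodel`-dim vector per frame, stacks nine dilated self-attention layers along time, and lets layers 3-5 additionally attend across the five demixed stems before averaging them for the beat/downbeat and tempo heads. A minimal shape check, offered only as a sketch (it assumes `./code` is on the Python path; the 256-d, 8-head configuration mirrors the file's own `__main__` block):

```
import torch
from DilatedTransformer import Demixed_DilatedTransformerModel

# Sketch only: 8 heads are required by the fixed head slicing in
# DilatedTransformerLayer (4 symmetric + 4 skewed heads).
model = Demixed_DilatedTransformerModel(attn_len=5, instr=5, ntoken=2, dmodel=256,
                                        nhead=8, d_hid=1024, nlayers=9,
                                        norm_first=True, dropout=.1)
model.eval()

x = torch.randn(1, 5, 64, 128)   # (batch, demixed stems, time frames, mel bins)
with torch.no_grad():
    frame_logits, tempo_logits = model(x)
print(frame_logits.shape)        # (1, 64, 2): per-frame beat / downbeat logits
print(tempo_logits.shape)        # (1, 300): tempo-class logits
```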
Beat-Transformer/code/DilatedTransformerLayer.py ADDED
@@ -0,0 +1,183 @@
1
+ import math
2
+ import torch
3
+ import torch.nn.functional as F
4
+ from torch import nn
5
+ from torch.nn.modules.normalization import LayerNorm
6
+
7
+
8
+ class DilatedMultiheadSelfAttentionWithRelativePositionalEmbedding(nn.Module):
9
+ def __init__(self, dmodel, num_heads, dropout=0, Er_provided=False, attn_len=5):
10
+ super(DilatedMultiheadSelfAttentionWithRelativePositionalEmbedding, self).__init__()
11
+ self.attn_len = attn_len
12
+ self.dmodel = dmodel
13
+ self.num_heads = num_heads
14
+ self.head_dim = dmodel // num_heads
15
+ assert self.head_dim * num_heads == dmodel, "embed_dim must be divisible by num_heads"
16
+
17
+ self.key = nn.Linear(dmodel, dmodel)
18
+ self.value = nn.Linear(dmodel, dmodel)
19
+ self.query = nn.Linear(dmodel, dmodel)
20
+ self.dropout = nn.Dropout(dropout)
21
+ self.Er_provided = Er_provided
22
+
23
+ if not Er_provided:
24
+ self.Er = nn.Parameter(torch.randn(num_heads, self.head_dim, attn_len))
25
+
26
+
27
+ def forward(self, query, key, value, layer=0):
28
+ #query, key, and value: (batch, time, dmodel), float tensor
29
+
30
+ batch, time, d_model = query.shape
31
+
32
+ q = self.query(query).reshape(batch, time, self.num_heads, 1, self.head_dim).transpose(1, 2) #(batch, num_head, time, 1, head_dim)
33
+ k = self.key(key).reshape(batch, time, self.num_heads, 1, self.head_dim).transpose(1, 2) #(batch, num_head, time, 1, head_dim)
34
+ v = self.value(value).reshape(batch, time, self.num_heads, 1, self.head_dim).transpose(1, 2) #(batch, num_head, time, 1, head_dim)
35
+
36
+ k = torch.cat(
37
+ (
38
+ self.kv_roll(k[:, 0: 4], layer, padding_value=0, shift=0),
39
+ self.kv_roll(k[:, 4: 5], layer, padding_value=0, shift=-2),
40
+ self.kv_roll(k[:, 5: 6], layer, padding_value=0, shift=-1),
41
+ self.kv_roll(k[:, 6: 7], layer, padding_value=0, shift=1),
42
+ self.kv_roll(k[:, 6: 7], layer, padding_value=0, shift=2) #This should be k[:, 7: 8]. We had this bug during training so we keep it to fit the checkpoints.
43
+ ),
44
+ dim=1
45
+ ) #we define 4 symmetrical heads and 4 skewed heads
46
+
47
+ v = torch.cat(
48
+ (
49
+ self.kv_roll(v[:, 0: 4], layer, padding_value=0, shift=0),
50
+ self.kv_roll(v[:, 4: 5], layer, padding_value=0, shift=-2),
51
+ self.kv_roll(v[:, 5: 6], layer, padding_value=0, shift=-1),
52
+ self.kv_roll(v[:, 6: 7], layer, padding_value=0, shift=1),
53
+ self.kv_roll(v[:, 7: 8], layer, padding_value=0, shift=2)
54
+ ),
55
+ dim=1
56
+ ) #we define 4 symmetrical heads and 4 skewed heads
57
+
58
+ Er_t = self.Er.unsqueeze(1).unsqueeze(0) #(1, num_head, 1, head_dim, attn_len)
59
+
60
+ qk = torch.matmul(q, k.transpose(-2, -1))
61
+ attn_mask = torch.zeros_like(qk).masked_fill_((qk==0), float('-inf'))
62
+ attn = (qk + torch.matmul(q, Er_t)) / math.sqrt(self.head_dim)
63
+ attn = F.softmax(attn + attn_mask, dim=-1)
64
+
65
+ out = torch.matmul(attn, v) #(batch, num_head, time, 1, head_dim)
66
+ out = out.squeeze(-2).transpose(1, 2).reshape(batch, time, d_model)
67
+
68
+ return self.dropout(out), attn
69
+
70
+
71
+
72
+ def kv_roll(self, tensor, layer, padding_value=0, shift=1):
73
+ #tensor: (batch, num_head, time, 1, head_dim)
74
+ batch, num_head, time, _, head_dim = tensor.shape
75
+
76
+ tensor = F.pad(tensor, (0, 0, 0, 0, (2**layer)*(self.attn_len//2), (2**layer)*(self.attn_len//2)), mode='constant', value=padding_value)
77
+ #(batch, num_head, time+(2**layer)*(self.attn_len//2), 1, head_dim)
78
+
79
+ tensor = torch.cat([torch.roll(tensor, shifts=-i*(2**layer), dims=2) for i in range(shift, self.attn_len+shift)], dim=-2)
80
+ #(batch, num_head, time+(2**layer)*(self.attn_len//2), attn_len, head_dim)
81
+
82
+ return tensor[:, :, :time, :, :] #(batch, num_head, time, attn_len, head_dim)
83
+
84
+
85
+
86
+
87
+ class DilatedTransformerLayer(nn.Module):
88
+ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, Er_provided=False, attn_len=5, norm_first=False, layer_norm_eps=1e-5):
89
+ super(DilatedTransformerLayer, self).__init__()
90
+ self.self_attn = DilatedMultiheadSelfAttentionWithRelativePositionalEmbedding(d_model, nhead, dropout, Er_provided, attn_len)
91
+ # Implementation of Feedforward model
92
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
93
+ self.dropout = nn.Dropout(dropout)
94
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
95
+
96
+ self.norm_first = norm_first
97
+ self.norm1 = LayerNorm(d_model, eps=layer_norm_eps)
98
+ self.norm2 = LayerNorm(d_model, eps=layer_norm_eps)
99
+ self.dropout1 = nn.Dropout(dropout)
100
+ self.dropout2 = nn.Dropout(dropout)
101
+
102
+ self.activation = F.gelu
103
+
104
+
105
+ def forward(self, x, layer=0):
106
+ #x: (batch, time, dmodel)
107
+ if self.norm_first:
108
+ x_ = self._sa_block(self.norm1(x), layer)[0]
109
+ x = x + x_
110
+ x = x + self._ff_block(self.norm2(x))
111
+ else:
112
+ x_ = self._sa_block(x, layer)[0]
113
+ x = self.norm1(x + x_)
114
+ x = self.norm2(x + self._ff_block(x))
115
+ return x, x_
116
+
117
+
118
+ def inference(self, x, layer=0):
119
+ #x: (batch, time, dmodel)
120
+ if self.norm_first:
121
+ x_, attn = self._sa_block(self.norm1(x), layer)
122
+ x = x + x_
123
+ x = x + self._ff_block(self.norm2(x))
124
+ else:
125
+ x_, attn = self._sa_block(x, layer)
126
+ x = self.norm1(x + x_)
127
+ x = self.norm2(x + self._ff_block(x))
128
+
129
+
130
+ attn = attn.squeeze(-2) #batch, num_head, time, attn_len
131
+ batch, num_head, time, attn_len = attn.shape
132
+ padded_attn_len = (attn_len-1) * (2**layer) + 1
133
+ tmp_output = torch.zeros(batch, num_head, time, padded_attn_len, device=x.device)
134
+ for i, j in enumerate(range(0, padded_attn_len, 2**layer)):
135
+ tmp_output[:, :, :, j] = attn[:, :, :, i]
136
+
137
+ attn = torch.zeros(batch, num_head, time, time+(padded_attn_len-1)*2, device=x.device)
138
+ for i in range(time):
139
+ attn[:, :, i, i: i+padded_attn_len] = tmp_output[:, :, i]
140
+
141
+ center = (padded_attn_len-1)
142
+ attn = torch.cat(
143
+ [
144
+ attn[:, 0: 4, :, center - (2**layer) * 2: center - (2**layer) * 2 + time],
145
+ attn[:, 4: 5, :, center - (2**layer) * 1: center - (2**layer) * 1 + time],
146
+ attn[:, 5: 6, :, center - (2**layer) * 0: center - (2**layer) * 0 + time],
147
+ attn[:, 6: 7, :, center - (2**layer) * 3: center - (2**layer) * 3 + time],
148
+ attn[:, 7: 8, :, center - (2**layer) * 4: center - (2**layer) * 4 + time]
149
+ ],
150
+ dim=1
151
+ ) #restore the square attention matrix from dilated self-attention
152
+
153
+ return x, x_, attn
154
+
155
+
156
+ # self-attention block
157
+ def _sa_block(self, x, layer=0):
158
+ x, attn = self.self_attn(x, x, x, layer)
159
+ return self.dropout1(x), attn
160
+
161
+
162
+ # feed forward block
163
+ def _ff_block(self, x):
164
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
165
+ return self.dropout2(x)
166
+
167
+
168
+
169
+
170
+ if __name__ == '__main__':
171
+ BATCH=1
172
+ TIME=9
173
+ DMODEL=8
174
+ N_HEAD=4
175
+ ATTN_LEN=5
176
+ LAYER=1
177
+
178
+ x = torch.ones(BATCH, TIME, DMODEL)
179
+
180
+ model = DilatedMultiheadSelfAttentionWithRelativePositionalEmbedding(dmodel=DMODEL, num_heads=N_HEAD, attn_len=ATTN_LEN)
181
+
182
+ output, attn = model(x, x, x, layer=LAYER)
183
+ print(attn[0, 0, :, :, :])
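`kv_roll` above implements the dilation: after padding, the key/value tensors are rolled so that at layer `layer` every frame scores only `attn_len` keys spaced `2**layer` frames apart, and the `shift` argument moves that window for the skewed heads. A back-of-the-envelope illustration of the resulting attention footprint (not part of the repository):

```
# Relative key offsets visible to one query frame, per layer (attn_len = 5,
# symmetric heads with shift = 0). The skewed heads move this window by
# shift * 2**layer frames while keeping the same spacing.
attn_len = 5
for layer in range(9):
    offsets = [(i - attn_len // 2) * 2 ** layer for i in range(attn_len)]
    print(f"layer {layer}: {offsets}")
# layer 0: [-2, -1, 0, 1, 2]  ...  layer 8: [-512, -256, 0, 256, 512]
```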
Beat-Transformer/code/__pycache__/spectrogram_dataset.cpython-39.pyc ADDED
Binary file (10.5 kB).
 
Beat-Transformer/code/ablation_models/DilatedTransformerLayer.py ADDED
@@ -0,0 +1,183 @@
1
+ import math
2
+ import torch
3
+ import torch.nn.functional as F
4
+ from torch import nn
5
+ from torch.nn.modules.normalization import LayerNorm
6
+
7
+
8
+ class DilatedMultiheadSelfAttentionWithRelativePositionalEmbedding(nn.Module):
9
+ def __init__(self, dmodel, num_heads, dropout=0, Er_provided=False, attn_len=5):
10
+ super(DilatedMultiheadSelfAttentionWithRelativePositionalEmbedding, self).__init__()
11
+ self.attn_len = attn_len
12
+ self.dmodel = dmodel
13
+ self.num_heads = num_heads
14
+ self.head_dim = dmodel // num_heads
15
+ assert self.head_dim * num_heads == dmodel, "embed_dim must be divisible by num_heads"
16
+
17
+ self.key = nn.Linear(dmodel, dmodel)
18
+ self.value = nn.Linear(dmodel, dmodel)
19
+ self.query = nn.Linear(dmodel, dmodel)
20
+ self.dropout = nn.Dropout(dropout)
21
+ self.Er_provided = Er_provided
22
+
23
+ if not Er_provided:
24
+ self.Er = nn.Parameter(torch.randn(num_heads, self.head_dim, attn_len))
25
+
26
+
27
+ def forward(self, query, key, value, layer=0):
28
+ #query, key, and value: (batch, time, dmodel), float tensor
29
+
30
+ batch, time, d_model = query.shape
31
+
32
+ q = self.query(query).reshape(batch, time, self.num_heads, 1, self.head_dim).transpose(1, 2) #(batch, num_head, time, 1, head_dim)
33
+ k = self.key(key).reshape(batch, time, self.num_heads, 1, self.head_dim).transpose(1, 2) #(batch, num_head, time, 1, head_dim)
34
+ v = self.value(value).reshape(batch, time, self.num_heads, 1, self.head_dim).transpose(1, 2) #(batch, num_head, time, 1, head_dim)
35
+
36
+ k = torch.cat(
37
+ (
38
+ self.kv_roll(k[:, 0: 4], layer, padding_value=0, shift=0),
39
+ self.kv_roll(k[:, 4: 5], layer, padding_value=0, shift=-2),
40
+ self.kv_roll(k[:, 5: 6], layer, padding_value=0, shift=-1),
41
+ self.kv_roll(k[:, 6: 7], layer, padding_value=0, shift=1),
42
+ self.kv_roll(k[:, 6: 7], layer, padding_value=0, shift=2) #This should be k[:, 7: 8]. We had this bug during training so we keep it to fit the checkpoints.
43
+ ),
44
+ dim=1
45
+ ) #we define 4 symmetrical heads and 4 skewed heads
46
+
47
+ v = torch.cat(
48
+ (
49
+ self.kv_roll(v[:, 0: 4], layer, padding_value=0, shift=0),
50
+ self.kv_roll(v[:, 4: 5], layer, padding_value=0, shift=-2),
51
+ self.kv_roll(v[:, 5: 6], layer, padding_value=0, shift=-1),
52
+ self.kv_roll(v[:, 6: 7], layer, padding_value=0, shift=1),
53
+ self.kv_roll(v[:, 7: 8], layer, padding_value=0, shift=2)
54
+ ),
55
+ dim=1
56
+ ) #we define 4 symmetrical heads and 4 skewed heads
57
+
58
+ Er_t = self.Er.unsqueeze(1).unsqueeze(0) #(1, num_head, 1, head_dim, attn_len)
59
+
60
+ qk = torch.matmul(q, k.transpose(-2, -1))
61
+ attn_mask = torch.zeros_like(qk).masked_fill_((qk==0), float('-inf'))
62
+ attn = (qk + torch.matmul(q, Er_t)) / math.sqrt(self.head_dim)
63
+ attn = F.softmax(attn + attn_mask, dim=-1)
64
+
65
+ out = torch.matmul(attn, v) #(batch, num_head, time, 1, head_dim)
66
+ out = out.squeeze(-2).transpose(1, 2).reshape(batch, time, d_model)
67
+
68
+ return self.dropout(out), attn
69
+
70
+
71
+
72
+ def kv_roll(self, tensor, layer, padding_value=0, shift=1):
73
+ #tensor: (batch, num_head, time, 1, head_dim)
74
+ batch, num_head, time, _, head_dim = tensor.shape
75
+
76
+ tensor = F.pad(tensor, (0, 0, 0, 0, (2**layer)*(self.attn_len//2), (2**layer)*(self.attn_len//2)), mode='constant', value=padding_value)
77
+ #(batch, num_head, time+(2**layer)*(self.attn_len//2), 1, head_dim)
78
+
79
+ tensor = torch.cat([torch.roll(tensor, shifts=-i*(2**layer), dims=2) for i in range(shift, self.attn_len+shift)], dim=-2)
80
+ #(batch, num_head, time+(2**layer)*(self.attn_len//2), attn_len, head_dim)
81
+
82
+ return tensor[:, :, :time, :, :] #(batch, num_head, time, attn_len, head_dim)
83
+
84
+
85
+
86
+
87
+ class DilatedTransformerLayer(nn.Module):
88
+ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, Er_provided=False, attn_len=5, norm_first=False, layer_norm_eps=1e-5):
89
+ super(DilatedTransformerLayer, self).__init__()
90
+ self.self_attn = DilatedMultiheadSelfAttentionWithRelativePositionalEmbedding(d_model, nhead, dropout, Er_provided, attn_len)
91
+ # Implementation of Feedforward model
92
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
93
+ self.dropout = nn.Dropout(dropout)
94
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
95
+
96
+ self.norm_first = norm_first
97
+ self.norm1 = LayerNorm(d_model, eps=layer_norm_eps)
98
+ self.norm2 = LayerNorm(d_model, eps=layer_norm_eps)
99
+ self.dropout1 = nn.Dropout(dropout)
100
+ self.dropout2 = nn.Dropout(dropout)
101
+
102
+ self.activation = F.gelu
103
+
104
+
105
+ def forward(self, x, layer=0):
106
+ #x: (batch, time, dmodel)
107
+ if self.norm_first:
108
+ x_ = self._sa_block(self.norm1(x), layer)[0]
109
+ x = x + x_
110
+ x = x + self._ff_block(self.norm2(x))
111
+ else:
112
+ x_ = self._sa_block(x, layer)[0]
113
+ x = self.norm1(x + x_)
114
+ x = self.norm2(x + self._ff_block(x))
115
+ return x, x_
116
+
117
+
118
+ def inference(self, x, layer=0):
119
+ #x: (batch, time, dmodel)
120
+ if self.norm_first:
121
+ x_, attn = self._sa_block(self.norm1(x), layer)
122
+ x = x + x_
123
+ x = x + self._ff_block(self.norm2(x))
124
+ else:
125
+ x_, attn = self._sa_block(x, layer)
126
+ x = self.norm1(x + x_)
127
+ x = self.norm2(x + self._ff_block(x))
128
+
129
+
130
+ attn = attn.squeeze(-2) #batch, num_head, time, attn_len
131
+ batch, num_head, time, attn_len = attn.shape
132
+ padded_attn_len = (attn_len-1) * (2**layer) + 1
133
+ tmp_output = torch.zeros(batch, num_head, time, padded_attn_len, device=x.device)
134
+ for i, j in enumerate(range(0, padded_attn_len, 2**layer)):
135
+ tmp_output[:, :, :, j] = attn[:, :, :, i]
136
+
137
+ attn = torch.zeros(batch, num_head, time, time+(padded_attn_len-1)*2, device=x.device)
138
+ for i in range(time):
139
+ attn[:, :, i, i: i+padded_attn_len] = tmp_output[:, :, i]
140
+
141
+ center = (padded_attn_len-1)
142
+ attn = torch.cat(
143
+ [
144
+ attn[:, 0: 4, :, center - (2**layer) * 2: center - (2**layer) * 2 + time],
145
+ attn[:, 4: 5, :, center - (2**layer) * 1: center - (2**layer) * 1 + time],
146
+ attn[:, 5: 6, :, center - (2**layer) * 0: center - (2**layer) * 0 + time],
147
+ attn[:, 6: 7, :, center - (2**layer) * 3: center - (2**layer) * 3 + time],
148
+ attn[:, 7: 8, :, center - (2**layer) * 4: center - (2**layer) * 4 + time]
149
+ ],
150
+ dim=1
151
+ ) #restore the square attention matrix from dilated self-attention
152
+
153
+ return x, x_, attn
154
+
155
+
156
+ # self-attention block
157
+ def _sa_block(self, x, layer=0):
158
+ x, attn = self.self_attn(x, x, x, layer)
159
+ return self.dropout1(x), attn
160
+
161
+
162
+ # feed forward block
163
+ def _ff_block(self, x):
164
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
165
+ return self.dropout2(x)
166
+
167
+
168
+
169
+
170
+ if __name__ == '__main__':
171
+ BATCH=1
172
+ TIME=9
173
+ DMODEL=8
174
+ N_HEAD=4
175
+ ATTN_LEN=5
176
+ LAYER=1
177
+
178
+ x = torch.ones(BATCH, TIME, DMODEL)
179
+
180
+ model = DilatedMultiheadSelfAttentionWithRelativePositionalEmbedding(dmodel=DMODEL, num_heads=N_HEAD, attn_len=ATTN_LEN)
181
+
182
+ output, attn = model(x, x, x, layer=LAYER)
183
+ print(attn[0, 0, :, :, :])
Beat-Transformer/code/ablation_models/music_transformer.py ADDED
@@ -0,0 +1,145 @@
1
+ import math
2
+ from numpy import transpose
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from torch import nn
6
+ from torch.nn.modules.normalization import LayerNorm
7
+ from torch.nn import Transformer
8
+ import copy
9
+
10
+
11
+ class MultiheadAttentionwithRelativePositionalEmbedding(nn.Module):
12
+ def __init__(self, dmodel, num_heads, dropout=0, Er_provided=False, max_len=3):
13
+ super(MultiheadAttentionwithRelativePositionalEmbedding, self).__init__()
14
+ self.L = 2 * max_len - 1
15
+ self.num_heads = num_heads
16
+ self.max_len = max_len
17
+ self.head_dim = dmodel // num_heads
18
+ assert self.head_dim * num_heads == dmodel, "embed_dim must be divisible by num_heads"
19
+
20
+ self.key = nn.Linear(dmodel, dmodel)
21
+ self.value = nn.Linear(dmodel, dmodel)
22
+ self.query = nn.Linear(dmodel, dmodel)
23
+ self.dropout = nn.Dropout(dropout)
24
+ self.Er_provided = Er_provided
25
+ self.num_heads = num_heads
26
+
27
+ if not Er_provided:
28
+ self.Er = nn.Parameter(torch.randn(num_heads, self.L, self.head_dim))
29
+
30
+ def forward(self, query, key, value, Er=None, layer=0, attn_mask=None):
31
+ #x: (batch, len, dmodel)
32
+ #Srel: (num_head, tgt_len, src_len)
33
+ #attn_mask: (batch, num_head, tgt_len, src_len): float tensor
34
+ bs, tgt_len, d_model = query.shape
35
+ _, src_len, _ = key.shape
36
+
37
+ q = self.query(query).reshape(bs, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) #(batch, num_head, tgt_len, head_dim)
38
+ k = self.key(key).reshape(bs, src_len, self.num_heads, self.head_dim).permute(0, 2, 3, 1) #(batch, num_head, head_dim, src_len)
39
+ v = self.value(value).reshape(bs, src_len, self.num_heads, self.head_dim).transpose(1, 2) #(batch, num_head, src_len, head_dim)
40
+
41
+ Er_t = torch.zeros(self.num_heads, 2*src_len-1, self.head_dim, device=query.device)
42
+ dilation_len = min(1 + (src_len-1)//(2**layer), self.max_len)
43
+ if not self.Er_provided:
44
+ Er_t[:, [src_len-1 + i*(2**layer) for i in range(-dilation_len+1, dilation_len)], :] = self.Er[:, self.max_len-dilation_len: self.max_len+dilation_len-1, :]
45
+ else:
46
+ Er_t[:, [src_len-1 + i*(2**layer) for i in range(-dilation_len+1, dilation_len)], :] = Er[:, self.max_len-dilation_len: self.max_len+dilation_len-1, :]
47
+ Er_t = Er_t.transpose(-2, -1) #(num_head, head_dim, src_L)
48
+
49
+ QEr = torch.matmul(q, Er_t) #(num_head, num_head, tgt_len, src_L)
50
+ #print(QEr[0, 0])
51
+ Srel = self.skew(QEr, src_len) #(num_head, num_head, tgt_len, src_len)
52
+ #print('Srel', Srel[1, 1])
53
+
54
+ attn = (torch.matmul(q, k) + Srel) / math.sqrt(self.head_dim) #(batch, num_head, tgt_len, src_len)
55
+
56
+ if attn_mask is not None:
57
+ #print(attn.shape, attn_mask.shape)
58
+ attn += attn_mask
59
+ #for i in range(attn.shape[0]):
60
+ # print(attn_mask[i, 0])
61
+ attn = F.softmax(attn, dim=-1)
62
+
63
+ out = torch.matmul(attn, v) #(batch, num_head, tgt_len, head_dim)
64
+ out = out.transpose(1, 2).reshape(bs, tgt_len, d_model) #(batch, tgt_len, d_model)
65
+
66
+ return self.dropout(out), attn
67
+
68
+ def skew(self, QEr, src_len):
69
+ #QEr: (batch, num_heads, tgt_len, src_L)
70
+ bs, num_heads, tgt_len, src_L = QEr.shape
71
+ QEr = F.pad(QEr, (0, 1)) #(batch, num_heads, tgt_len, src_L+1)
72
+ QEr = QEr.reshape(bs, num_heads, -1) #(batch, num_heads, tgt_len*(src_L+1))
73
+ QEr = F.pad(QEr, (0, src_L-tgt_len)) #(batch, num_heads, (tgt_len+1)*src_L)
74
+ QEr = QEr.reshape(bs, num_heads, tgt_len+1, src_L)
75
+ QEr = QEr[:, :, :tgt_len, -src_len:] #(batch, num_heads, tgt_len, src_len)
76
+ return QEr
77
+
78
+
79
+ class TransformerEncoderLayer(nn.Module):
80
+ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, Er_provided=False, max_len=3, layer_norm_eps=1e-5, norm_first=False):
81
+ super(TransformerEncoderLayer, self).__init__()
82
+ self.self_attn = MultiheadAttentionwithRelativePositionalEmbedding(d_model, nhead, dropout, Er_provided, max_len)
83
+ # Implementation of Feedforward model
84
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
85
+ self.dropout = nn.Dropout(dropout)
86
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
87
+
88
+ self.norm_first = norm_first
89
+ self.norm1 = LayerNorm(d_model, eps=layer_norm_eps)
90
+ self.norm2 = LayerNorm(d_model, eps=layer_norm_eps)
91
+ self.dropout1 = nn.Dropout(dropout)
92
+ self.dropout2 = nn.Dropout(dropout)
93
+
94
+ self.activation = F.gelu
95
+
96
+ def forward(self, x, Er=None, layer=0, src_mask=None):
97
+ #x: (batch, len, dmodel)
98
+ #Er: (num_head, tgt_len, src_len)
99
+ #key_padding_mask: (batch, num_head, tgt_len, src_len), bool tensor
100
+ #attn_mask: (batch, num_head, tgt_len, src_len): float tensor
101
+ if self.norm_first:
102
+ x = x + self._sa_block(self.norm1(x), Er, layer, src_mask)
103
+ x = x + self._ff_block(self.norm2(x))
104
+ else:
105
+ x = self.norm1(x + self._sa_block(x, Er, layer, src_mask))
106
+ x = self.norm2(x + self._ff_block(x))
107
+ return x
108
+
109
+ # self-attention block
110
+ def _sa_block(self, x, Er=None, layer=0, attn_mask=None):
111
+ x = self.self_attn(x, x, x, Er, layer, attn_mask=attn_mask)[0]
112
+ return self.dropout1(x)
113
+
114
+ # feed forward block
115
+ def _ff_block(self, x):
116
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
117
+ return self.dropout2(x)
118
+
119
+
120
+
121
+ def generate_dilation_self_attention_mask(batch, num_head, seq_len, max_len, layer):
122
+ attn_mask = torch.eye(seq_len).repeat(batch, num_head, 1, 1)
123
+ mask_temp = torch.eye(seq_len).repeat(batch, num_head, 1, 1)
124
+ for i in range(1, max_len):
125
+ attn_mask[:, :, : -i*(2**layer), i*(2**layer):] += mask_temp[:, :, i*(2**layer):, i*(2**layer):]
126
+ attn_mask[:, :, i*(2**layer):, : -i*(2**layer)] += mask_temp[:, :, i*(2**layer):, i*(2**layer):]
127
+ attn_mask = (1-attn_mask).masked_fill((attn_mask == 0), -float('inf'))
128
+ return attn_mask
129
+
130
+
131
+
132
+
133
+ if __name__ == '__main__':
134
+ MAX_LEN=3
135
+ LAYER=0
136
+
137
+ model = MultiheadAttentionwithRelativePositionalEmbedding(dmodel=12, num_heads=6, max_len=MAX_LEN)
138
+
139
+ x = torch.ones(3, 8, 12)
140
+
141
+ attn_mask = generate_dilation_self_attention_mask(3, 6, 8, MAX_LEN, LAYER)
142
+ #print(attn_mask[1, 1, :, :].numpy())
143
+
144
+ output, attn = model(x, x, x, attn_mask=attn_mask, layer=LAYER)
145
+ #print(attn[1, 1])
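This ablation keeps full quadratic self-attention and emulates dilation with an additive mask rather than the rolled keys of `DilatedTransformerLayer`. A small check of the mask pattern, sketched under the assumption that `music_transformer.py` is importable from the working directory:

```
from music_transformer import generate_dilation_self_attention_mask

# With max_len=3 and layer=1 a frame may attend to itself and to frames
# +-2 and +-4 away; every other position is set to -inf before the softmax.
mask = generate_dilation_self_attention_mask(batch=1, num_head=1, seq_len=8,
                                             max_len=3, layer=1)
print(mask[0, 0])   # 0.0 marks an allowed key, -inf a masked one
```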
Beat-Transformer/code/ablation_models/non_demix_model.py ADDED
@@ -0,0 +1,261 @@
1
+ import torch
2
+ from torch import nn
3
+ from DilatedTransformerLayer import DilatedTransformerLayer
4
+
5
+
6
+ class DilatedTransformerModel(nn.Module):
7
+ def __init__(self, attn_len=5, ntoken=2, dmodel=128, nhead=2, d_hid=512, nlayers=9, norm_first=True, dropout=.1):
8
+ super(DilatedTransformerModel, self).__init__()
9
+ self.nhead = nhead
10
+ self.nlayers = nlayers
11
+ self.attn_len = attn_len
12
+ self.head_dim = dmodel // nhead
13
+ assert self.head_dim * nhead == dmodel, "embed_dim must be divisible by num_heads"
14
+
15
+ #self.Er = nn.Parameter(torch.randn(nlayers, nhead, self.head_dim, attn_len))
16
+
17
+ self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(5, 3), stride=1, padding=(2, 0))#126
18
+ #self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3), stride=1, padding=(1, 0))#79
19
+ self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#26
20
+ self.dropout1 = nn.Dropout(p=dropout)
21
+
22
+ self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(1, 12), stride=1, padding=(0, 0))#31
23
+ #self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(1, 12), stride=1, padding=(0, 0))#15
24
+ self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#5
25
+ self.dropout2 = nn.Dropout(p=dropout)
26
+
27
+ self.conv3 = nn.Conv2d(in_channels=64, out_channels=dmodel, kernel_size=(3, 6), stride=1, padding=(1, 0))#5
28
+ #self.conv3 = nn.Conv2d(in_channels=64, out_channels=dmodel, kernel_size=(3, 3), stride=1, padding=(1, 0))#3
29
+ self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#1
30
+ self.dropout3 = nn.Dropout(p=dropout)
31
+
32
+ self.Transformer_layers = nn.ModuleDict({})
33
+ for idx in range(nlayers):
34
+ self.Transformer_layers[f'Transformer_layer_{idx}'] = DilatedTransformerLayer(dmodel, nhead, d_hid, dropout, Er_provided=False, attn_len=attn_len, norm_first=norm_first)
35
+
36
+ self.out_linear = nn.Linear(dmodel, ntoken)
37
+
38
+ self.dropout_t = nn.Dropout(p=.5)
39
+ self.out_linear_t = nn.Linear(dmodel, 300)
40
+
41
+
42
+ def forward(self, x):
43
+ #x: (batch, time, dmodel), FloatTensor
44
+ x = x.unsqueeze(1) #(batch, channel, time, dmodel)
45
+ x = self.conv1(x)
46
+ x = self.maxpool1(x)
47
+ x = torch.relu(x)
48
+ x = self.dropout1(x)
49
+
50
+ x = self.conv2(x)
51
+ x = self.maxpool2(x)
52
+ x = torch.relu(x)
53
+ x = self.dropout2(x)
54
+
55
+ x = self.conv3(x)
56
+ x = self.maxpool3(x)
57
+ x = torch.relu(x)
58
+ x = self.dropout3(x) #(batch, channel, time, 1)
59
+ x = x.transpose(1, 3).squeeze(1).contiguous() #(batch, time, channel=dmodel)
60
+
61
+ batch, time, dmodel = x.shape
62
+ t = []
63
+ for layer in range(self.nlayers):
64
+ x, skip = self.Transformer_layers[f'Transformer_layer_{layer}'](x, layer=layer)
65
+ t.append(skip)
66
+
67
+ x = torch.relu(x)
68
+ x = self.out_linear(x)
69
+
70
+ t = torch.stack(t, axis=-1).sum(dim=-1)
71
+ t = torch.relu(t)
72
+ t = self.dropout_t(t)
73
+ t = t.mean(dim=1) #(batch, dmodel)
74
+ t = self.out_linear_t(t)
75
+
76
+ return x, t
77
+
78
+ def inference(self, x):
79
+ #x: (batch, time, dmodel), FloatTensor
80
+ x = x.unsqueeze(1) #(batch, channel, time, dmodel)
81
+ x = self.conv1(x)
82
+ x = self.maxpool1(x)
83
+ x = torch.relu(x)
84
+ x = self.dropout1(x)
85
+
86
+ x = self.conv2(x)
87
+ x = self.maxpool2(x)
88
+ x = torch.relu(x)
89
+ x = self.dropout2(x)
90
+
91
+ x = self.conv3(x)
92
+ x = self.maxpool3(x)
93
+ x = torch.relu(x)
94
+ x = self.dropout3(x) #(batch, channel, time, 1)
95
+ x = x.transpose(1, 3).squeeze(1).contiguous() #(batch, time, channel=dmodel)
96
+
97
+ batch, time, dmodel = x.shape
98
+ t = []
99
+ attn = [torch.eye(time, device=x.device).repeat(batch, self.nhead, 1, 1)]
100
+ for layer in range(self.nlayers):
101
+ x, skip, layer_attn = self.Transformer_layers[f'Transformer_layer_{layer}'].inference(x, layer=layer)
102
+ t.append(skip)
103
+ attn.append(torch.matmul(attn[-1], layer_attn.transpose(-2, -1)))
104
+
105
+
106
+ x = torch.relu(x)
107
+ x = self.out_linear(x)
108
+
109
+ t = torch.stack(t, axis=-1).sum(dim=-1)
110
+ t = torch.relu(t)
111
+ t = self.dropout_t(t)
112
+ t = t.mean(dim=1) #(batch, dmodel)
113
+ t = self.out_linear_t(t)
114
+
115
+ return x, t, attn
116
+
117
+
118
+ if __name__ == '__main__':
119
+ from non_demix_spectrogram_dataset import audioDataset
120
+ from torch.utils.data import DataLoader
121
+ from tqdm import tqdm
122
+ import numpy as np
123
+ import madmom
124
+ from utils import AverageMeter
125
+ SAMPLE_SIZE = int(44100 / 1024 * 180)
126
+ INSTR =5
127
+ FPS = 44100 / 1024
128
+ NUM_FOLDS = 8
129
+ #model
130
+ NORM_FIRST=True
131
+ ATTN_LEN=5
132
+ NTOKEN=2
133
+ DMODEL=256
134
+ NHEAD=8
135
+ DHID=512
136
+ NLAYER=9
137
+ DROPOUT=.1
138
+
139
+ DEVICE=f'cuda:{0}'
140
+ TRAIN_BATCH_SIZE = 1
141
+
142
+ DATASET_PATH = './data/demix_spectrogram_data.npz'
143
+ ANNOTATION_PATH = './data/full_beat_annotation.npz'
144
+ DATA_TO_LOAD = ['gtzan']
145
+ TEST_ONLY = ['gtzan']
146
+
147
+ model = DilatedTransformerModel(attn_len=ATTN_LEN,
148
+ ntoken=NTOKEN,
149
+ dmodel=DMODEL,
150
+ nhead=NHEAD,
151
+ d_hid=DHID,
152
+ nlayers=NLAYER,
153
+ norm_first=NORM_FIRST,
154
+ dropout=DROPOUT
155
+ )
156
+ model.load_state_dict(torch.load("/mnt/c/Users/zhaoj/Desktop/trf_param_018.pt", map_location=torch.device(DEVICE)))
157
+ model.to(DEVICE)
158
+
159
+
160
+ dataset = audioDataset(data_to_load=DATA_TO_LOAD,
161
+ test_only_data = TEST_ONLY,
162
+ data_path = DATASET_PATH,
163
+ annotation_path = ANNOTATION_PATH,
164
+ fps = FPS,
165
+ sample_size = SAMPLE_SIZE,
166
+ num_folds = 1)
167
+ _, _, test_set = dataset.get_fold(fold=0)
168
+ #loader = DataLoader(val_set, batch_size=1, shuffle=False)
169
+ loader = DataLoader(test_set, batch_size=1, shuffle=False)
170
+
171
+
172
+ beat_DBN_meter = AverageMeter()
173
+ downbeat_DBN_meter = AverageMeter()
174
+
175
+ beat_tracker = madmom.features.beats.DBNBeatTrackingProcessor(min_bpm=55.0, max_bpm=215.0, fps=FPS,
176
+ transition_lambda=100,
177
+ observation_lambda=6,
178
+ num_tempi=None,
179
+ threshold=0.2)
180
+
181
+ downbeat_tracker = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], min_bpm=55.0, max_bpm=215.0, fps=FPS,
182
+ transition_lambda=100,
183
+ observation_lambda=6,
184
+ num_tempi=None,
185
+ threshold=0.2)
186
+
187
+ activations = {}
188
+ beat_gt = {}
189
+ downbeat_gt = {}
190
+
191
+ count = 0
192
+ with torch.no_grad():
193
+ for idx, (dataset_key, data, beat, downbeat, tempo, root) in tqdm(enumerate(loader), total=len(loader)):
194
+ #data
195
+ data = data.float().to(DEVICE)
196
+ print(data.shape)
197
+ pred, _ = model(data)
198
+ beat_pred = torch.sigmoid(pred[0, :, 0]).detach().cpu().numpy()
199
+ downbeat_pred = torch.sigmoid(pred[0, :, 1]).detach().cpu().numpy()
200
+
201
+ beat = torch.nonzero(beat[0]>.5)[:, 0].detach().numpy() / (FPS)
202
+ downbeat = torch.nonzero(downbeat[0]>.5)[:, 0].detach().numpy() / (FPS)
203
+
204
+ dataset_key = dataset_key[0]
205
+ root = root[0]
206
+ if not dataset_key in activations:
207
+ activations[dataset_key] = []
208
+ beat_gt[dataset_key] = []
209
+ downbeat_gt[dataset_key] = []
210
+ activations[dataset_key].append(np.stack((beat_pred, downbeat_pred), axis=0))
211
+ beat_gt[dataset_key].append(beat)
212
+ downbeat_gt[dataset_key].append(downbeat)
213
+
214
+ #count += 1
215
+ #if count == 50:
216
+ # break
217
+
218
+ for dataset_key in activations:
219
+ print(f'inferencing on {dataset_key} ...')
220
+ beat_error = 0
221
+ downbeat_error = 0
222
+ for i in tqdm(range(len(activations[dataset_key]))):
223
+ pred = activations[dataset_key][i]
224
+ #print(pred.shape)
225
+ beat = beat_gt[dataset_key][i]
226
+ downbeat = downbeat_gt[dataset_key][i]
227
+
228
+ try:
229
+ dbn_beat_pred = beat_tracker(pred[0])
230
+ beat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_beat_pred, beat)
231
+ beat_DBN_meter.update(f'{dataset_key}-fmeasure', beat_score_DBN.fmeasure)
232
+ beat_DBN_meter.update(f'{dataset_key}-cmlt', beat_score_DBN.cmlt)
233
+ beat_DBN_meter.update(f'{dataset_key}-amlt', beat_score_DBN.amlt)
234
+
235
+ except Exception as e:
236
+ #print(f'beat inference encounter exception {e}')
237
+ beat_error += 1
238
+
239
+
240
+ try:
241
+ combined_act = np.concatenate((np.maximum(pred[0] - pred[1], np.zeros(pred[0].shape))[:, np.newaxis], pred[1][:, np.newaxis]), axis=-1) #(T, 2)
242
+ #print(combined_act.shape)
243
+ dbn_downbeat_pred = downbeat_tracker(combined_act)
244
+ dbn_downbeat_pred = dbn_downbeat_pred[dbn_downbeat_pred[:, 1]==1][:, 0]
245
+
246
+ downbeat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_downbeat_pred, downbeat)
247
+ downbeat_DBN_meter.update(f'{dataset_key}-fmeasure', downbeat_score_DBN.fmeasure)
248
+ downbeat_DBN_meter.update(f'{dataset_key}-cmlt', downbeat_score_DBN.cmlt)
249
+ downbeat_DBN_meter.update(f'{dataset_key}-amlt', downbeat_score_DBN.amlt)
250
+ except Exception as e:
251
+ #print(f'downbeat inference encounter exception {e}')
252
+ downbeat_error += 1
253
+ print(f'beat error: {beat_error}; downbeat error: {downbeat_error}')
254
+
255
+ print('DBN beat detection')
256
+ for key in beat_DBN_meter.avg.keys():
257
+ print('\t', key, beat_DBN_meter.avg[key])
258
+
259
+ print('DBN downbeat detection')
260
+ for key in downbeat_DBN_meter.avg.keys():
261
+ print('\t', key, downbeat_DBN_meter.avg[key])
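The evaluation loop above converts per-frame activations into beat and downbeat times with madmom's DBN decoders; the downbeat tracker expects a two-column activation (beat-without-downbeat, downbeat) and returns rows of (time, position-in-bar). A condensed sketch of that post-processing step, assuming madmom is installed and using random stand-ins for the model's sigmoid outputs at 44100/1024 frames per second:

```
import numpy as np
import madmom

FPS = 44100 / 1024
downbeat_tracker = madmom.features.downbeats.DBNDownBeatTrackingProcessor(
    beats_per_bar=[3, 4], min_bpm=55.0, max_bpm=215.0, fps=FPS,
    transition_lambda=100, observation_lambda=6, num_tempi=None, threshold=0.2)

beat_act = np.random.rand(2000).astype('float32')      # stand-ins for model activations
downbeat_act = np.random.rand(2000).astype('float32')

# Column 0: beat-but-not-downbeat energy; column 1: downbeat energy.
combined_act = np.stack([np.maximum(beat_act - downbeat_act, 0), downbeat_act], axis=-1)
decoded = downbeat_tracker(combined_act)                # rows of (time in s, beat-in-bar)
downbeat_times = decoded[decoded[:, 1] == 1][:, 0]
print(downbeat_times[:5])
```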
Beat-Transformer/code/ablation_models/non_demix_spectrogram_dataset.py ADDED
@@ -0,0 +1,393 @@
1
+ import os
2
+ import time
3
+ import madmom
4
+ import torch
5
+ import librosa
6
+ import numpy as np
7
+ from torch.utils.data import Dataset
8
+ from scipy.ndimage import maximum_filter1d
9
+ from tqdm import tqdm
10
+ from matplotlib import pyplot as plt
11
+ import librosa.display
12
+ from scipy.interpolate import interp1d
13
+ from scipy.signal import argrelmax
14
+
15
+
16
+
17
+ class dataset_processing(Dataset):
18
+ def __init__(self, full_data,
19
+ full_annotation,
20
+ audio_files,
21
+ mode='train',
22
+ fold=0,
23
+ fps=44100/1024,
24
+ sample_size = 512,
25
+ num_folds=8,
26
+ mask_value=-1,
27
+ test_only = []
28
+ ):
29
+ self.fold = fold
30
+ self.num_folds = num_folds
31
+ self.fps = fps
32
+ self.mode = mode
33
+ self.sample_size = sample_size
34
+ self.MASK_VALUE = mask_value
35
+
36
+ self.data = []
37
+ self.beats = []
38
+ self.downbeats = []
39
+ self.tempi = []
40
+ self.root = []
41
+
42
+ if self.mode == 'train':
43
+ self.dataset_name = []
44
+ self.train_clip(full_data, full_annotation, test_only=test_only)
45
+
46
+ elif self.mode == 'validation' or self.mode == 'test':
47
+ self.dataset_name = []
48
+ self.audio_files = []
49
+ self.val_and_test_clip(full_data, full_annotation, audio_files, test_only=test_only)
50
+
51
+ full_data = None
52
+ full_annotation = None
53
+
54
+ def train_clip(self, full_data, full_annotation, num_tempo_bins=300, test_only=[]):
55
+ for fold_idx in tqdm(range(self.num_folds)):
56
+ if (fold_idx != self.fold) and (fold_idx != (self.fold+1)%self.num_folds):
57
+ for key in full_data:
58
+ if key == test_only:
59
+ continue
60
+ #print(f'processing {key} under fold {fold_idx}')
61
+ for song_idx in range(len(full_data[key][fold_idx])):
62
+ song = full_data[key][fold_idx][song_idx] #(t, 5, mel)
63
+ annotation = full_annotation[key][fold_idx][song_idx]
64
+ try:
65
+ #print(annotation, annotation.shape)
66
+ if len(annotation.shape) == 2:
67
+ beat = madmom.utils.quantize_events(annotation[:, 0], fps=self.fps, length=len(song))
68
+ else:
69
+ beat = madmom.utils.quantize_events(annotation[:], fps=self.fps, length=len(song))
70
+ beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
71
+ beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
72
+ except:
73
+ beat = np.ones(len(song), dtype='float32') * self.MASK_VALUE
74
+ print(f'beat load error at {key} dataset, skip it')
75
+
76
+ try:
77
+ downbeat = annotation[annotation[:, 1] == 1][:, 0]
78
+ downbeat = madmom.utils.quantize_events(downbeat, fps=self.fps, length=len(song))
79
+ downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
80
+ downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
81
+ except:
82
+ downbeat = np.ones(len(song), dtype='float32') * self.MASK_VALUE
83
+ if not ((key == 'smc') or (key == 'musicnet')):
84
+ print(f'downbeat load error at {key} dataset, skip it')
85
+
86
+ try:
87
+ #tempo = self.infer_tempo(annotation[:, 0])
88
+ #tempo = np.array([int(np.round(tempo))])
89
+ tempo = np.zeros(num_tempo_bins, dtype='float32')
90
+ if len(annotation.shape) == 2:
91
+ tempo[int(np.round(self.infer_tempo(annotation[:, 0])))] = 1
92
+ else:
93
+ tempo[int(np.round(self.infer_tempo(annotation[:])))] = 1
94
+ tempo = np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.5)
95
+ tempo = np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.5)
96
+ tempo = tempo/sum(tempo)
97
+ #tempo += np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.25)
98
+ except:
99
+ #tempo = np.array([self.MASK_VALUE])
100
+ tempo = np.ones(num_tempo_bins, dtype='float32') * self.MASK_VALUE
101
+
102
+ if self.sample_size is None:
103
+ self.dataset_name.append(key)
104
+ self.data.append(song)
105
+ self.beats.append(beat)
106
+ self.downbeats.append(downbeat)
107
+ self.tempi.append(tempo)
108
+ else:
109
+ if len(song) <= self.sample_size:
110
+ self.dataset_name.append(key)
111
+ self.data.append(song)
112
+ self.beats.append(beat)
113
+ self.downbeats.append(downbeat)
114
+ self.tempi.append(tempo)
115
+ else:
116
+ for i in range(0, len(song)-self.sample_size+1, self.sample_size):
117
+ self.dataset_name.append(key)
118
+ self.data.append(song[i: i+self.sample_size])
119
+ self.beats.append(beat[i: i+self.sample_size])
120
+ self.downbeats.append(downbeat[i: i+self.sample_size])
121
+ self.tempi.append(tempo)
122
+ if i + self.sample_size < len(song):
123
+ self.dataset_name.append(key)
124
+ self.data.append(song[len(song)-self.sample_size:])
125
+ self.beats.append(beat[len(song)-self.sample_size:])
126
+ self.downbeats.append(downbeat[len(song)-self.sample_size:])
127
+ self.tempi.append(tempo)
128
+
129
+
130
+ #print(len(self.data), len(self.beats), len(self.downbeats))
131
+
132
+ def val_and_test_clip(self, full_data, full_annotation, audio_files, num_tempo_bins=300, test_only=[]):
133
+ if self.mode == 'validation':
134
+ fold_idx = (self.fold+1)%self.num_folds
135
+ elif self.mode == 'test':
136
+ fold_idx = self.fold
137
+ for key in tqdm(full_data, total=len(full_data)):
138
+ #print(f'processing {key}')
139
+ if ((self.mode == 'validation') and (key in test_only)):
140
+ continue
141
+ for song_idx in range(len(full_data[key][fold_idx])):
142
+ song = full_data[key][fold_idx][song_idx]
143
+ annotation = full_annotation[key][fold_idx][song_idx]
144
+ audio_file = audio_files[key][fold_idx][song_idx]
145
+ try:
146
+ if len(annotation.shape) == 2:
147
+ beat = madmom.utils.quantize_events(annotation[:, 0], fps=self.fps, length=len(song))
148
+ else:
149
+ beat = madmom.utils.quantize_events(annotation[:], fps=self.fps, length=len(song))
150
+ beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
151
+ beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
152
+ except:
153
+ beat = np.ones(len(song), dtype='float32') * self.MASK_VALUE
154
+ print(f'beat load error at {key} dataset, skip it')
155
+
156
+ try:
157
+ downbeat = annotation[annotation[:, 1] == 1][:, 0]
158
+ downbeat = madmom.utils.quantize_events(downbeat, fps=self.fps, length=len(song))
159
+ downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
160
+ downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
161
+ except:
162
+ downbeat = np.ones(len(song), dtype='float32') * self.MASK_VALUE
163
+ if not ((key == 'smc') or (key == 'musicnet')):
164
+ print(f'downbeat load error at {key} dataset, skip it')
165
+
166
+ try:
167
+ #tempo = self.infer_tempo(annotation[:, 0])
168
+ #tempo = np.array([int(np.round(tempo))])
169
+ tempo = np.zeros(num_tempo_bins, dtype='float32')
170
+ if len(annotation.shape) == 2:
171
+ tempo[int(np.round(self.infer_tempo(annotation[:, 0])))] = 1
172
+ else:
173
+ tempo[int(np.round(self.infer_tempo(annotation[:])))] = 1
174
+ tempo = np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.5)
175
+ tempo = np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.5)
176
+ tempo = tempo/sum(tempo)
177
+ except:
178
+ #tempo = np.array([self.MASK_VALUE])
179
+ tempo = np.ones(num_tempo_bins, dtype='float32') * self.MASK_VALUE
180
+
181
+ if self.sample_size is None:
182
+ self.dataset_name.append(key)
183
+ self.root.append(audio_file)
184
+ self.data.append(song)
185
+ self.beats.append(beat)
186
+ self.downbeats.append(downbeat)
187
+ self.tempi.append(tempo)
188
+ else:
189
+ eval_sample_size = int(44100/1024 * 420)
190
+ if len(song) <= eval_sample_size:
191
+ self.dataset_name.append(key)
192
+ self.root.append(audio_file)
193
+ self.data.append(song)
194
+ self.beats.append(beat)
195
+ self.downbeats.append(downbeat)
196
+ self.tempi.append(tempo)
197
+ else:
198
+ for i in range(0, len(song)-eval_sample_size+1, eval_sample_size):
199
+ self.dataset_name.append(key)
200
+ self.root.append(audio_file)
201
+ self.data.append(song[i: i+eval_sample_size])
202
+ self.beats.append(beat[i: i+eval_sample_size])
203
+ self.downbeats.append(downbeat[i: i+eval_sample_size])
204
+ self.tempi.append(tempo)
205
+ if i + eval_sample_size < len(song):
206
+ self.dataset_name.append(key)
207
+ self.root.append(audio_file)
208
+ self.data.append(song[len(song)-eval_sample_size:])
209
+ self.beats.append(beat[len(song)-eval_sample_size:])
210
+ self.downbeats.append(downbeat[len(song)-eval_sample_size:])
211
+ self.tempi.append(tempo)
212
+
213
+ def infer_tempo(self, beats, hist_smooth=4, no_tempo=-1):
214
+ ibis = np.diff(beats) * self.fps
215
+ bins = np.bincount(np.round(ibis).astype(int))
216
+ # if no beats are present, there is no tempo
217
+ if not bins.any():
218
+ return no_tempo
219
+ # smooth histogram bins
220
+ if hist_smooth > 0:
221
+ bins = madmom.audio.signal.smooth(bins, hist_smooth)
222
+ #print(bins)
223
+ intervals = np.arange(len(bins))
224
+ # create interpolation function
225
+ interpolation_fn = interp1d(intervals, bins, 'quadratic')
226
+ # generate new intervals with 1000x the resolution
227
+ intervals = np.arange(intervals[0], intervals[-1], 0.001)
228
+ tempi = 60.0 * self.fps / intervals
229
+ # apply quadratic interpolation
230
+ bins = interpolation_fn(intervals)
231
+ peaks = argrelmax(bins, mode='wrap')[0]
232
+ if len(peaks) == 0:
233
+ # no peaks, no tempo
234
+ return no_tempo
235
+ else:
236
+ # report only the strongest tempo
237
+ sorted_peaks = peaks[np.argsort(bins[peaks])[::-1]]
238
+ return tempi[sorted_peaks][0]
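
For intuition on the interval-to-BPM conversion in infer_tempo: with fps = 44100/1024 (about 43.07 frames per second), a dominant inter-beat interval of roughly 21.5 frames maps to about 120 BPM. A minimal sketch of that arithmetic (toy numbers, not repository code):

fps = 44100 / 1024            # ~43.07 frames per second
interval_frames = 21.5        # assumed dominant inter-beat interval, in frames
bpm = 60.0 * fps / interval_frames
print(round(bpm))             # ~120 BPM
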
239
+
240
+ def __len__(self):
241
+ return len(self.data)
242
+
243
+ def __getitem__(self, index):
244
+ x = np.sum(self.data[index], axis=1).transpose(1, 0) #(dmodel, T)
245
+ x = librosa.power_to_db(x, ref=np.max)
246
+ x = x.T
247
+ return self.dataset_name[index], x, self.beats[index], self.downbeats[index], self.tempi[index], self.root[index]
248
+
249
+
250
+
251
+
252
+
253
+ class audioDataset(object):
254
+ def __init__(self, data_to_load=['ballroom', 'carnetic', 'gtzan', 'hainsworth', 'smc', 'harmonix'],
255
+ test_only_data = ['hainsworth'],
256
+ data_path="/data1/zhaojw/dataset/linear_spectrogram_data.npz",
257
+ annotation_path="/data1/zhaojw/dataset/beat_annotation.npz",
258
+ fps=44100/1024,
259
+ SEED = 0,
260
+ num_folds=8,
261
+ mask_value = -1,
262
+ sample_size = 512
263
+ ):
264
+
265
+ self.fps = fps
266
+ self.sample_size = sample_size
267
+ self.mask_value = mask_value
268
+ self.num_folds = num_folds
269
+ self.test_only_data = test_only_data
270
+
271
+ load_linear_spectr = np.load(data_path, allow_pickle=True)
272
+ load_annotation = np.load(annotation_path, allow_pickle=True)
273
+
274
+ self.full_data = {}
275
+ self.full_annotation = {}
276
+ self.audio_files = {}
277
+ for key in load_linear_spectr:
278
+ if key in data_to_load:
279
+ time1 = time.time()
280
+ print(f'loading {key} dataset ...')
281
+ data = load_linear_spectr[key]
282
+ annotation = load_annotation[key]
283
+ assert(len(data) == len(annotation))
284
+
285
+ with open(f'./data/audio_lists/{key}.txt', 'r') as f:
286
+ audio_root = f.readlines()
287
+ audio_root = [item.replace('\n', '') for item in audio_root]
288
+ assert(len(data) == len(audio_root))
289
+ print(f'finish loading {key} with shape {data.shape}, using {time.time()-time1}s.')
290
+ #fold split
291
+ self.full_data[key] = {}
292
+ self.full_annotation[key] = {}
293
+ self.audio_files[key] = {}
294
+ if key in self.test_only_data:
295
+ FOLD_SIZE = len(data) // num_folds
296
+ np.random.seed(SEED)
297
+ np.random.shuffle(data)
298
+ np.random.seed(SEED)
299
+ np.random.shuffle(annotation)
300
+ np.random.seed(SEED)
301
+ np.random.shuffle(audio_root)
302
+ for i in range(num_folds):
303
+ self.full_data[key][i] = data[:]
304
+ self.full_annotation[key][i] = annotation[:]
305
+ self.audio_files[key][i] = audio_root[:]
306
+ else:
307
+ FOLD_SIZE = len(data) // num_folds
308
+ np.random.seed(SEED)
309
+ np.random.shuffle(data)
310
+ np.random.seed(SEED)
311
+ np.random.shuffle(annotation)
312
+ np.random.seed(SEED)
313
+ np.random.shuffle(audio_root)
314
+ for i in range(num_folds-1):
315
+ self.full_data[key][i] = data[i*FOLD_SIZE: (i+1)*FOLD_SIZE]
316
+ self.full_annotation[key][i] = annotation[i*FOLD_SIZE: (i+1)*FOLD_SIZE]
317
+ self.audio_files[key][i] = audio_root[i*FOLD_SIZE: (i+1)*FOLD_SIZE]
318
+ self.full_data[key][num_folds-1] = data[(num_folds-1)*FOLD_SIZE: len(data)]
319
+ self.full_annotation[key][num_folds-1] = annotation[(num_folds-1)*FOLD_SIZE: len(annotation)]
320
+ self.audio_files[key][num_folds-1] = audio_root[(num_folds-1)*FOLD_SIZE: len(audio_root)]
321
+ data = None
322
+ annotation = None
323
+
324
+ def get_fold(self, fold=0):
325
+ print('processing train_set')
326
+ train_set = dataset_processing(full_data=self.full_data,
327
+ full_annotation=self.full_annotation,
328
+ audio_files=None,
329
+ mode='train',
330
+ fps=self.fps,
331
+ fold=fold,
332
+ sample_size = self.sample_size,
333
+ num_folds=self.num_folds,
334
+ mask_value=self.mask_value,
335
+ test_only=self.test_only_data
336
+ )
337
+
338
+ print('processing val_set')
339
+ val_set = dataset_processing(full_data=self.full_data,
340
+ full_annotation=self.full_annotation,
341
+ audio_files=self.audio_files,
342
+ mode='validation',
343
+ fps=self.fps,
344
+ fold=fold,
345
+ sample_size=self.sample_size,
346
+ num_folds=self.num_folds,
347
+ mask_value=self.mask_value,
348
+ test_only=self.test_only_data
349
+ )
350
+
351
+ print('processing test_set')
352
+ test_set = dataset_processing(full_data=self.full_data,
353
+ full_annotation=self.full_annotation,
354
+ audio_files=self.audio_files,
355
+ mode='test',
356
+ fps=self.fps,
357
+ fold=fold,
358
+ sample_size=self.sample_size,
359
+ num_folds=self.num_folds,
360
+ mask_value=self.mask_value,
361
+ test_only=self.test_only_data
362
+ )
363
+ return train_set, val_set, test_set
364
+
365
+
366
+
367
+ if __name__ == '__main__':
368
+ from torch.utils.data import DataLoader
369
+ #data_to_load=['ballroom', 'carnetic', 'gtzan', 'hainsworth', 'smc', 'harmonix']
370
+ dataset = audioDataset(data_to_load=['smc'],
371
+ test_only_data = ['gtzan'],
372
+ data_path = "./data/demix_spectrogram_data.npz",
373
+ annotation_path = "./data/full_beat_annotation.npz",
374
+ fps = 44100/1024,
375
+ sample_size = None,
376
+ num_folds = 8)
377
+ # Fold Splitting
378
+ train_set, val_set, test_set = dataset.get_fold(fold=0)
379
+ #train_loader = DataLoader(train_set, batch_size=1, shuffle=True)
380
+ #val_loader = DataLoader(val_set, batch_size=1, shuffle=False)
381
+ test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
382
+ #for i, (key, data, beat, downbeat, tempo) in enumerate(val_data):
383
+ for i, (key, data, beat, downbeat, tempo, root) in enumerate(test_loader):
384
+ print('key:', key)
385
+ print('data:', data.shape)
386
+ print('beat:', beat.shape)
387
+ #print('beat:', torch.nonzero(beat))
388
+ print('downbeat:', downbeat.shape)
389
+ print('tempo:', tempo.shape)
390
+ print('audio_root:', root)
391
+ #print('downbeat:', torch.nonzero(downbeat))
392
+ break
393
+
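
The beat and downbeat targets in this dataset class are built by quantizing annotated times onto the frame grid with madmom.utils.quantize_events and then softening each spike so that neighbouring frames get partial credit. A minimal, self-contained sketch of that widening step (toy annotation times, not part of the repository):

import numpy as np
import madmom
from scipy.ndimage import maximum_filter1d

fps = 44100 / 1024
annotation_sec = np.array([0.5, 1.0, 1.5])   # toy beat times in seconds
beat = madmom.utils.quantize_events(annotation_sec, fps=fps, length=100)
# frames adjacent to each annotated beat receive a soft target of 0.5
beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
print(np.nonzero(beat)[0])    # each quantized beat frame plus its neighbours

Applying the same line twice, as the loader above does, additionally gives the frames two steps away a smaller soft target of 0.25.
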
Beat-Transformer/code/ablation_models/tcn.py ADDED
@@ -0,0 +1,121 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+
5
+ class residual_block(nn.Module):
6
+ def __init__(self, i, in_channels, num_filter, kernel_size, dropout):
7
+ super(residual_block, self).__init__()
8
+ self.res = nn.Conv1d(in_channels=in_channels, out_channels=num_filter, kernel_size=1, padding='same')
9
+ self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=num_filter, kernel_size=kernel_size, dilation=i, padding='same')
10
+ self.conv2 = nn.Conv1d(in_channels=in_channels, out_channels=num_filter, kernel_size=kernel_size, dilation=i*2, padding='same')
11
+ self.elu = nn.ELU()
12
+ self.spatial_dropout = nn.Dropout2d(p=dropout)
13
+ self.conv_final = nn.Conv1d(in_channels=num_filter * 2, out_channels=num_filter, kernel_size=1, padding='same')
14
+
15
+ def forward(self, x):
16
+ #x: (B, F, T)
17
+ x_res = self.res(x)
18
+ x_1 = self.conv1(x)
19
+ x_2 = self.conv2(x)
20
+ x = torch.cat([x_1, x_2], dim=1)
21
+ x = self.elu(x).unsqueeze(-1) #(B, F, T, 1)
22
+ x = self.spatial_dropout(x).squeeze(-1) #(B, F, T)
23
+ x = self.conv_final(x)
24
+ return x + x_res, x
25
+
26
+
27
+ class TCN(nn.Module):
28
+ def __init__(self, num_layers=11, dropout=.1, kernel_size=5, n_token=2):
29
+ super(TCN, self).__init__()
30
+ self.nlayers = num_layers
31
+
32
+ self.conv1 = nn.Conv2d(in_channels=1, out_channels=20, kernel_size=3, stride=1, padding=(2, 0))
33
+ self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))
34
+ self.dropout1 = nn.Dropout(p=dropout)
35
+
36
+ self.conv2 = nn.Conv2d(in_channels=20, out_channels=20, kernel_size=(1, 12), stride=1, padding=0)
37
+ self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))
38
+ self.dropout2 = nn.Dropout(p=dropout)
39
+
40
+ self.conv3 = nn.Conv2d(in_channels=20, out_channels=20, kernel_size=3, stride=1, padding=0)
41
+ self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))
42
+ self.dropout3 = nn.Dropout(p=dropout)
43
+
44
+ self.tcn_layers = nn.ModuleDict({})
45
+ for layer in range(num_layers):
46
+ self.tcn_layers[f'TCN_layer_{layer}'] = residual_block(i=2**layer, in_channels=20, num_filter=20, kernel_size=kernel_size, dropout=dropout)
47
+
48
+ self.out_linear = nn.Linear(20, n_token)
49
+
50
+ self.dropout_t = nn.Dropout(p=.5)
51
+ self.out_linear_t = nn.Linear(20, 300)
52
+
53
+
54
+ def forward(self, x):
55
+ # x: spectrogram of size (B, T, mel_bin)
56
+ x = x.unsqueeze(1) #(B, 1, T, mel_bin)
57
+ x = self.conv1(x)
58
+ x = self.maxpool1(x)
59
+ x = self.dropout1(x)
60
+
61
+ x = self.conv2(x)
62
+ x = self.maxpool2(x)
63
+ x = self.dropout2(x)
64
+
65
+ x = self.conv3(x)
66
+ x = self.maxpool3(x)
67
+ x = self.dropout3(x) #(B, 20, T, 1)
68
+ x = x.squeeze(-1) #(B, 20, T)
69
+
70
+ t = []
71
+ for layer in range(self.nlayers):
72
+ x, skip = self.tcn_layers[f'TCN_layer_{layer}'](x) #x: B, 20, T; skip: B, 20, T
73
+ t.append(skip)
74
+
75
+ x = torch.relu(x).transpose(-2, -1)
76
+ x = self.out_linear(x)
77
+
78
+ t = torch.stack(t, axis=-1).sum(dim=-1)
79
+ t = torch.relu(t)
80
+ t = self.dropout_t(t)
81
+ t = t.mean(dim=-1) #(batch, 20)
82
+ t = self.out_linear_t(t)
83
+
84
+ return x, t
85
+
86
+
87
+ if __name__ == '__main__':
88
+ from spectrogram_dataset import audioDataset
89
+ from torch.utils.data import DataLoader
90
+
91
+ DEVICE = 'cuda:2'
92
+ model = TCN(num_layers=11, dropout=.15)
93
+ model.to(DEVICE)
94
+ model.eval()
95
+ dataset = audioDataset(data_to_load=['smc'],
96
+ data_path = "/data1/zhaojw/dataset/madmom_data_100fps.npz",
97
+ annotation_path = "/data1/zhaojw/dataset/beat_annotation.npz",
98
+ fps = 100,
99
+ sample_size = None,
100
+ downsample_size=1,
101
+ hop_size = 128,
102
+ fold = 0,
103
+ num_folds = 8)
104
+
105
+ print(len(dataset.train_set), len(dataset.val_set), len(dataset.test_set))
106
+ train_data = DataLoader(dataset.train_set, batch_size=1, shuffle=True)
107
+ val_data = DataLoader(dataset.val_set, batch_size=1, shuffle=False)
108
+
109
+ #for i, (key, instr, data, mask, beat, downbeat) in enumerate(val_data):
110
+ for i, (key, data, beat, downbeat, tempo) in enumerate(val_data):
111
+ print(key)
112
+
113
+ data = data.float().cuda(DEVICE)#B, time, mel
114
+ print(f'data: {data.shape}')
115
+ print(f'beat: {beat.shape}')
116
+ print(f'downbeat: {downbeat.shape}')
117
+ print(f'tempo: {tempo.shape}')
118
+
119
+ x, t = model(data)
120
+ print(f'out: {x.shape}, {t.shape}')
121
+ break
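
A minimal forward-pass sketch for the TCN defined above (not from the repository). It assumes an input with 81 mel bins, one size for which the three convolution/pooling stages collapse the frequency axis to a single bin; the feature size used in the actual experiments may differ:

import torch
from tcn import TCN   # assuming tcn.py (this file) is importable

model = TCN(num_layers=11, dropout=0.1, kernel_size=5, n_token=2)
model.eval()
dummy = torch.randn(1, 128, 81)   # (batch, time, mel_bin), toy input
with torch.no_grad():
    act, tempo = model(dummy)
print(act.shape, tempo.shape)     # torch.Size([1, 128, 2]) torch.Size([1, 300])

Following the convention used in eight_fold_test.py, column 0 of the activation is read as the beat logit and column 1 as the downbeat logit, while the 300-way head estimates tempo.
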
Beat-Transformer/code/ablation_models/tcn_demix_model.py ADDED
@@ -0,0 +1,187 @@
1
+ import torch
2
+ from torch import nn
3
+ from music_transformer import TransformerEncoderLayer
4
+ import torch.nn.functional as F
5
+ import math
6
+ import sys
7
+
8
+ from tcn import residual_block
9
+
10
+
11
+ class DemixedTCN(nn.Module):
12
+ def __init__(self, attn_len=5, instr=5, ntoken=2, dmodel=128, nhead=2, d_hid=512, nlayers=9, norm_first=True, dropout=.1):
13
+ super(DemixedTCN, self).__init__()
14
+ self.nhead = nhead
15
+ self.nlayers = nlayers
16
+ self.attn_len = attn_len
17
+ self.head_dim = dmodel // nhead
18
+ self.dmodel = dmodel
19
+ assert self.head_dim * nhead == dmodel, "embed_dim must be divisible by num_heads"
20
+
21
+ #self.Er = nn.Parameter(torch.randn(nhead, self.head_dim, attn_len))
22
+ #instr_pe = self.generate_instr_pe(ninstr, dmodel)
23
+ #self.register_buffer('instr_pe', instr_pe)
24
+
25
+ self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(5, 3), stride=1, padding=(2, 0))#126
26
+ self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#42
27
+ self.dropout1 = nn.Dropout(p=dropout)
28
+
29
+ self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(1, 12), stride=1, padding=(0, 0))#31
30
+ self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#10
31
+ self.dropout2 = nn.Dropout(p=dropout)
32
+
33
+ self.conv3 = nn.Conv2d(in_channels=64, out_channels=dmodel, kernel_size=(3, 6), stride=1, padding=(1, 0))#5
34
+ self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3))#1
35
+ self.dropout3 = nn.Dropout(p=dropout)
36
+
37
+ self.head_er = nn.Parameter(torch.randn(nhead, self.head_dim, 1))
38
+
39
+ self.Transformer_layers = nn.ModuleDict({})
40
+ for idx in range(nlayers):
41
+ #self.Transformer_layers[f'time_attention_{idx}'] = DilatedTransformerLayer(dmodel, nhead, d_hid, dropout, Er_provided=False, attn_len=attn_len, norm_first=norm_first)
42
+ self.Transformer_layers[f'time_attention_{idx}'] = residual_block(2**idx, dmodel, dmodel, attn_len, dropout)
43
+
44
+
45
+ if (idx >= 3) and (idx <= 5):
46
+ self.Transformer_layers[f'instr_attention_{idx}'] = TransformerEncoderLayer(dmodel, nhead, d_hid, dropout, Er_provided=False, max_len=instr, norm_first=norm_first)
47
+
48
+ self.out_linear = nn.Linear(dmodel, ntoken)
49
+
50
+ self.dropout_t = nn.Dropout(p=.5)
51
+ self.out_linear_t = nn.Linear(dmodel, 300)
52
+
53
+
54
+ def forward(self, x):
55
+ #x: (batch, instr, time, dmodel), FloatTensor
56
+ #batch, time, dmodel = x.shape
57
+ batch, instr, time, melbin = x.shape
58
+ x = x.reshape(-1, 1, time, melbin)
59
+ x = self.conv1(x)
60
+ x = self.maxpool1(x)
61
+ x = torch.relu(x)
62
+ x = self.dropout1(x)
63
+
64
+ x = self.conv2(x)
65
+ x = self.maxpool2(x)
66
+ x = torch.relu(x)
67
+ x = self.dropout2(x)
68
+
69
+ x = self.conv3(x)
70
+ x = self.maxpool3(x)
71
+ x = torch.relu(x)
72
+ x = self.dropout3(x) #(batch*instr, channel, time, 1)
73
+
74
+ x = x.reshape(-1, self.dmodel, time).transpose(1, 2) #(batch*instr, time, channel=dmodel)
75
+ t = []
76
+
77
+ for layer in range(self.nlayers):
78
+ x = x.transpose(-1, -2)
79
+ x, skip = self.Transformer_layers[f'time_attention_{layer}'](x)
80
+
81
+ x = x.transpose(-1, -2)
82
+ skip = skip.transpose(-1, -2).reshape(batch, instr, time, self.dmodel)
83
+ #skip = skip.reshape(batch, instr, time, self.dmodel)
84
+ t.append(skip.mean(1))
85
+
86
+ if (layer >= 3) and (layer <= 5):
87
+ x = x.reshape(batch, instr, time, self.dmodel)
88
+ x = x.permute(0, 2, 1, 3)
89
+ x = x.reshape(-1, instr, self.dmodel)
90
+
91
+ #x = self.Transformer_layers[f'instr_attention_{layer}'](x, layer=layer)
92
+ x = self.Transformer_layers[f'instr_attention_{layer}'](x)
93
+
94
+ x = x.reshape(batch, time, instr, self.dmodel)
95
+ x = x.permute(0, 2, 1, 3)
96
+ x = x.reshape(-1, time, self.dmodel)
97
+
98
+ x = torch.relu(x)
99
+ x = x.reshape(batch, instr, time, self.dmodel)
100
+ x = x.mean(1)
101
+ x = self.out_linear(x)
102
+
103
+ t = torch.stack(t, axis=-1).sum(dim=-1)
104
+ t = torch.relu(t)
105
+ t = self.dropout_t(t)
106
+ t = t.mean(dim=1) #(batch, dmodel)
107
+ t = self.out_linear_t(t)
108
+
109
+ return x, t
110
+
111
+ def inference(self, x):
112
+ #x: (batch, instr, time, dmodel), FloatTensor
113
+ #batch, time, dmodel = x.shape
114
+ batch, instr, time, melbin = x.shape
115
+ x = x.reshape(-1, 1, time, melbin)
116
+ x = self.conv1(x)
117
+ x = self.maxpool1(x)
118
+ x = torch.relu(x)
119
+ x = self.dropout1(x)
120
+
121
+ x = self.conv2(x)
122
+ x = self.maxpool2(x)
123
+ x = torch.relu(x)
124
+ x = self.dropout2(x)
125
+
126
+ x = self.conv3(x)
127
+ x = self.maxpool3(x)
128
+ x = torch.relu(x)
129
+ x = self.dropout3(x) #(batch*instr, channel, time, 1)
130
+
131
+ x = x.reshape(-1, self.dmodel, time).transpose(1, 2) #(batch*instr, time, channel=dmodel)
132
+ t = []
133
+
134
+ attn = [torch.eye(time, device=x.device).repeat(batch, self.nhead, 1, 1)]
135
+
136
+ for layer in range(self.nlayers):
137
+ x, skip, layer_attn = self.Transformer_layers[f'time_attention_{layer}'].inference(x, layer=layer, head_er=self.head_er)
138
+ skip = skip.reshape(batch, instr, time, self.dmodel)
139
+ t.append(skip.mean(1))
140
+
141
+ attn.append(torch.matmul(attn[-1], layer_attn.transpose(-2, -1)))
142
+
143
+ if (layer >= 3) and (layer <= 5):
144
+ x = x.reshape(batch, instr, time, self.dmodel)
145
+ x = x.permute(0, 2, 1, 3)
146
+ x = x.reshape(-1, instr, self.dmodel)
147
+
148
+ #x = self.Transformer_layers[f'instr_attention_{layer}'](x, layer=layer)
149
+ x = self.Transformer_layers[f'instr_attention_{layer}'](x)
150
+
151
+ x = x.reshape(batch, time, instr, self.dmodel)
152
+ x = x.permute(0, 2, 1, 3)
153
+ x = x.reshape(-1, time, self.dmodel)
154
+
155
+ x = torch.relu(x)
156
+ x = x.reshape(batch, instr, time, self.dmodel)
157
+ x = x.mean(1)
158
+ x = self.out_linear(x)
159
+
160
+ t = torch.stack(t, axis=-1).sum(dim=-1)
161
+ t = torch.relu(t)
162
+ t = self.dropout_t(t)
163
+ t = t.mean(dim=1) #(batch, dmodel)
164
+ t = self.out_linear_t(t)
165
+
166
+ return x, t, attn
167
+
168
+
169
+
170
+
171
+ if __name__ == '__main__':
172
+ from spectrogram_dataset import audioDataset
173
+ from torch.utils.data import DataLoader
174
+
175
+ DEVICE = 'cpu'
176
+ model = DemixedTCN(attn_len=5, instr=5, ntoken=2, dmodel=256, nhead=8, d_hid=1024, nlayers=9, norm_first=True, dropout=.1)
177
+ model.to(DEVICE)
178
+ model.eval()
179
+
180
+ for name, param in model.state_dict().items():
181
+ print(name, param.shape)
182
+ # name: str
183
+ # param: Tensor
184
+
185
+ total = sum([param.nelement() for param in model.parameters()])
186
+ print(total)
187
+ #print("Number of parameter: %.2fM" % (total/1e6))
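
The forward pass above keeps time attention and instrument attention in different tensor layouts: (batch*instr, time, dmodel) for the temporal layers and (batch*time, instr, dmodel) for the cross-stem layers at depths 3-5. A toy, standalone sketch of that permutation round-trip (made-up sizes, no model involved):

import torch

batch, instr, time, dmodel = 2, 5, 128, 256
x = torch.randn(batch * instr, time, dmodel)   # layout used by the time-attention layers

# regroup so attention can run across the 5 demixed stems at every frame
x_instr = x.reshape(batch, instr, time, dmodel).permute(0, 2, 1, 3).reshape(-1, instr, dmodel)

# restore the (batch*instr, time, dmodel) layout for the next temporal layer
x_back = x_instr.reshape(batch, time, instr, dmodel).permute(0, 2, 1, 3).reshape(-1, time, dmodel)
assert torch.equal(x, x_back)
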
Beat-Transformer/code/ablation_models/utils.py ADDED
@@ -0,0 +1,301 @@
1
+ import numpy as np
2
+ import json, sys, os
3
+ from torch import nn
4
+ import torch
5
+ from torch.distributions import kl_divergence, Normal
6
+ from torch.optim.lr_scheduler import ExponentialLR
7
+ import random
8
+ import madmom
9
+
10
+ class AverageMeter(object):
11
+ """Computes and stores the average and current value"""
12
+ def __init__(self):
13
+ self.reset()
14
+
15
+ def reset(self):
16
+ self.val = {}
17
+ self.avg = {}
18
+ self.sum = {}
19
+ self.count = {}
20
+
21
+ def update(self, key, val, n=1):
22
+ if not key in self.val:
23
+ self.val[key] = val
24
+ self.sum[key] = val * n
25
+ self.count[key] = n
26
+ self.avg[key] = self.sum[key] / self.count[key]
27
+ else:
28
+ self.val[key] = val
29
+ self.sum[key] += val * n
30
+ self.count[key] += n
31
+ self.avg[key] = self.sum[key] / self.count[key]
32
+
33
+ def binary_accuracy(beat_pred, beat_gt):
34
+ #beat: (B, T)
35
+ weight = (1 - torch.as_tensor(beat_gt == -1, dtype=torch.int32))
36
+ beat_pred = torch.as_tensor((torch.sigmoid(beat_pred) >= 0.5), dtype=torch.int32)
37
+ beat_gt = torch.as_tensor((beat_gt > 0.6), dtype=torch.int32)
38
+ positives = torch.as_tensor((beat_pred == beat_gt), dtype=torch.int32)
39
+ positives = positives * weight
40
+ binary_accuracy = positives.sum() / (weight.sum() + 1e-4)
41
+ return binary_accuracy
42
+
43
+ def beat_accuracy(beat_pred, beat_gt, fps):
44
+ #beat_pred: (B, L), estimation result
45
+ weight = (1 - torch.as_tensor(beat_gt == -1, dtype=torch.int32))
46
+ beat_pred = torch.sigmoid(beat_pred) * weight
47
+ beat_pred = torch.as_tensor((beat_pred - 0.5) > 0, dtype=torch.int32).detach().cpu().numpy()
48
+ #beat_pred = (beat_pred / fps)
49
+ beat_gt = torch.as_tensor((beat_gt - 0.5) > 0, dtype=torch.int32).detach().cpu().numpy()
50
+ #beat_gt = (beat_gt / fps)
51
+ #print(beat_gt)
52
+ batch_score = []
53
+ for idx in range(beat_pred.shape[0]):
54
+ #if (beat_gt[idx] == 0).all():
55
+ # continue
56
+ if np.sum(beat_gt[idx]) < 2:
57
+ continue
58
+ beat_pred_batch = np.nonzero(beat_pred[idx])[0] / fps
59
+ beat_gt_batch = np.nonzero(beat_gt[idx])[0] / fps
60
+ #print(beat_gt_batch)
61
+ score = madmom.evaluation.beats.BeatEvaluation(beat_pred_batch, beat_gt_batch)
62
+ batch_score.append(score)
63
+ batch_score = madmom.evaluation.beats.BeatMeanEvaluation(batch_score)
64
+ return {"fmeasure": batch_score.fmeasure, \
65
+ #"cemgil": batch_score.cemgil, \
66
+ #"cmlc": batch_score.cmlc, \
67
+ "cmlt": batch_score.cmlt, \
68
+ #"amlc": batch_score.amlc, \
69
+ "amlt": batch_score.amlt}
70
+
71
+
72
+ def infer_beat_with_DBN(beat_pred, beat_gt, dbn_model, fps):
73
+ #beat_pred: (B, L), estimation result
74
+ weight = (1 - torch.as_tensor(beat_gt == -1, dtype=torch.int32))
75
+ beat_pred = (torch.sigmoid(beat_pred) * weight).detach().cpu().numpy()
76
+ #beat_pred = (beat_pred / fps)
77
+ beat_gt = torch.as_tensor((beat_gt - 0.5) > 0, dtype=torch.int32).detach().cpu().numpy()
78
+ batch_score = []
79
+ for idx in range(beat_pred.shape[0]):
80
+ #if (beat_gt[idx] == 0).all():
81
+ # continue
82
+ if np.sum(beat_gt[idx]) < 2:
83
+ continue
84
+ try:
85
+ beat_pred_batch = dbn_model(beat_pred[idx])
86
+ except:
87
+ return {"fmeasure": 0, "cmlt": 0, "amlt": 0}
88
+ beat_gt_batch = np.nonzero(beat_gt[idx])[0] / fps
89
+ score = madmom.evaluation.beats.BeatEvaluation(beat_pred_batch, beat_gt_batch)
90
+ batch_score.append(score)
91
+ batch_score = madmom.evaluation.beats.BeatMeanEvaluation(batch_score)
92
+ return {"fmeasure": batch_score.fmeasure if not np.isnan(batch_score.fmeasure) else 0, \
93
+ #"cemgil": batch_score.cemgil, \
94
+ #"cmlc": batch_score.cmlc, \
95
+ "cmlt": batch_score.cmlt if not np.isnan(batch_score.cmlt) else 0, \
96
+ #"amlc": batch_score.amlc, \
97
+ "amlt": batch_score.amlt if not np.isnan(batch_score.amlt) else 0}
98
+
99
+
100
+ def infer_downbeat_with_DBN(beat_pred, downbeat_pred, downbeat_gt, dbn_model, fps):
101
+ #beat_pred: (B, L), estimation result
102
+ beat_pred = torch.sigmoid(beat_pred).detach().cpu()
103
+ downbeat_pred = torch.sigmoid(downbeat_pred).detach().cpu()
104
+ combined_act = torch.cat((torch.maximum(beat_pred - downbeat_pred, torch.zeros(beat_pred.shape)).unsqueeze(-1), downbeat_pred.unsqueeze(-1)), dim=-1)
105
+ #beat_pred = (beat_pred / fps)
106
+ weight = (1 - torch.as_tensor(downbeat_gt == -1, dtype=torch.int32)).unsqueeze(-1).detach().cpu()
107
+ combined_act = (combined_act * weight).numpy()
108
+
109
+ beat_gt = torch.as_tensor((downbeat_gt - 0.5) > 0, dtype=torch.int32).detach().cpu().numpy()
110
+ batch_score = []
111
+ for idx in range(beat_pred.shape[0]):
112
+ #if (beat_gt[idx] == 0).all():
113
+ # continue
114
+ if np.sum(beat_gt[idx]) < 2:
115
+ continue
116
+ try:
117
+ beat_pred_batch = dbn_model(combined_act[idx])
118
+ beat_pred_batch = beat_pred_batch[beat_pred_batch[:, 1]==1][:, 0]
119
+ except:
120
+ return {"fmeasure": 0, "cmlt": 0, "amlt": 0}
121
+ beat_gt_batch = np.nonzero(beat_gt[idx])[0] / fps
122
+ score = madmom.evaluation.beats.BeatEvaluation(beat_pred_batch, beat_gt_batch)
123
+ batch_score.append(score)
124
+ batch_score = madmom.evaluation.beats.BeatMeanEvaluation(batch_score)
125
+ return {"fmeasure": batch_score.fmeasure if not np.isnan(batch_score.fmeasure) else 0, \
126
+ #"cemgil": batch_score.cemgil, \
127
+ #"cmlc": batch_score.cmlc, \
128
+ "cmlt": batch_score.cmlt if not np.isnan(batch_score.cmlt) else 0, \
129
+ #"amlc": batch_score.amlc, \
130
+ "amlt": batch_score.amlt if not np.isnan(batch_score.amlt) else 0}
131
+
132
+
133
+
134
+ def load_dataset_path(fn='model_config.json'):
135
+ with open(fn) as f:
136
+ paths = json.load(f)['dataset_path']
137
+
138
+ train_val_path = paths['hpc_data_path']
139
+ return train_val_path
140
+
141
+ def load_params_dict(key, fn='model_config.json'):
142
+ with open(fn) as f:
143
+ dict = json.load(f)[key]
144
+ return dict
145
+
146
+
147
+ def count_parameters(model):
148
+ return sum(p.numel() for p in model.parameters() if p.requires_grad)
149
+
150
+
151
+ def init_weights(m):
152
+ for name, param in m.named_parameters():
153
+ if 'weight' in name:
154
+ nn.init.normal_(param.data, mean=0, std=0.01)
155
+ else:
156
+ nn.init.constant_(param.data, 0)
157
+
158
+ def standard_normal(shape):
159
+ N = Normal(torch.zeros(shape), torch.ones(shape))
160
+ if torch.cuda.is_available():
161
+ N.loc = N.loc.cuda()
162
+ N.scale = N.scale.cuda()
163
+ return N
164
+
165
+
166
+ def loss_function_vae(recon_pitch, pitch, dist, pitch_criterion, normal,
167
+ weights=(1, .1)):
168
+ # bs = dist.mean.size(0)
169
+ #print(recon_pitch.shape, pitch.shape, recon_rhythm.shape, rhythm.shape)
170
+ pitch_loss = pitch_criterion(recon_pitch, pitch)
171
+ kl_div = kl_divergence(dist, normal).mean()
172
+ loss = weights[0] * pitch_loss + weights[1] * kl_div
173
+ return loss, pitch_loss, kl_div
174
+
175
+ def loss_function_discr(recon_mask, mask_gt, dist, mask_criterion, normal,
176
+ weights=(1, .1)):
177
+ # bs = dist.mean.size(0)
178
+ #print(recon_pitch.shape, pitch.shape, recon_rhythm.shape, rhythm.shape)
179
+ mask_loss = mask_criterion(recon_mask, mask_gt)
180
+ kl_div = kl_divergence(dist, normal).mean()
181
+ loss = weights[0] * mask_loss + weights[1] * kl_div
182
+ return loss, mask_loss, kl_div
183
+
184
+ def get_complement(mask_gt):
185
+ #mask_gt: (BT, 128)
186
+ complement = torch.zeros(mask_gt.shape).long().cuda()
187
+ for i in range(mask_gt.shape[0]):
188
+ if random.random() < 0.5:
189
+ low = max(mask_gt[i].max(0)[-1].item() - 5, 0)
190
+ high = min(mask_gt[i].max(0)[-1].item() + 6, 127)
191
+ else:
192
+ low = max(mask_gt[i].max(0)[-1].item() - 6, 0)
193
+ high = min(mask_gt[i].max(0)[-1].item() + 5, 127)
194
+ #print(low, high)
195
+ complement[i, low: high+1] = 1.
196
+ return complement - mask_gt
197
+
198
+
199
+ # Utility to measure how long an epoch took
200
+ def epoch_time(start_time, end_time):
201
+ elapsed_time = end_time - start_time
202
+ elapsed_mins = int(elapsed_time / 60)
203
+ elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
204
+ return elapsed_mins, elapsed_secs
205
+
206
+
207
+ class MinExponentialLR(ExponentialLR):
208
+ def __init__(self, optimizer, gamma, minimum, last_epoch=-1):
209
+ self.min = minimum
210
+ super(MinExponentialLR, self).__init__(optimizer, gamma, last_epoch=-1)
211
+
212
+ def get_lr(self):
213
+ return [
214
+ max(base_lr * self.gamma ** self.last_epoch, self.min)
215
+ for base_lr in self.base_lrs
216
+ ]
217
+
218
+
219
+ def scheduled_sampling(i, high=0.7, low=0.05):
220
+ x = 10 * (i - 0.5)
221
+ z = 1 / (1 + np.exp(x))
222
+ y = (high - low) * z + low
223
+ return y
224
+
225
+
226
+
227
+ def piano_roll_to_target(pr):
228
+ # pr: (32, 128, 3), dtype=bool
229
+
230
+ # Assume that "not (first_layer or second layer) = third_layer"
231
+ pr[:, :, 1] = np.logical_not(np.logical_or(pr[:, :, 0], pr[:, :, 2]))
232
+ # To int dtype can make addition work
233
+ pr = pr.astype(int)
234
+ # Initialize a matrix to store the duration of a note on the (32, 128) grid
235
+ pr_matrix = np.zeros((32, 128))
236
+
237
+ for i in range(31, -1, -1):
238
+ # At each iteration
239
+ # 1. Assure that the second layer accumulates the note duration
240
+ # 2. collect the onset notes in time step i, and mark it on the matrix.
241
+
242
+ # collect
243
+ onset_idx = np.where(pr[i, :, 0] == 1)[0]
244
+ pr_matrix[i, onset_idx] = pr[i, onset_idx, 1] + 1
245
+ if i == 0:
246
+ break
247
+ # Accumulate
248
+ # pr[i - 1, :, 1] += pr[i, :, 1]
249
+ # pr[i - 1, onset_idx, 1] = 0 # the onset note should be set 0.
250
+ pr[i, onset_idx, 1] = 0 # the onset note should be set 0.
251
+ pr[i - 1, :, 1] += pr[i, :, 1]
252
+
253
+ return pr_matrix
254
+
255
+
256
+ def target_to_3dtarget(pr_mat, max_note_count=11, max_pitch=107, min_pitch=22,
257
+ pitch_pad_ind=88, dur_pad_ind=2,
258
+ pitch_sos_ind=86, pitch_eos_ind=87):
259
+ """
260
+ :param pr_mat: (32, 128) matrix. pr_mat[t, p] indicates a note of pitch p,
261
+ started at time step t, has a duration of pr_mat[t, p] time steps.
262
+ :param max_note_count: the maximum number of notes in a time step,
263
+ including <sos> and <eos> tokens.
264
+ :param max_pitch: the highest pitch in the dataset.
265
+ :param min_pitch: the lowest pitch in the dataset.
266
+ :param pitch_pad_ind: see return value.
267
+ :param dur_pad_ind: see return value.
268
+ :param pitch_sos_ind: sos token.
269
+ :param pitch_eos_ind: eos token.
270
+ :return: pr_mat3d is a (32, max_note_count, 6) matrix. In the last dim,
271
+ the 0th column is for pitch, 1: 6 is for duration in binary repr. Output is
272
+ padded with <sos> and <eos> tokens in the pitch column, but with pad token
273
+ for dur columns.
274
+ """
275
+ pitch_range = max_pitch - min_pitch + 1 # including pad
276
+ pr_mat3d = np.ones((32, max_note_count, 6), dtype=int) * dur_pad_ind
277
+ pr_mat3d[:, :, 0] = pitch_pad_ind
278
+ pr_mat3d[:, 0, 0] = pitch_sos_ind
279
+ cur_idx = np.ones(32, dtype=int)
280
+ for t, p in zip(*np.where(pr_mat != 0)):
281
+ pr_mat3d[t, cur_idx[t], 0] = p - min_pitch
282
+ binary = np.binary_repr(int(pr_mat[t, p]) - 1, width=5)
283
+ pr_mat3d[t, cur_idx[t], 1: 6] = \
284
+ np.fromstring(' '.join(list(binary)), dtype=int, sep=' ')
285
+ cur_idx[t] += 1
286
+ pr_mat3d[np.arange(0, 32), cur_idx, 0] = pitch_eos_ind
287
+ return pr_mat3d
288
+
289
+
290
+ def get_low_high_dur_count(pr_mat):
291
+ # pr_mat (32, 128)
292
+ # return the maximum duration
293
+ # return the pitch range
294
+ # return the number of notes at each column
295
+
296
+ pitch_range = np.where(pr_mat != 0)[1]
297
+ low_pitch = pitch_range.min()
298
+ high_pitch = pitch_range.max()
299
+ pitch_dur = pr_mat.max()
300
+ num_notes = np.count_nonzero(pr_mat, axis=-1)
301
+ return low_pitch, high_pitch, pitch_dur, num_notes
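
A small usage sketch for the AverageMeter class defined at the top of this file (illustration only): it keeps one running average per string key, which is how the evaluation scripts accumulate per-dataset scores.

meter = AverageMeter()
meter.update('ballroom-fmeasure', 0.90)
meter.update('ballroom-fmeasure', 0.80)
meter.update('smc-fmeasure', 0.60, n=2)    # a value can be weighted by a count n
print(meter.avg['ballroom-fmeasure'])      # 0.85
print(meter.avg['smc-fmeasure'])           # 0.6
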
Beat-Transformer/code/eight_fold_test.py ADDED
@@ -0,0 +1,403 @@
1
+ import os
2
+ import pickle
3
+ import torch
4
+ import madmom
5
+ import numpy as np
6
+ from utils import AverageMeter
7
+ from torch.utils.data import DataLoader
8
+ from DilatedTransformer import Demixed_DilatedTransformerModel
9
+ from spectrogram_dataset import audioDataset
10
+ from tqdm import tqdm
11
+ import shutil
12
+
13
+ import warnings
14
+ warnings.filterwarnings('ignore')
15
+
16
+
17
+ FPS = 44100 / 1024
18
+ NUM_FOLDS = 8
19
+ DEVICE='cuda:0'
20
+ #model
21
+ NORM_FIRST=True
22
+ ATTN_LEN=5
23
+ INSTR=5
24
+ NTOKEN=2
25
+ DMODEL=256
26
+ NHEAD=8
27
+ DHID=1024
28
+ NLAYER=9
29
+ #directories
30
+ DATASET_PATH = "./data/demix_spectrogram_data.npz"
31
+ ANNOTATION_PATH = './data/full_beat_annotation.npz'
32
+ DATA_TO_LOAD = ['ballroom'] #'ballroom', 'hainsworth', 'smc', 'carnetic', 'harmonix'
33
+ TEST_ONLY = []
34
+ DEMO_SAVE_ROOT = './save/inference'
35
+ if not os.path.exists(DEMO_SAVE_ROOT):
36
+ os.makedirs(DEMO_SAVE_ROOT)
37
+
38
+
39
+ PARAM_PATH = {
40
+ 0: "./checkpoints/fold_0_trf_param.pt",
41
+ 1: "./checkpoints/fold_1_trf_param.pt",
42
+ 2: "./checkpoints/fold_2_trf_param.pt",
43
+ 3: "./checkpoints/fold_3_trf_param.pt",
44
+ 4: "./checkpoints/fold_4_trf_param.pt",
45
+ 5: "./checkpoints/fold_5_trf_param.pt",
46
+ 6: "./checkpoints/fold_6_trf_param.pt",
47
+ 7: "./checkpoints/fold_7_trf_param.pt"
48
+ }
49
+
50
+
51
+
52
+ def infer_activation():
53
+ """
54
+ run the model to predict (down-)beat activations
55
+ """
56
+ dataset = audioDataset(data_to_load=DATA_TO_LOAD,
57
+ test_only_data = TEST_ONLY,
58
+ data_path = DATASET_PATH,
59
+ annotation_path = ANNOTATION_PATH,
60
+ fps = FPS,
61
+ sample_size = None,
62
+ num_folds = NUM_FOLDS)
63
+
64
+ inference_pred = {}
65
+ beat_gt = {}
66
+ downbeat_gt = {}
67
+
68
+ for FOLD in range(NUM_FOLDS):
69
+ print(f'\nFold {FOLD}')
70
+ train_set, val_set, test_set = dataset.get_fold(fold=FOLD)
71
+ #loader = DataLoader(val_set, batch_size=1, shuffle=False)
72
+ loader = DataLoader(test_set, batch_size=1, shuffle=False)
73
+
74
+ model = Demixed_DilatedTransformerModel(attn_len=ATTN_LEN,
75
+ instr=INSTR,
76
+ ntoken=NTOKEN,
77
+ dmodel=DMODEL,
78
+ nhead=NHEAD,
79
+ d_hid=DHID,
80
+ nlayers=NLAYER,
81
+ norm_first=NORM_FIRST
82
+ )
83
+ #model.load_state_dict(torch.load(os.path.join(MODEL_PATH, f'Fold_{FOLD}', 'model', 'trf_param_012.pt'), map_location=torch.device('cpu')))
84
+ model.load_state_dict(torch.load(PARAM_PATH[FOLD], map_location=torch.device('cpu'))['state_dict'])
85
+ model.to(DEVICE)
86
+ model.eval()
87
+
88
+ with torch.no_grad():
89
+ for idx, (dataset_key, data, beat, downbeat, tempo, root) in tqdm(enumerate(loader), total=len(loader)):
90
+ #data
91
+ data = data.float().to(DEVICE)
92
+ pred, _ = model(data)
93
+ beat_pred = torch.sigmoid(pred[0, :, 0]).detach().cpu().numpy()
94
+ downbeat_pred = torch.sigmoid(pred[0, :, 1]).detach().cpu().numpy()
95
+
96
+ beat = torch.nonzero(beat[0]>.5)[:, 0].detach().numpy() / (FPS)
97
+ downbeat = torch.nonzero(downbeat[0]>.5)[:, 0].detach().numpy() / (FPS)
98
+
99
+ dataset_key = dataset_key[0]
100
+ root = root[0]
101
+ if not dataset_key in inference_pred:
102
+ inference_pred[dataset_key] = []
103
+ beat_gt[dataset_key] = []
104
+ downbeat_gt[dataset_key] = []
105
+ inference_pred[dataset_key].append(np.stack((beat_pred, downbeat_pred), axis=0))
106
+ beat_gt[dataset_key].append(beat)
107
+ downbeat_gt[dataset_key].append(downbeat)
108
+
109
+ #uncomment to save new activation inference
110
+ #print('saving prediction ...')
111
+ #with open(os.path.join(DEMO_SAVE_ROOT, 'inference_pred.pkl'), 'wb') as f:
112
+ # pickle.dump( inference_pred, f)
113
+ #print('saving gt ...')
114
+ #with open(os.path.join(DEMO_SAVE_ROOT, 'beat_gt.pkl'), 'wb') as f:
115
+ # pickle.dump(beat_gt, f)
116
+ #with open(os.path.join(DEMO_SAVE_ROOT, 'down_gt.pkl'), 'wb') as f:
117
+ # pickle.dump(downbeat_gt, f)
118
+
119
+
120
+ def inference_dbn():
121
+ """
122
+ run the DBN to decode the activations saved by the previous function into (down-)beat timestamps, and compute accuracy against the ground truth
123
+ """
124
+ beat_DBN_meter = AverageMeter()
125
+ downbeat_DBN_meter = AverageMeter()
126
+
127
+ beat_tracker = madmom.features.beats.DBNBeatTrackingProcessor(min_bpm=55.0, max_bpm=215.0, fps=FPS,
128
+ transition_lambda=100,
129
+ observation_lambda=6,
130
+ num_tempi=None,
131
+ threshold=0.2)
132
+
133
+ downbeat_tracker = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], min_bpm=55.0, max_bpm=215.0, fps=FPS,
134
+ transition_lambda=100,
135
+ observation_lambda=6,
136
+ num_tempi=None,
137
+ threshold=0.2)
138
+
139
+ print('loading activations ...')
140
+ with open(os.path.join(DEMO_SAVE_ROOT, 'inference_pred.pkl'), 'rb') as f:
141
+ activations = pickle.load(f)
142
+ with open(os.path.join(DEMO_SAVE_ROOT, 'beat_gt.pkl'), 'rb') as f:
143
+ beat_gt = pickle.load(f)
144
+ with open(os.path.join(DEMO_SAVE_ROOT, 'down_gt.pkl'), 'rb') as f:
145
+ downbeat_gt = pickle.load(f)
146
+
147
+ for dataset_key in activations:
148
+ print(f'inferencing on {dataset_key} ...')
149
+ beat_error = 0
150
+ downbeat_error = 0
151
+ for i in tqdm(range(len(activations[dataset_key]))):
152
+ pred = activations[dataset_key][i]
153
+ #print(pred.shape)
154
+ beat = beat_gt[dataset_key][i]
155
+ downbeat = downbeat_gt[dataset_key][i]
156
+
157
+ try:
158
+ dbn_beat_pred = beat_tracker(pred[0])
159
+ beat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_beat_pred, beat)
160
+ beat_DBN_meter.update(f'{dataset_key}-fmeasure', beat_score_DBN.fmeasure)
161
+ beat_DBN_meter.update(f'{dataset_key}-cmlt', beat_score_DBN.cmlt)
162
+ beat_DBN_meter.update(f'{dataset_key}-amlt', beat_score_DBN.amlt)
163
+
164
+ except Exception as e:
165
+ #print(f'beat inference encounter exception {e}')
166
+ beat_error += 1
167
+
168
+
169
+ try:
170
+ combined_act = np.concatenate((np.maximum(pred[0] - pred[1], np.zeros(pred[0].shape))[:, np.newaxis], pred[1][:, np.newaxis]), axis=-1) #(T, 2)
171
+ #print(combined_act.shape)
172
+ dbn_downbeat_pred = downbeat_tracker(combined_act)
173
+ dbn_downbeat_pred = dbn_downbeat_pred[dbn_downbeat_pred[:, 1]==1][:, 0]
174
+
175
+ downbeat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_downbeat_pred, downbeat)
176
+ downbeat_DBN_meter.update(f'{dataset_key}-fmeasure', downbeat_score_DBN.fmeasure)
177
+ downbeat_DBN_meter.update(f'{dataset_key}-cmlt', downbeat_score_DBN.cmlt)
178
+ downbeat_DBN_meter.update(f'{dataset_key}-amlt', downbeat_score_DBN.amlt)
179
+ except Exception as e:
180
+ #print(f'downbeat inference encounter exception {e}')
181
+ downbeat_error += 1
182
+ print(f'beat error: {beat_error}; downbeat error: {downbeat_error}')
183
+
184
+ print('DBN beat detection')
185
+ for key in beat_DBN_meter.avg.keys():
186
+ print('\t', key, beat_DBN_meter.avg[key])
187
+
188
+ print('DBN downbeat detection')
189
+ for key in downbeat_DBN_meter.avg.keys():
190
+ print('\t', key, downbeat_DBN_meter.avg[key])
191
+
192
+
193
+ def demo_inference_dbn():
194
+ """
195
+ calculate accuracy and save the predicted (down-)beat timesteps into txt files
196
+ """
197
+ beat_DBN_meter = AverageMeter()
198
+ downbeat_DBN_meter = AverageMeter()
199
+
200
+ beat_tracker = madmom.features.beats.DBNBeatTrackingProcessor(min_bpm=55.0, max_bpm=215.0, fps=FPS,
201
+ transition_lambda=100,
202
+ observation_lambda=6,
203
+ num_tempi=None,
204
+ threshold=0.2)
205
+
206
+ downbeat_tracker = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], min_bpm=55.0, max_bpm=215.0, fps=FPS,
207
+ transition_lambda=100,
208
+ observation_lambda=6,
209
+ num_tempi=None,
210
+ threshold=0.2)
211
+ for dataset in DATA_TO_LOAD:
212
+ save_dir = os.path.join(DEMO_SAVE_ROOT, dataset)
213
+ print(f'Inferencing on {dataset} dataset ...')
214
+ for song in tqdm(os.listdir(save_dir)):
215
+ song_dir = os.path.join(save_dir, song)
216
+ beat_pred = np.loadtxt(os.path.join(song_dir, 'beat_activation.txt'))
217
+ downbeat_pred = np.loadtxt(os.path.join(song_dir, 'downbeat_activation.txt'))
218
+ beat_gt = np.loadtxt(os.path.join(song_dir, 'gt_beat.txt'))
219
+
220
+ dbn_beat_pred = beat_tracker(beat_pred)
221
+ np.savetxt(os.path.join(song_dir, 'dbn_beat_pred.txt'), dbn_beat_pred[:, np.newaxis])
222
+ beat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_beat_pred, beat_gt)
223
+
224
+ accuracy = [f'fmeasure\t{beat_score_DBN.fmeasure}\n', \
225
+ f'cmlt\t{beat_score_DBN.cmlt}\n', \
226
+ f'amlt\t{beat_score_DBN.amlt}\n']
227
+ with open(os.path.join(song_dir, 'accuracy.txt'), 'w') as f:
228
+ f.writelines(accuracy)
229
+
230
+ beat_DBN_meter.update(f'{dataset}-fmeasure', beat_score_DBN.fmeasure)
231
+ beat_DBN_meter.update(f'{dataset}-cmlt', beat_score_DBN.cmlt)
232
+ beat_DBN_meter.update(f'{dataset}-amlt', beat_score_DBN.amlt)
233
+
234
+
235
+ combined_act = np.concatenate((np.maximum(beat_pred - downbeat_pred, np.zeros(beat_pred.shape))[:, np.newaxis], downbeat_pred[:, np.newaxis]), axis=-1) #(T, 2)
236
+ #print(combined_act.shape)
237
+ dbn_downbeat_pred = downbeat_tracker(combined_act)
238
+ dbn_downbeat_pred = dbn_downbeat_pred[dbn_downbeat_pred[:, 1]==1][:, 0]
239
+ np.savetxt(os.path.join(song_dir, 'dbn_downbeat_pred.txt'), dbn_downbeat_pred[:, np.newaxis])
240
+
241
+ #downbeat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_downbeat_pred, downbeat)
242
+ #downbeat_DBN_meter.update(f'{dataset}-fmeasure', downbeat_score_DBN.fmeasure)
243
+ #downbeat_DBN_meter.update(f'{dataset}-cmlt', downbeat_score_DBN.cmlt)
244
+ #downbeat_DBN_meter.update(f'{dataset}-amlt', downbeat_score_DBN.amlt)
245
+
246
+ print('DBN beat detection')
247
+ for key in beat_DBN_meter.avg.keys():
248
+ print('\t', key, beat_DBN_meter.avg[key])
249
+
250
+ print('DBN downbeat detection')
251
+ for key in downbeat_DBN_meter.avg.keys():
252
+ print('\t', key, downbeat_DBN_meter.avg[key])
253
+
254
+
255
+
256
+ def infer_gtzan_activation():
257
+ """
258
+ predict (down-)beat activations for the test-only GTZAN dataset
259
+ """
260
+ dataset = audioDataset(data_to_load=['gtzan'],
261
+ test_only_data = ['gtzan'],
262
+ data_path = DATASET_PATH,
263
+ annotation_path = ANNOTATION_PATH,
264
+ fps = FPS,
265
+ sample_size = None,
266
+ num_folds = NUM_FOLDS)
267
+
268
+ inference_pred = {}
269
+ beat_gt = {}
270
+ downbeat_gt = {}
271
+
272
+ FOLD = 7
273
+ train_set, val_set, test_set = dataset.get_fold(fold=FOLD)
274
+ #loader = DataLoader(val_set, batch_size=1, shuffle=False)
275
+ loader = DataLoader(test_set, batch_size=1, shuffle=False)
276
+
277
+ model = Demixed_DilatedTransformerModel(attn_len=ATTN_LEN,
278
+ instr=INSTR,
279
+ ntoken=NTOKEN,
280
+ dmodel=DMODEL,
281
+ nhead=NHEAD,
282
+ d_hid=DHID,
283
+ nlayers=NLAYER,
284
+ norm_first=NORM_FIRST
285
+ )
286
+ #model.load_state_dict(torch.load(os.path.join(MODEL_PATH, f'Fold_{FOLD}', 'model', 'trf_param_012.pt'), map_location=torch.device('cpu')))
287
+ model.load_state_dict(torch.load(PARAM_PATH[FOLD], map_location=torch.device('cpu'))['state_dict'])
288
+ model.to(DEVICE)
289
+ model.eval()
290
+
291
+ with torch.no_grad():
292
+ for idx, (dataset_key, data, beat, downbeat, tempo, root) in tqdm(enumerate(loader), total=len(loader)):
293
+ #data
294
+ data = data.float().to(DEVICE)
295
+ pred, _ = model(data)
296
+ beat_pred = torch.sigmoid(pred[0, :, 0]).detach().cpu().numpy()
297
+ downbeat_pred = torch.sigmoid(pred[0, :, 1]).detach().cpu().numpy()
298
+
299
+ beat = torch.nonzero(beat[0]>.5)[:, 0].detach().numpy() / (FPS)
300
+ downbeat = torch.nonzero(downbeat[0]>.5)[:, 0].detach().numpy() / (FPS)
301
+
302
+ dataset_key = dataset_key[0]
303
+ if not dataset_key in inference_pred[FOLD]:
304
+ inference_pred[FOLD][dataset_key] = []
305
+ beat_gt[dataset_key] = []
306
+ downbeat_gt[dataset_key] = []
307
+ inference_pred[FOLD][dataset_key].append(np.stack((beat_pred, downbeat_pred), axis=0))
308
+ beat_gt[dataset_key].append(beat)
309
+ downbeat_gt[dataset_key].append(downbeat)
310
+
311
+
312
+ #uncomment to save new activation inference
313
+ #print('saving prediction ...')
314
+ #with open(os.path.join(DEMO_SAVE_ROOT, 'inference_gtzan_pred.pkl'), 'wb') as f:
315
+ # pickle.dump( inference_pred, f)
316
+ #print('saving gt ...')
317
+ #with open(os.path.join(DEMO_SAVE_ROOT, 'beat_gtzan_gt.pkl'), 'wb') as f:
318
+ # pickle.dump(beat_gt, f)
319
+ #with open(os.path.join(DEMO_SAVE_ROOT, 'down_gtzan_gt.pkl'), 'wb') as f:
320
+ # pickle.dump(downbeat_gt, f)
321
+
322
+
323
+
324
+ def inference_gtzan_dbn():
325
+ """
326
+ locate (down-)beat timesteps from activations for the test-only GTZAN dataset
327
+ """
328
+ beat_tracker = madmom.features.beats.DBNBeatTrackingProcessor(min_bpm=55.0, max_bpm=215.0, fps=FPS,
329
+ transition_lambda=100,
330
+ observation_lambda=6,
331
+ num_tempi=None,
332
+ threshold=0.2)
333
+
334
+ downbeat_tracker = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], min_bpm=55.0, max_bpm=215.0, fps=FPS,
335
+ transition_lambda=100,
336
+ observation_lambda=6,
337
+ num_tempi=None,
338
+ threshold=0.2)
339
+
340
+ print('loading activations ...')
341
+ with open(os.path.join(DEMO_SAVE_ROOT, 'inference_gtzan_pred.pkl'), 'rb') as f:
342
+ activations = pickle.load(f)
343
+ with open(os.path.join(DEMO_SAVE_ROOT, 'beat_gtzan_gt.pkl'), 'rb') as f:
344
+ beat_gt = pickle.load(f)
345
+ with open(os.path.join(DEMO_SAVE_ROOT, 'down_gtzan_gt.pkl'), 'rb') as f:
346
+ downbeat_gt = pickle.load(f)
347
+
348
+ dataset_key ='gtzan'
349
+ print(f'inferencing on {dataset_key} ...')
350
+ FOLD = 7
351
+ beat_DBN_meter = AverageMeter()
352
+ downbeat_DBN_meter = AverageMeter()
353
+ beat_error = 0
354
+ downbeat_error = 0
355
+ for i in tqdm(range(len(activations[FOLD][dataset_key]))):
356
+ pred = activations[FOLD][dataset_key][i]
357
+ #print(pred.shape)
358
+ beat = beat_gt[dataset_key][i]
359
+ downbeat = downbeat_gt[dataset_key][i]
360
+
361
+ try:
362
+ dbn_beat_pred = beat_tracker(pred[0])
363
+ beat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_beat_pred, beat)
364
+ beat_DBN_meter.update(f'{dataset_key}-fmeasure', beat_score_DBN.fmeasure)
365
+ beat_DBN_meter.update(f'{dataset_key}-cmlt', beat_score_DBN.cmlt)
366
+ beat_DBN_meter.update(f'{dataset_key}-amlt', beat_score_DBN.amlt)
367
+ except Exception as e:
368
+ #print(f'beat inference encounter exception {e}')
369
+ beat_error += 1
370
+
371
+
372
+ try:
373
+ combined_act = np.concatenate((np.maximum(pred[0] - pred[1], np.zeros(pred[0].shape))[:, np.newaxis], pred[1][:, np.newaxis]), axis=-1) #(T, 2)
374
+ #print(combined_act.shape)
375
+ dbn_downbeat_pred = downbeat_tracker(combined_act)
376
+ dbn_downbeat_pred = dbn_downbeat_pred[dbn_downbeat_pred[:, 1]==1][:, 0]
377
+
378
+ downbeat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_downbeat_pred, downbeat)
379
+ downbeat_DBN_meter.update(f'{dataset_key}-fmeasure', downbeat_score_DBN.fmeasure)
380
+ downbeat_DBN_meter.update(f'{dataset_key}-cmlt', downbeat_score_DBN.cmlt)
381
+ downbeat_DBN_meter.update(f'{dataset_key}-amlt', downbeat_score_DBN.amlt)
382
+ except Exception as e:
383
+ #print(f'downbeat inference encounter exception {e}')
384
+ downbeat_error += 1
385
+ print(f'beat error: {beat_error}; downbeat error: {downbeat_error}')
386
+
387
+ print('DBN beat detection')
388
+ for key in beat_DBN_meter.avg.keys():
389
+ print('\t', key, beat_DBN_meter.avg[key])
390
+
391
+ print('DBN downbeat detection')
392
+ for key in downbeat_DBN_meter.avg.keys():
393
+ print('\t', key, downbeat_DBN_meter.avg[key])
394
+
395
+ if __name__ == '__main__':
396
+ #infer_activation()
397
+ inference_dbn()
398
+ #infer_gtzan_activation()
399
+ inference_gtzan_dbn()
400
+
401
+
402
+
403
+
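
As a standalone illustration of the DBN post-processing used throughout this script (toy activation, not real model output), the beat tracker is simply a callable that turns a frame-level activation curve into beat times in seconds:

import numpy as np
import madmom

FPS = 44100 / 1024
beat_tracker = madmom.features.beats.DBNBeatTrackingProcessor(
    min_bpm=55.0, max_bpm=215.0, fps=FPS, transition_lambda=100,
    observation_lambda=6, num_tempi=None, threshold=0.2)

# toy activation: a spike roughly every 0.5 s (about 120 BPM) over 30 seconds
n_frames = int(FPS * 30)
activation = np.zeros(n_frames, dtype=np.float32)
activation[np.arange(0, n_frames, int(round(FPS * 0.5)))] = 1.0

beat_times = beat_tracker(activation)   # beat positions in seconds
print(beat_times[:4])
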
Beat-Transformer/code/optimizer.py ADDED
@@ -0,0 +1,100 @@
1
+ import math
2
+ import torch
3
+ import itertools as it
4
+ from torch.optim import Optimizer
5
+ from collections import defaultdict
6
+
7
+ class Lookahead(Optimizer):
8
+ '''
9
+ PyTorch implementation of the lookahead wrapper.
10
+ Lookahead Optimizer: https://arxiv.org/abs/1907.08610
11
+ '''
12
+ def __init__(self, optimizer, alpha=0.5, k=6, pullback_momentum="none"):
13
+ '''
14
+ :param optimizer:inner optimizer
15
+ :param k (int): number of lookahead steps
16
+ :param alpha(float): linear interpolation factor. 1.0 recovers the inner optimizer.
17
+ :param pullback_momentum (str): change to inner optimizer momentum on interpolation update
18
+ '''
19
+ if not 0.0 <= alpha <= 1.0:
20
+ raise ValueError(f'Invalid slow update rate: {alpha}')
21
+ if not 1 <= k:
22
+ raise ValueError(f'Invalid lookahead steps: {k}')
23
+ self.optimizer = optimizer
24
+ self.param_groups = self.optimizer.param_groups
25
+ self.alpha = alpha
26
+ self.k = k
27
+ self.step_counter = 0
28
+ assert pullback_momentum in ["reset", "pullback", "none"]
29
+ self.pullback_momentum = pullback_momentum
30
+ self.state = defaultdict(dict)
31
+
32
+ # Cache the current optimizer parameters
33
+ for group in self.optimizer.param_groups:
34
+ for p in group['params']:
35
+ param_state = self.state[p]
36
+ param_state['cached_params'] = torch.zeros_like(p.data)
37
+ param_state['cached_params'].copy_(p.data)
38
+
39
+ def __getstate__(self):
40
+ return {
41
+ 'state': self.state,
42
+ 'optimizer': self.optimizer,
43
+ 'alpha': self.alpha,
44
+ 'step_counter': self.step_counter,
45
+ 'k':self.k,
46
+ 'pullback_momentum': self.pullback_momentum
47
+ }
48
+
49
+ def zero_grad(self):
50
+ self.optimizer.zero_grad()
51
+
52
+ def state_dict(self):
53
+ return self.optimizer.state_dict()
54
+
55
+ def load_state_dict(self, state_dict):
56
+ self.optimizer.load_state_dict(state_dict)
57
+
58
+ def _backup_and_load_cache(self):
59
+ """Useful for performing evaluation on the slow weights (which typically generalize better)
60
+ """
61
+ for group in self.optimizer.param_groups:
62
+ for p in group['params']:
63
+ param_state = self.state[p]
64
+ param_state['backup_params'] = torch.zeros_like(p.data)
65
+ param_state['backup_params'].copy_(p.data)
66
+ p.data.copy_(param_state['cached_params'])
67
+
68
+ def _clear_and_load_backup(self):
69
+ for group in self.optimizer.param_groups:
70
+ for p in group['params']:
71
+ param_state = self.state[p]
72
+ p.data.copy_(param_state['backup_params'])
73
+ del param_state['backup_params']
74
+
75
+ def step(self, closure=None):
76
+ """Performs a single Lookahead optimization step.
77
+ Arguments:
78
+ closure (callable, optional): A closure that reevaluates the model
79
+ and returns the loss.
80
+ """
81
+ loss = self.optimizer.step(closure)
82
+ self.step_counter += 1
83
+
84
+ if self.step_counter >= self.k:
85
+ self.step_counter = 0
86
+ # Lookahead and cache the current optimizer parameters
87
+ for group in self.optimizer.param_groups:
88
+ for p in group['params']:
89
+ param_state = self.state[p]
90
+ p.data.mul_(self.alpha).add_(param_state['cached_params'], alpha=1.0 - self.alpha) # crucial line
91
+ param_state['cached_params'].copy_(p.data)
92
+ if self.pullback_momentum == "pullback":
93
+ internal_momentum = self.optimizer.state[p]["momentum_buffer"]
94
+ self.optimizer.state[p]["momentum_buffer"] = internal_momentum.mul_(self.alpha).add_(
95
+ param_state["cached_mom"], alpha=1.0 - self.alpha)
96
+ param_state["cached_mom"] = self.optimizer.state[p]["momentum_buffer"]
97
+ elif self.pullback_momentum == "reset":
98
+ self.optimizer.state[p]["momentum_buffer"] = torch.zeros_like(p.data)
99
+
100
+ return loss
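
A minimal usage sketch for the Lookahead wrapper above (toy model and hyper-parameters assumed; the training script configures its own inner optimizer): it wraps a regular PyTorch optimizer and interpolates towards the cached slow weights every k steps.

import torch
from torch import nn

model = nn.Linear(10, 2)                        # toy model
inner = torch.optim.Adam(model.parameters(), lr=1e-3)
optimizer = Lookahead(inner, alpha=0.5, k=6)    # slow-weight update every 6 inner steps

x, y = torch.randn(8, 10), torch.randn(8, 2)
for _ in range(12):
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()    # steps Adam, then interpolates towards the slow weights every k steps
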
Beat-Transformer/code/spectrogram_dataset.py ADDED
@@ -0,0 +1,427 @@
1
+ import os
2
+ import time
3
+ import madmom
4
+ import torch
5
+ import librosa
6
+ import numpy as np
7
+ from torch.utils.data import Dataset
8
+ from scipy.ndimage import maximum_filter1d
9
+ from tqdm import tqdm
10
+ from matplotlib import pyplot as plt
11
+ import librosa.display
12
+ from scipy.interpolate import interp1d
13
+ from scipy.signal import argrelmax
14
+
15
+
16
+
17
+ class dataset_processing(Dataset):
18
+ def __init__(self, full_data,
19
+ full_annotation,
20
+ audio_files,
21
+ mode='train',
22
+ fold=0,
23
+ fps=44100/1024,
24
+ sample_size = 512,
25
+ num_folds=8,
26
+ mask_value=-1,
27
+ test_only = []
28
+ ):
29
+ self.fold = fold
30
+ self.num_folds = num_folds
31
+ self.fps = fps
32
+ self.mode = mode
33
+ self.sample_size = sample_size
34
+ self.MASK_VALUE = mask_value
35
+
36
+ self.data = []
37
+ self.beats = []
38
+ self.downbeats = []
39
+ self.tempi = []
40
+ self.root = []
41
+
42
+ if self.mode == 'train':
43
+ self.dataset_name = []
44
+ self.train_clip(full_data, full_annotation, test_only=test_only)
45
+
46
+ elif self.mode == 'validation' or self.mode == 'test':
47
+ self.dataset_name = []
48
+ self.audio_files = []
49
+ self.val_and_test_clip(full_data, full_annotation, audio_files, test_only=test_only)
50
+
51
+ full_data = None
52
+ full_annotation = None
53
+
54
+ def train_clip(self, full_data, full_annotation, num_tempo_bins=300, test_only=[]):
55
+ for fold_idx in tqdm(range(self.num_folds)):
56
+ if (fold_idx != self.fold) and (fold_idx != (self.fold+1)%self.num_folds):
57
+ for key in full_data:
58
+ if key in test_only:  # test_only is a list of dataset names excluded from training
59
+ continue
60
+ #print(f'processing {key} under fold {fold_idx}')
61
+ for song_idx in range(len(full_data[key][fold_idx])):
62
+ song = full_data[key][fold_idx][song_idx] #(t, 5, mel)
63
+ annotation = full_annotation[key][fold_idx][song_idx]
64
+ try:
65
+ #print(annotation, annotation.shape)
66
+ if len(annotation.shape) == 2:
67
+ beat = madmom.utils.quantize_events(annotation[:, 0], fps=self.fps, length=len(song))
68
+ else:
69
+ beat = madmom.utils.quantize_events(annotation[:], fps=self.fps, length=len(song))
70
+ beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
71
+ beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
72
+ except:
73
+ beat = np.ones(len(song), dtype='float32') * self.MASK_VALUE
74
+ print(f'beat load error at {key} dataset, skip it')
75
+
76
+ try:
77
+ downbeat = annotation[annotation[:, 1] == 1][:, 0]
78
+ downbeat = madmom.utils.quantize_events(downbeat, fps=self.fps, length=len(song))
79
+ downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
80
+ downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
81
+ except:
82
+ downbeat = np.ones(len(song), dtype='float32') * self.MASK_VALUE
83
+ if not ((key == 'smc') or (key == 'musicnet')):
84
+ print(f'downbeat load error at {key} dataset, skip it')
85
+
86
+ try:
87
+ #tempo = self.infer_tempo(annotation[:, 0])
88
+ #tempo = np.array([int(np.round(tempo))])
89
+ tempo = np.zeros(num_tempo_bins, dtype='float32')
90
+ if len(annotation.shape) == 2:
91
+ tempo[int(np.round(self.infer_tempo(annotation[:, 0])))] = 1
92
+ else:
93
+ tempo[int(np.round(self.infer_tempo(annotation[:])))] = 1
94
+ tempo = np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.5)
95
+ tempo = np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.5)
96
+ tempo = tempo/sum(tempo)
97
+ #tempo += np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.25)
98
+ except:
99
+ #tempo = np.array([self.MASK_VALUE])
100
+ tempo = np.ones(num_tempo_bins, dtype='float32') * self.MASK_VALUE
101
+
102
+ if self.sample_size is None:
103
+ self.dataset_name.append(key)
104
+ self.data.append(song)
105
+ self.beats.append(beat)
106
+ self.downbeats.append(downbeat)
107
+ self.tempi.append(tempo)
108
+ else:
109
+ if len(song) <= self.sample_size:
110
+ self.dataset_name.append(key)
111
+ self.data.append(song)
112
+ self.beats.append(beat)
113
+ self.downbeats.append(downbeat)
114
+ self.tempi.append(tempo)
115
+ else:
116
+ for i in range(0, len(song)-self.sample_size+1, self.sample_size):
117
+ self.dataset_name.append(key)
118
+ self.data.append(song[i: i+self.sample_size])
119
+ self.beats.append(beat[i: i+self.sample_size])
120
+ self.downbeats.append(downbeat[i: i+self.sample_size])
121
+ self.tempi.append(tempo)
122
+ if i + self.sample_size < len(song):
123
+ self.dataset_name.append(key)
124
+ self.data.append(song[len(song)-self.sample_size:])
125
+ self.beats.append(beat[len(song)-self.sample_size:])
126
+ self.downbeats.append(downbeat[len(song)-self.sample_size:])
127
+ self.tempi.append(tempo)
128
+
129
+
130
+ #print(len(self.data), len(self.beats), len(self.downbeats))
131
+
132
+ def val_and_test_clip(self, full_data, full_annotation, audio_files, num_tempo_bins=300, test_only=[]):
133
+ if self.mode == 'validation':
134
+ fold_idx = (self.fold+1)%self.num_folds
135
+ elif self.mode == 'test':
136
+ fold_idx = self.fold
137
+ for key in tqdm(full_data, total=len(full_data)):
138
+ #print(f'processing {key}')
139
+ if ((self.mode == 'validation') and (key in test_only)):
140
+ continue
141
+ for song_idx in range(len(full_data[key][fold_idx])):
142
+ song = full_data[key][fold_idx][song_idx]
143
+ annotation = full_annotation[key][fold_idx][song_idx]
144
+ audio_file = audio_files[key][fold_idx][song_idx]
145
+ try:
146
+ if len(annotation.shape) == 2:
147
+ beat = madmom.utils.quantize_events(annotation[:, 0], fps=self.fps, length=len(song))
148
+ else:
149
+ beat = madmom.utils.quantize_events(annotation[:], fps=self.fps, length=len(song))
150
+ beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
151
+ beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
152
+ except:
153
+ beat = np.ones(len(song), dtype='float32') * self.MASK_VALUE
154
+ print(f'beat load error at {key} dataset, skip it')
155
+
156
+ try:
157
+ downbeat = annotation[annotation[:, 1] == 1][:, 0]
158
+ downbeat = madmom.utils.quantize_events(downbeat, fps=self.fps, length=len(song))
159
+ downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
160
+ downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
161
+ except:
162
+ downbeat = np.ones(len(song), dtype='float32') * self.MASK_VALUE
163
+ if not ((key == 'smc') or (key == 'musicnet')):
164
+ print(f'downbeat load error at {key} dataset, skip it')
165
+
166
+ try:
167
+ #tempo = self.infer_tempo(annotation[:, 0])
168
+ #tempo = np.array([int(np.round(tempo))])
169
+ tempo = np.zeros(num_tempo_bins, dtype='float32')
170
+ if len(annotation.shape) == 2:
171
+ tempo[int(np.round(self.infer_tempo(annotation[:, 0])))] = 1
172
+ else:
173
+ tempo[int(np.round(self.infer_tempo(annotation[:])))] = 1
174
+ tempo = np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.5)
175
+ tempo = np.maximum(tempo, maximum_filter1d(tempo, size=3) * 0.5)
176
+ tempo = tempo/sum(tempo)
177
+ except:
178
+ #tempo = np.array([self.MASK_VALUE])
179
+ tempo = np.ones(num_tempo_bins, dtype='float32') * self.MASK_VALUE
180
+
181
+ if self.sample_size is None:
182
+ self.dataset_name.append(key)
183
+ self.root.append(audio_file)
184
+ self.data.append(song)
185
+ self.beats.append(beat)
186
+ self.downbeats.append(downbeat)
187
+ self.tempi.append(tempo)
188
+ else:
189
+ eval_sample_size = int(44100/1024 * 420)
190
+ if len(song) <= eval_sample_size:
191
+ self.dataset_name.append(key)
192
+ self.root.append(audio_file)
193
+ self.data.append(song)
194
+ self.beats.append(beat)
195
+ self.downbeats.append(downbeat)
196
+ self.tempi.append(tempo)
197
+ else:
198
+ for i in range(0, len(song)-eval_sample_size+1, eval_sample_size):
199
+ self.dataset_name.append(key)
200
+ self.root.append(audio_file)
201
+ self.data.append(song[i: i+eval_sample_size])
202
+ self.beats.append(beat[i: i+eval_sample_size])
203
+ self.downbeats.append(downbeat[i: i+eval_sample_size])
204
+ self.tempi.append(tempo)
205
+ if i + eval_sample_size < len(song):
206
+ self.dataset_name.append(key)
207
+ self.root.append(audio_file)
208
+ self.data.append(song[len(song)-eval_sample_size:])
209
+ self.beats.append(beat[len(song)-eval_sample_size:])
210
+ self.downbeats.append(downbeat[len(song)-eval_sample_size:])
211
+ self.tempi.append(tempo)
212
+
213
+ def infer_tempo(self, beats, hist_smooth=4, no_tempo=-1):
214
+ ibis = np.diff(beats) * self.fps
215
+ bins = np.bincount(np.round(ibis).astype(int))
216
+ # if no beats are present, there is no tempo
217
+ if not bins.any():
218
+ return no_tempo
219
+ # smooth histogram bins
220
+ if hist_smooth > 0:
221
+ bins = madmom.audio.signal.smooth(bins, hist_smooth)
222
+ #print(bins)
223
+ intervals = np.arange(len(bins))
224
+ # create interpolation function
225
+ interpolation_fn = interp1d(intervals, bins, 'quadratic')
226
+ # generate new intervals with 1000x the resolution
227
+ intervals = np.arange(intervals[0], intervals[-1], 0.001)
228
+ tempi = 60.0 * self.fps / intervals
229
+ # apply quadratic interpolation
230
+ bins = interpolation_fn(intervals)
231
+ peaks = argrelmax(bins, mode='wrap')[0]
232
+ if len(peaks) == 0:
233
+ # no peaks, no tempo
234
+ return no_tempo
235
+ else:
236
+ # report only the strongest tempo
237
+ sorted_peaks = peaks[np.argsort(bins[peaks])[::-1]]
238
+ return tempi[sorted_peaks][0]
239
+
240
+ def __len__(self):
241
+ return len(self.data)
242
+
243
+ def __getitem__(self, index):
244
+ """x = np.sum(self.data[index], axis=1).transpose(1, 0) #(dmodel, T)
245
+ x = librosa.power_to_db(x, ref=np.max)
246
+ x = x.T[np.newaxis, :, :]
247
+ x = np.repeat(x, 5, axis=0)
248
+ return self.dataset_name[index], x, self.beats[index], self.downbeats[index], self.tempi[index]"""
249
+
250
+ x = np.transpose(self.data[index],( 1, 2, 0)) #5, dmodel, T
251
+ #x = x + .25 * np.sum(x, axis=0, keepdims=True)
252
+ #x = [librosa.power_to_db(x[i], ref=np.max) for i in range(x.shape[0])]
253
+
254
+ np.random.seed()
255
+ if self.mode == 'train':
256
+ p = np.random.rand()
257
+ if p < .5: #50% time use 5 subspectrograms
258
+ pass
259
+ else:
260
+ idx_sum = np.random.choice(len(x), size=2, replace=False)
261
+ x = [x[i] for i in range(len(x)) if i not in idx_sum] + [x[idx_sum[0]] + x[idx_sum[1]]]
262
+ q = np.random.rand()
263
+ if q < .6: #30% time use 4 subspectrograms
264
+ pass
265
+ else:
266
+ idx_sum = np.random.choice(len(x), size=2, replace=False)
267
+ x = [x[i] for i in range(len(x)) if i not in idx_sum] + [x[idx_sum[0]] + x[idx_sum[1]]]
268
+ r = np.random.rand()
269
+ if r < .5: #10% time use 3 subspectrograms
270
+ pass
271
+ else: #10% time use 2 subspectrograms
272
+ idx_sum = np.random.choice(len(x), size=2, replace=False)
273
+ x = [x[i] for i in range(len(x)) if i not in idx_sum] + [x[idx_sum[0]] + x[idx_sum[1]]]
274
+
275
+ x = [librosa.power_to_db(x[i], ref=np.max) for i in range(len(x))]
276
+ x = np.transpose(np.array(x), (0, 2, 1)) #T, instr, dmodel
277
+
278
+ if self.mode == 'test':
279
+ return self.dataset_name[index], x, self.beats[index], self.downbeats[index], self.tempi[index], self.root[index]
280
+ else:
281
+ return self.dataset_name[index], x, self.beats[index], self.downbeats[index], self.tempi[index]
282
+
283
+
284
+
285
+
286
+
287
+ class audioDataset(object):
288
+ def __init__(self, data_to_load=['ballroom', 'carnetic', 'gtzan', 'hainsworth', 'smc', 'harmonix'],
289
+ test_only_data = ['hainsworth'],
290
+ data_path="/data1/zhaojw/dataset/linear_spectrogram_data.npz",
291
+ annotation_path="/data1/zhaojw/dataset/beat_annotation.npz",
292
+ fps=44100/1024,
293
+ SEED = 0,
294
+ num_folds=8,
295
+ mask_value = -1,
296
+ sample_size = 512
297
+ ):
298
+
299
+ self.fps = fps
300
+ self.sample_size = sample_size
301
+ self.mask_value = mask_value
302
+ self.num_folds = num_folds
303
+ self.test_only_data = test_only_data
304
+
305
+ # load_linear_spectr = np.load(data_path, allow_pickle=True)
306
+ load_annotation = np.load(annotation_path, allow_pickle=True)
307
+
308
+ self.full_data = {}
309
+ self.full_annotation = {}
310
+ self.audio_files = {}
311
+ for key in load_annotation:
312
+ if key in data_to_load:
313
+ time1 = time.time()
314
+ print(f'loading {key} dataset ...')
315
+ # data = load_linear_spectr[key]  # NOTE: with the spectrogram load above disabled, 'data' referenced in the fold split below is undefined
316
+ annotation = load_annotation[key]
317
+ # assert(len(data) == len(annotation))
318
+
319
+ with open(f'./data/audio_lists/{key}.txt', 'r') as f:
320
+ audio_root = f.readlines()
321
+ audio_root = [item.replace('\n', '') for item in audio_root]
322
+ assert(len(annotation) == len(audio_root))
323
+ print(f'finish loading {key} with shape {annotation.shape}, using {time.time()-time1}s.')
324
+ #fold split
325
+ self.full_data[key] = {}
326
+ self.full_annotation[key] = {}
327
+ self.audio_files[key] = {}
328
+ if key in self.test_only_data:
329
+ FOLD_SIZE = len(annotation) // num_folds
330
+ np.random.seed(SEED)
331
+ np.random.shuffle(data)
332
+ np.random.seed(SEED)
333
+ np.random.shuffle(annotation)
334
+ np.random.seed(SEED)
335
+ np.random.shuffle(audio_root)
336
+ for i in range(num_folds):
337
+ self.full_data[key][i] = audio_root[:]
338
+ self.full_annotation[key][i] = annotation[:]
339
+ self.audio_files[key][i] = audio_root[:]
340
+ else:
341
+ FOLD_SIZE = len(annotation) // num_folds
342
+ np.random.seed(SEED)
343
+ np.random.shuffle(data)
344
+ np.random.seed(SEED)
345
+ np.random.shuffle(annotation)
346
+ np.random.seed(SEED)
347
+ np.random.shuffle(audio_root)
348
+ for i in range(num_folds-1):
349
+ self.full_data[key][i] = data[i*FOLD_SIZE: (i+1)*FOLD_SIZE]
350
+ self.full_annotation[key][i] = annotation[i*FOLD_SIZE: (i+1)*FOLD_SIZE]
351
+ self.audio_files[key][i] = audio_root[i*FOLD_SIZE: (i+1)*FOLD_SIZE]
352
+ self.full_data[key][num_folds-1] = data[(num_folds-1)*FOLD_SIZE: len(data)]
353
+ self.full_annotation[key][num_folds-1] = annotation[(num_folds-1)*FOLD_SIZE: len(annotation)]
354
+ self.audio_files[key][num_folds-1] = audio_root[(num_folds-1)*FOLD_SIZE: len(audio_root)]
355
+ data = None
356
+ annotation = None
357
+
358
+ def get_fold(self, fold=0):
359
+ print('processing train_set')
360
+ train_set = dataset_processing(full_data=self.full_data,
361
+ full_annotation=self.full_annotation,
362
+ audio_files=None,
363
+ mode='train',
364
+ fps=self.fps,
365
+ fold=fold,
366
+ sample_size = self.sample_size,
367
+ num_folds=self.num_folds,
368
+ mask_value=self.mask_value,
369
+ test_only=self.test_only_data
370
+ )
371
+
372
+ print('processing val_set')
373
+ val_set = dataset_processing(full_data=self.full_data,
374
+ full_annotation=self.full_annotation,
375
+ audio_files=self.audio_files,
376
+ mode='validation',
377
+ fps=self.fps,
378
+ fold=fold,
379
+ sample_size=self.sample_size,
380
+ num_folds=self.num_folds,
381
+ mask_value=self.mask_value,
382
+ test_only=self.test_only_data
383
+ )
384
+
385
+ print('processing test_set')
386
+ test_set = dataset_processing(full_data=self.full_data,
387
+ full_annotation=self.full_annotation,
388
+ audio_files=self.audio_files,
389
+ mode='test',
390
+ fps=self.fps,
391
+ fold=fold,
392
+ sample_size=self.sample_size,
393
+ num_folds=self.num_folds,
394
+ mask_value=self.mask_value,
395
+ test_only=self.test_only_data
396
+ )
397
+ return train_set, val_set, test_set
398
+
399
+
400
+
401
+ if __name__ == '__main__':
402
+ from torch.utils.data import DataLoader
403
+ #data_to_load=['ballroom', 'carnetic', 'gtzan', 'hainsworth', 'smc', 'harmonix']
404
+ dataset = audioDataset(data_to_load=['ballroom', 'carnetic', 'gtzan', 'hainsworth', 'smc'],
405
+ test_only_data = ['gtzan'],
406
+ # data_path = "./data/demix_spectrogram_data.npz",
407
+ annotation_path = "/work/fast_data_yinghao/Beat-Transformer/data/full_beat_annotation.npz",
408
+ fps = 44100/1024,
409
+ sample_size = None,
410
+ num_folds = 8)
411
+ # Fold Splitting
412
+ train_set, val_set, test_set = dataset.get_fold(fold=0)
413
+ #train_loader = DataLoader(train_set, batch_size=1, shuffle=True)
414
+ #val_loader = DataLoader(val_set, batch_size=1, shuffle=False)
415
+ test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
416
+ #for i, (key, data, beat, downbeat, tempo) in enumerate(val_data):
417
+ for i, (key, data, beat, downbeat, tempo, root) in enumerate(test_loader):
418
+ print('key:', key)
419
+ print('data:', data.shape)
420
+ print('beat:', beat.shape)
421
+ #print('beat:', torch.nonzero(beat))
422
+ print('downbeat:', downbeat.shape)
423
+ print('tempo:', tempo.shape)
424
+ print('audio_root:', root)
425
+ #print('downbeat:', torch.nonzero(downbeat))
426
+ break
427
+
Beat-Transformer/code/train.py ADDED
@@ -0,0 +1,396 @@
1
+ import os
2
+ os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
3
+ import sys
4
+ import time
5
+ import madmom
6
+ import torch
7
+ from tqdm import tqdm
8
+ from torch import nn
9
+ from torch import optim
10
+ from optimizer import Lookahead
11
+ from torch.utils.tensorboard import SummaryWriter
12
+ from torch.utils.data import DataLoader
13
+ from utils import AverageMeter, epoch_time, infer_beat_with_DBN, infer_downbeat_with_DBN
14
+ from spectrogram_dataset import audioDataset
15
+
16
+ from DilatedTransformer import Demixed_DilatedTransformerModel
17
+
18
+ import warnings
19
+ warnings.filterwarnings('ignore')
20
+
21
+
22
+ DEBUG_MODE = int(sys.argv[1])
23
+ FOLD = int(sys.argv[2])
24
+ GPU = int(sys.argv[3])
25
+ PROJECT_NAME = 'Beat_Transformer'
26
+
27
+ ###############################################################################
28
+ # Load config
29
+ ###############################################################################
30
+ #data
31
+ SAMPLE_SIZE = int(44100 / 1024 * 180)
32
+ INSTR = 5
33
+ FPS = 44100 / 1024
34
+ NUM_FOLDS = 8
35
+ #model
36
+ NORM_FIRST=True
37
+ ATTN_LEN=5
38
+ NTOKEN=2
39
+ DMODEL=256
40
+ NHEAD=8
41
+ DHID=1024
42
+ NLAYER=9
43
+ DROPOUT=.1
44
+ #training
45
+ DEVICE=f'cuda:{GPU}'
46
+ TRAIN_BATCH_SIZE = 1
47
+ LEARNING_RATE = 1e-3
48
+ DECAY = 0.99995
49
+ N_EPOCH = 30
50
+ CLIP=.5
51
+ #directories
52
+ DATASET_PATH = './data/demix_spectrogram_data.npz'
53
+ ANNOTATION_PATH = './data/full_beat_annotation.npz'
54
+ DATA_TO_LOAD = ['ballroom', 'gtzan', 'hainsworth', 'smc', 'harmonix', 'carnetic']
55
+ TEST_ONLY = ['gtzan']
56
+
57
+ SAVE_PATH = f'./save/train_log/{str(GPU).zfill(2)}_{PROJECT_NAME}'
58
+
59
+ if DEBUG_MODE:
60
+ N_EPOCH = 1
61
+ TRAIN_BATCH_SIZE = 1
62
+ DECAY = 0.9995
63
+ DATA_TO_LOAD = ['hainsworth'] #hainsworth, smc
64
+ SAVE_PATH = os.path.join(SAVE_PATH, 'debug')
65
+
66
+ print(f'\nProject initialized: {PROJECT_NAME}\n', flush=True)
67
+
68
+
69
+ print(f'\nFold {FOLD}')
70
+ ###############################################################################
71
+ # Initialize fold
72
+ ###############################################################################
73
+ project_path = os.path.join(SAVE_PATH, f'Fold_{FOLD}')
74
+
75
+ MODEL_PATH = os.path.join(project_path, 'model')
76
+ LOG_PATH = os.path.join(project_path, 'log')
77
+
78
+ if not os.path.exists(MODEL_PATH):
79
+ os.makedirs(MODEL_PATH)
80
+
81
+ if not os.path.exists(LOG_PATH):
82
+ os.makedirs(LOG_PATH)
83
+
84
+ loss_writer = SummaryWriter(os.path.join(LOG_PATH, 'loss'))
85
+ #beat_writer = SummaryWriter(os.path.join(LOG_PATH, 'beat_acc'))
86
+ beat_ll_writer = SummaryWriter(os.path.join(LOG_PATH, 'beat_likelihood'))
87
+ downbeat_ll_writer = SummaryWriter(os.path.join(LOG_PATH, 'downbeat_likelihood'))
88
+ beat_pr_writer = SummaryWriter(os.path.join(LOG_PATH, 'beat_precision'))
89
+ downbeat_pr_writer = SummaryWriter(os.path.join(LOG_PATH, 'downbeat_precision'))
90
+ beat_DBN_writer = SummaryWriter(os.path.join(LOG_PATH, 'beat_DBN_acc'))
91
+ #downbeat_writer = SummaryWriter(os.path.join(LOG_PATH, 'downbeat_acc'))
92
+ downbeat_DBN_writer = SummaryWriter(os.path.join(LOG_PATH, 'downbeat_DBN_acc'))
93
+
94
+
95
+ ###############################################################################
96
+ # model parameter
97
+ ###############################################################################
98
+ model = Demixed_DilatedTransformerModel(attn_len=ATTN_LEN,
99
+ instr=INSTR,
100
+ ntoken=NTOKEN,
101
+ dmodel=DMODEL,
102
+ nhead=NHEAD,
103
+ d_hid=DHID,
104
+ nlayers=NLAYER,
105
+ norm_first=NORM_FIRST,
106
+ dropout=DROPOUT
107
+ )
108
+
109
+ model.to(DEVICE)
110
+
111
+
112
+ ###############################################################################
113
+ # load data
114
+ ###############################################################################
115
+ dataset = audioDataset(data_to_load=DATA_TO_LOAD,
116
+ test_only_data = TEST_ONLY,
117
+ data_path = DATASET_PATH,
118
+ annotation_path = ANNOTATION_PATH,
119
+ fps = FPS,
120
+ sample_size = SAMPLE_SIZE,
121
+ num_folds = NUM_FOLDS)
122
+ # Fold Splitting
123
+ train_set, val_set, test_set = dataset.get_fold(fold=FOLD)
124
+ train_loader = DataLoader(train_set, batch_size=TRAIN_BATCH_SIZE, shuffle=True)
125
+ val_loader = DataLoader(val_set, batch_size=1, shuffle=False)
126
+ #test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
127
+
128
+
129
+ ###############################################################################
130
+ # Optimizer and Criterion
131
+ ###############################################################################
132
+ optimizer = optim.RAdam(model.parameters(), lr=LEARNING_RATE)
133
+ optimizer = Lookahead(optimizer=optimizer, k=5, alpha=0.5)
134
+ #scheduler = MinExponentialLR(optimizer, gamma=DECAY, minimum=1e-5)
135
+ scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=.2, patience=2, threshold=1e-3, min_lr=1e-7)
136
+ loss_func = nn.BCEWithLogitsLoss(reduction='none', pos_weight=torch.LongTensor([1, 1]).to(DEVICE))
137
+ loss_tempo = nn.BCELoss(reduction='none')
138
+
139
+ beat_tracker = madmom.features.beats.DBNBeatTrackingProcessor(min_bpm=55.0, max_bpm=215.0, fps=FPS, transition_lambda=10, threshold=0.05)
140
+ downbeat_tracker = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], min_bpm=55.0, max_bpm=215.0, fps=FPS, transition_lambda=10)
141
+
142
+
143
+ ###############################################################################
144
+ # Main
145
+ ###############################################################################
146
+ def train(model, train_loader, optimizer, scheduler, loss_func, loss_tempo, clip, epoch, device):
147
+ print('training ...', flush=True)
148
+ num_batch = len(train_loader)
149
+ loss_meter_b = AverageMeter()
150
+ loss_meter_t = AverageMeter()
151
+ beat_meter = AverageMeter()
152
+ beat_DBN_meter = AverageMeter()
153
+ downbeat_meter = AverageMeter()
154
+ downbeat_DBN_meter = AverageMeter()
155
+ nan_count = []
156
+ for idx, (dataset_key, data, beat_gt, downbeat_gt, tempo_gt) in tqdm(enumerate(train_loader), total=num_batch):
157
+ #try:
158
+ #data
159
+ data = data.float().to(device)
160
+ #annotation
161
+ beat_gt = beat_gt.to(device)
162
+ downbeat_gt = downbeat_gt.to(device)
163
+ gt = torch.cat([beat_gt.unsqueeze(-1), downbeat_gt.unsqueeze(-1)], dim=-1).float().to(device) #(batch, T', 2)
164
+ tempo_gt = tempo_gt.to(device)
165
+
166
+ optimizer.zero_grad()
167
+ pred, tempo = model(data)
168
+ #print(pred.shape, gt.shape)
169
+ valid_gt = gt.clone()
170
+ valid_gt[gt == -1] = 0
171
+ loss = loss_func(pred, valid_gt)
172
+ weight = (1 - torch.as_tensor(gt == -1, dtype=torch.int32)).to(device)
173
+ loss = (weight * loss).mean(dim=(0, 1)).sum()
174
+
175
+ valid_tempo_gt = tempo_gt.clone()
176
+ valid_tempo_gt[tempo_gt == -1] = 0
177
+ loss_t = loss_tempo(torch.softmax(tempo, dim=-1), valid_tempo_gt)
178
+ weight = (1 - torch.as_tensor(tempo_gt == -1, dtype=torch.int32)).to(device)
179
+ loss_t = (weight * loss_t).mean()
180
+ #except RuntimeError:
181
+ # continue
182
+
183
+ loss_meter_t.update('train/loss', loss_t.item())
184
+ loss_meter_b.update('train/loss', loss.item())
185
+ if ((dataset_key[0] == 'musicnet') and (-1 in tempo_gt)):
186
+ loss = loss * 0 #do not trust musicnet beat annotation if tempo is none
187
+
188
+ #try:
189
+ loss = loss + loss_t
190
+ #except RuntimeError:
191
+ # continue
192
+ if torch.isnan(loss):
193
+ nan_count.append(str(dataset_key)+'\n')
194
+ with open('./home/zhaojw/workspace/efficient_dilated_MultiSpec_Transformer/nancount.txt', 'w') as f:
195
+ f.writelines(nan_count)
196
+ continue
197
+
198
+ #downbeat_loss = loss_func(downbeat_pred, downbeat_gt)
199
+ loss.backward()
200
+
201
+ torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
202
+ optimizer.step()
203
+ #scheduler.step()
204
+
205
+ #binary_acc = binary_accuracy(pred[:, :, :2], gt[:, :, :2])
206
+ #loss_meter.update('train/loss', loss.item())
207
+ #loss_meter.update('train/binary_acc', binary_acc.item())
208
+
209
+ #beat_acc = beat_accuracy(pred[:, :, 0], gt[:, :, 0], FPS/DS_RATIO)
210
+ #for key in beat_acc:
211
+ # beat_meter.update('train/' + key, beat_acc[key])
212
+
213
+
214
+ #downbeat_acc = beat_accuracy(pred[:, :, 1], gt[:, :, 1], FPS/DS_RATIO)
215
+ #if not dataset_key[0] == 'smc':
216
+ # for key in downbeat_acc:
217
+ # downbeat_meter.update('train/' + key, downbeat_acc[key])
218
+
219
+
220
+ if DEBUG_MODE:
221
+ print('------------training------------', flush=True)
222
+ print('Epoch: [{0}][{1}/{2}]'.format(epoch+1, idx, num_batch), flush=True)
223
+ print('train beat loss:', loss.item()-loss_t.item(), flush=True)
224
+ print('train tempo loss:', loss_t.item(), flush=True)
225
+ #print('train binary batch accuracy', binary_acc.item(), flush=True)
226
+ #print('beat accuracy:', list(beat_acc.values()), flush=True)
227
+ #print('downbeat accuracy:', list(downbeat_acc.values()), flush=True)
228
+
229
+ loss_writer.add_scalar('train/loss_beat', loss_meter_b.avg['train/loss'], epoch * num_batch + idx)
230
+ loss_writer.add_scalar('train/loss_tempo', loss_meter_t.avg['train/loss'], epoch * num_batch + idx)
231
+ loss_writer.add_scalar('train/lr', optimizer.param_groups[0]['lr'], epoch * num_batch + idx)
232
+
233
+ #for key in beat_meter.avg.keys():
234
+ # if 'train' in key:
235
+ # beat_writer.add_scalar(key, beat_meter.avg[key], epoch * num_batch + idx)
236
+ #for key in beat_DBN_meter.avg.keys():
237
+ # if 'train' in key:
238
+ # beat_DBN_writer.add_scalar(key, beat_DBN_meter.avg[key], epoch * num_batch + idx)
239
+ #for key in downbeat_meter.avg.keys():
240
+ # if 'train' in key:
241
+ # downbeat_writer.add_scalar(key, downbeat_meter.avg[key], epoch * num_batch + idx)
242
+ #for key in downbeat_DBN_meter.avg.keys():
243
+ # if 'train' in key:
244
+ # downbeat_DBN_writer.add_scalar(key, downbeat_DBN_meter.avg[key], epoch * num_batch + idx)
245
+ return loss_meter_b, loss_meter_t, beat_meter, beat_DBN_meter, downbeat_meter, downbeat_DBN_meter
246
+
247
+
248
+ def evaluate(model, val_loader, loss_func, loss_tempo, epoch, device):
249
+ print('validating ...', flush=True)
250
+ num_batch = len(val_loader)
251
+ loss_meter_b = AverageMeter()
252
+ loss_meter_t = AverageMeter()
253
+ beat_meter = AverageMeter()
254
+ beat_DBN_meter = AverageMeter()
255
+ downbeat_meter = AverageMeter()
256
+ downbeat_DBN_meter = AverageMeter()
257
+ with torch.no_grad():
258
+ for idx, (dataset_key, data, beat_gt, downbeat_gt, tempo_gt) in tqdm(enumerate(val_loader), total=num_batch):
259
+ #try:
260
+ #data
261
+ data = data.float().to(device)
262
+ #annotation
263
+ beat_gt = beat_gt.to(device)
264
+ downbeat_gt = downbeat_gt.to(device)
265
+ gt = torch.cat([beat_gt.unsqueeze(-1), downbeat_gt.unsqueeze(-1)], dim=-1).float().to(device) #(batch, T', 2)
266
+ #tempo_gt = tempo_gt.reshape(-1).long().to(device)
267
+ tempo_gt = tempo_gt.float().to(device)
268
+
269
+ pred, tempo = model(data)
270
+
271
+ valid_gt = gt.clone()
272
+ valid_gt[gt == -1] = 0
273
+ loss = loss_func(pred, valid_gt)
274
+ weight = (1 - torch.as_tensor(gt == -1, dtype=torch.int32)).to(device)
275
+ loss = (weight * loss).mean(dim=(0, 1)).sum()
276
+
277
+ valid_tempo_gt = tempo_gt.clone()
278
+ valid_tempo_gt[tempo_gt == -1] = 0
279
+ loss_t = loss_tempo(torch.softmax(tempo, dim=-1), valid_tempo_gt)
280
+ weight = (1 - torch.as_tensor(tempo_gt == -1, dtype=torch.int32)).to(device)
281
+ loss_t = (weight * loss_t).mean()
282
+ #except RuntimeError:
283
+ # continue
284
+
285
+ if not dataset_key[0] == 'gtzan':
286
+ loss_meter_b.update('val/loss', loss.item())
287
+ else:
288
+ loss_meter_b.update('val/loss_nontrain', loss.item())
289
+
290
+ if not dataset_key[0] == 'gtzan':
291
+ loss_meter_t.update('val/loss', loss_t.item())
292
+ else:
293
+ loss_meter_t.update('val/loss_nontrain', loss_t.item())
294
+
295
+ #binary_acc = binary_accuracy(pred[:, :, :2], gt[:, :, :2])
296
+ #if not dataset_key[0][0] == 'gtzan':
297
+ # loss_meter.update('val/loss', loss.item())
298
+ # loss_meter.update('val/binary_acc', binary_acc.item())
299
+ #else:
300
+ # loss_meter.update('val/loss_nontrain', loss.item())
301
+ # loss_meter.update('val/binary_acc_nontrain', binary_acc.item())
302
+
303
+
304
+ #try:
305
+ #beat_acc = beat_accuracy(pred[:, :, 0], gt[:, :, 0], FPS/DS_RATIO)
306
+ #for key in beat_acc:
307
+ # beat_meter.update(f'val-{dataset_key[0][0]}/{key}', beat_acc[key])
308
+
309
+ beat_acc_DBN = infer_beat_with_DBN(pred[:, :, 0], beat_gt, beat_tracker, FPS)
310
+ for key in beat_acc_DBN:
311
+ beat_DBN_meter.update(f'val-{dataset_key[0]}/{key}', beat_acc_DBN[key])
312
+
313
+
314
+ #downbeat_acc = beat_accuracy(pred[:, :, 1], gt[:, :, 1], FPS/DS_RATIO)
315
+ #if not dataset_key[0][0] == 'smc':
316
+ # for key in downbeat_acc:
317
+ # downbeat_meter.update(f'val-{dataset_key[0][0]}/{key}', downbeat_acc[key])
318
+
319
+ downbeat_DBN_acc = infer_downbeat_with_DBN(pred[:, :, 0], pred[:, :, 1], downbeat_gt, downbeat_tracker, FPS)
320
+ if not dataset_key[0] == 'smc':
321
+ for key in downbeat_DBN_acc:
322
+ downbeat_DBN_meter.update(f'val-{dataset_key[0]}/{key}', downbeat_DBN_acc[key])
323
+
324
+
325
+ if DEBUG_MODE:
326
+ print('------------validation------------', flush=True)
327
+ print('Epoch: [{0}][{1}/{2}]'.format(epoch+1, idx, num_batch), flush=True)
328
+ print('val beat loss:', loss.item(), flush=True)
329
+ print('train tempo loss:', loss_t.item(), flush=True)
330
+ #print('val batch binary accuracy:', binary_acc.item(), flush=True)
331
+ #print('beat accuracy:', list(beat_acc.values()), flush=True)
332
+ print('beat accuracy with DBN:', list(beat_acc_DBN.values()), flush=True)
333
+ #print('downbeat accuracy:', list(downbeat_acc.values()), flush=True)
334
+ print('downbeat accuracy with DBN:', list(downbeat_DBN_acc.values()), flush=True)
335
+ #except Exception as e:
336
+ # print(e)
337
+
338
+
339
+ if not dataset_key[0] == 'gtzan':
340
+ loss_writer.add_scalar('val/loss_beat', loss_meter_b.avg['val/loss'], epoch)
341
+ loss_writer.add_scalar('val/loss_tempo', loss_meter_t.avg['val/loss'], epoch)
342
+ else:
343
+ loss_writer.add_scalar('val/loss_beat_nontrain', loss_meter_b.avg['val/loss_nontrain'], epoch)
344
+ loss_writer.add_scalar('val/loss_tempo_nontrain', loss_meter_t.avg['val/loss_nontrain'], epoch)
345
+
346
+
347
+ #for key in beat_meter.avg.keys():
348
+ # if 'val' in key:
349
+ # beat_writer.add_scalar(key, beat_meter.avg[key], epoch)
350
+ for key in beat_DBN_meter.avg.keys():
351
+ if 'val' in key:
352
+ beat_DBN_writer.add_scalar(key, beat_DBN_meter.avg[key], epoch)
353
+ #for key in downbeat_meter.avg.keys():
354
+ # if 'val' in key:
355
+ # downbeat_writer.add_scalar(key, downbeat_meter.avg[key], epoch)
356
+ for key in downbeat_DBN_meter.avg.keys():
357
+ if 'val' in key:
358
+ downbeat_DBN_writer.add_scalar(key, downbeat_DBN_meter.avg[key], epoch)
359
+ return loss_meter_b, loss_meter_t, beat_meter, beat_DBN_meter, downbeat_meter, downbeat_DBN_meter
360
+
361
+
362
+ for epoch in range(N_EPOCH):
363
+ print(f'Start Epoch: {epoch + 1:02}', flush=True)
364
+ start_time = time.time()
365
+
366
+ model.train()
367
+ _, _, _, _, _, _ = train(model, train_loader, optimizer, scheduler, loss_func, loss_tempo, CLIP, epoch, DEVICE)
368
+
369
+ model.eval()
370
+ #optimizer._backup_and_load_cache()
371
+ loss_meter_b, loss_meter_t, beat_meter, beat_DBN_meter, downbeat_meter, downbeat_DBN_meter = evaluate(model, val_loader, loss_func, loss_tempo, epoch, DEVICE)
372
+ #optimizer._clear_and_load_backup()
373
+
374
+ scheduler.step(loss_meter_b.avg['val/loss'] + loss_meter_t.avg['val/loss'])
375
+
376
+ #torch.save(model.state_dict(), os.path.join(MODEL_PATH, 'trf_param_'+str(epoch).zfill(3)+'.pt'))
377
+
378
+
379
+ torch.save({ 'epoch': epoch,
380
+ 'state_dict': model.state_dict(),
381
+ 'optimizer': optimizer.state_dict(),
382
+ 'scheduler': scheduler.state_dict(),
383
+ }, os.path.join(MODEL_PATH, 'trf_param_'+str(epoch).zfill(3)+'.pt'))
384
+
385
+
386
+ end_time = time.time()
387
+ epoch_mins, epoch_secs = epoch_time(start_time, end_time)
388
+ print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s', flush=True)
389
+
390
+ print('val beat loss:', loss_meter_b.avg['val/loss'], flush=True)
391
+ print('val tempo loss:', loss_meter_t.avg['val/loss'], flush=True)
392
+ #print('beat accuracy:', [(key.split('/')[-1], beat_meter.avg[key]) for key in beat_meter.avg.keys() if 'val' in key], flush=True)
393
+ print('beat accuracy with DBN:', [(key.split('/')[-1], beat_DBN_meter.avg[key]) for key in beat_DBN_meter.avg.keys() if 'val' in key], flush=True)
394
+ #print('downbeat accuracy:', [(key.split('/')[-1], downbeat_meter.avg[key]) for key in downbeat_meter.avg.keys() if 'val' in key], flush=True)
395
+ print('downbeat accuracy with DBN:', [(key.split('/')[-1], downbeat_DBN_meter.avg[key]) for key in downbeat_DBN_meter.avg.keys() if 'val' in key], flush=True)
396
+ print('\n')
Beat-Transformer/code/train.sh ADDED
@@ -0,0 +1,9 @@
1
+ #!/bin/sh
2
+ python ./code/train.py 0 0 0;
3
+ python ./code/train.py 0 1 0;
4
+ python ./code/train.py 0 2 0;
5
+ python ./code/train.py 0 3 0;
6
+ python ./code/train.py 0 4 0;
7
+ python ./code/train.py 0 5 0;
8
+ python ./code/train.py 0 6 0;
9
+ python ./code/train.py 0 7 0
Beat-Transformer/code/utils.py ADDED
@@ -0,0 +1,301 @@
1
+ import numpy as np
2
+ import json, sys, os
3
+ from torch import nn
4
+ import torch
5
+ from torch.distributions import kl_divergence, Normal
6
+ from torch.optim.lr_scheduler import ExponentialLR
7
+ import random
8
+ import madmom
9
+
10
+ class AverageMeter(object):
11
+ """Computes and stores the average and current value"""
12
+ def __init__(self):
13
+ self.reset()
14
+
15
+ def reset(self):
16
+ self.val = {}
17
+ self.avg = {}
18
+ self.sum = {}
19
+ self.count = {}
20
+
21
+ def update(self, key, val, n=1):
22
+ if not key in self.val:
23
+ self.val[key] = val
24
+ self.sum[key] = val * n
25
+ self.count[key] = n
26
+ self.avg[key] = self.sum[key] / self.count[key]
27
+ else:
28
+ self.val[key] = val
29
+ self.sum[key] += val * n
30
+ self.count[key] += n
31
+ self.avg[key] = self.sum[key] / self.count[key]
32
+
33
+ def binary_accuracy(beat_pred, beat_gt):
34
+ #beat: (B, T)
35
+ weight = (1 - torch.as_tensor(beat_gt == -1, dtype=torch.int32))
36
+ beat_pred = torch.as_tensor((torch.sigmoid(beat_pred) >= 0.5), dtype=torch.int32)
37
+ beat_gt = torch.as_tensor((beat_gt > 0.6), dtype=torch.int32)
38
+ positives = torch.as_tensor((beat_pred == beat_gt), dtype=torch.int32)
39
+ positives = positives * weight
40
+ binary_accuracy = positives.sum() / (weight.sum() + 1e-4)
41
+ return binary_accuracy
42
+
43
+ def beat_accuracy(beat_pred, beat_gt, fps):
44
+ #beat_pred: (B, L), estimation result
45
+ weight = (1 - torch.as_tensor(beat_gt == -1, dtype=torch.int32))
46
+ beat_pred = torch.sigmoid(beat_pred) * weight
47
+ beat_pred = torch.as_tensor((beat_pred - 0.5) > 0, dtype=torch.int32).detach().cpu().numpy()
48
+ #beat_pred = (beat_pred / fps)
49
+ beat_gt = torch.as_tensor((beat_gt - 0.5) > 0, dtype=torch.int32).detach().cpu().numpy()
50
+ #beat_gt = (beat_gt / fps)
51
+ #print(beat_gt)
52
+ batch_score = []
53
+ for idx in range(beat_pred.shape[0]):
54
+ #if (beat_gt[idx] == 0).all():
55
+ # continue
56
+ if np.sum(beat_gt[idx]) < 2:
57
+ continue
58
+ beat_pred_batch = np.nonzero(beat_pred[idx])[0] / fps
59
+ beat_gt_batch = np.nonzero(beat_gt[idx])[0] / fps
60
+ #print(beat_gt_batch)
61
+ score = madmom.evaluation.beats.BeatEvaluation(beat_pred_batch, beat_gt_batch)
62
+ batch_score.append(score)
63
+ batch_score = madmom.evaluation.beats.BeatMeanEvaluation(batch_score)
64
+ return {"fmeasure": batch_score.fmeasure, \
65
+ #"cemgil": batch_score.cemgil, \
66
+ #"cmlc": batch_score.cmlc, \
67
+ "cmlt": batch_score.cmlt, \
68
+ #"amlc": batch_score.amlc, \
69
+ "amlt": batch_score.amlt}
70
+
71
+
72
+ def infer_beat_with_DBN(beat_pred, beat_gt, dbn_model, fps):
73
+ #beat_pred: (B, L), estimation result
74
+ weight = (1 - torch.as_tensor(beat_gt == -1, dtype=torch.int32))
75
+ beat_pred = (torch.sigmoid(beat_pred) * weight).detach().cpu().numpy()
76
+ #beat_pred = (beat_pred / fps)
77
+ beat_gt = torch.as_tensor((beat_gt - 0.5) > 0, dtype=torch.int32).detach().cpu().numpy()
78
+ batch_score = []
79
+ for idx in range(beat_pred.shape[0]):
80
+ #if (beat_gt[idx] == 0).all():
81
+ # continue
82
+ if np.sum(beat_gt[idx]) < 2:
83
+ continue
84
+ try:
85
+ beat_pred_batch = dbn_model(beat_pred[idx])
86
+ except:
87
+ return {"fmeasure": 0, "cmlt": 0, "amlt": 0}
88
+ beat_gt_batch = np.nonzero(beat_gt[idx])[0] / fps
89
+ score = madmom.evaluation.beats.BeatEvaluation(beat_pred_batch, beat_gt_batch)
90
+ batch_score.append(score)
91
+ batch_score = madmom.evaluation.beats.BeatMeanEvaluation(batch_score)
92
+ return {"fmeasure": batch_score.fmeasure if not np.isnan(batch_score.fmeasure) else 0, \
93
+ #"cemgil": batch_score.cemgil, \
94
+ #"cmlc": batch_score.cmlc, \
95
+ "cmlt": batch_score.cmlt if not np.isnan(batch_score.cmlt) else 0, \
96
+ #"amlc": batch_score.amlc, \
97
+ "amlt": batch_score.amlt if not np.isnan(batch_score.amlt) else 0}
98
+
99
+
100
+ def infer_downbeat_with_DBN(beat_pred, downbeat_pred, downbeat_gt, dbn_model, fps):
101
+ #beat_pred: (B, L), estimation result
102
+ beat_pred = torch.sigmoid(beat_pred).detach().cpu()
103
+ downbeat_pred = torch.sigmoid(downbeat_pred).detach().cpu()
104
+ combined_act = torch.cat((torch.maximum(beat_pred - downbeat_pred, torch.zeros(beat_pred.shape)).unsqueeze(-1), downbeat_pred.unsqueeze(-1)), dim=-1)
105
+ #beat_pred = (beat_pred / fps)
106
+ weight = (1 - torch.as_tensor(downbeat_gt == -1, dtype=torch.int32)).unsqueeze(-1).detach().cpu()
107
+ combined_act = (combined_act * weight).numpy()
108
+
109
+ beat_gt = torch.as_tensor((downbeat_gt - 0.5) > 0, dtype=torch.int32).detach().cpu().numpy()
110
+ batch_score = []
111
+ for idx in range(beat_pred.shape[0]):
112
+ #if (beat_gt[idx] == 0).all():
113
+ # continue
114
+ if np.sum(beat_gt[idx]) < 2:
115
+ continue
116
+ try:
117
+ beat_pred_batch = dbn_model(combined_act[idx])
118
+ beat_pred_batch = beat_pred_batch[beat_pred_batch[:, 1]==1][:, 0]
119
+ except:
120
+ return {"fmeasure": 0, "cmlt": 0, "amlt": 0}
121
+ beat_gt_batch = np.nonzero(beat_gt[idx])[0] / fps
122
+ score = madmom.evaluation.beats.BeatEvaluation(beat_pred_batch, beat_gt_batch)
123
+ batch_score.append(score)
124
+ batch_score = madmom.evaluation.beats.BeatMeanEvaluation(batch_score)
125
+ return {"fmeasure": batch_score.fmeasure if not np.isnan(batch_score.fmeasure) else 0, \
126
+ #"cemgil": batch_score.cemgil, \
127
+ #"cmlc": batch_score.cmlc, \
128
+ "cmlt": batch_score.cmlt if not np.isnan(batch_score.cmlt) else 0, \
129
+ #"amlc": batch_score.amlc, \
130
+ "amlt": batch_score.amlt if not np.isnan(batch_score.amlt) else 0}
131
+
132
+
133
+
134
+ def load_dataset_path(fn='model_config.json'):
135
+ with open(fn) as f:
136
+ paths = json.load(f)['dataset_path']
137
+
138
+ train_val_path = paths['hpc_data_path']
139
+ return train_val_path
140
+
141
+ def load_params_dict(key, fn='model_config.json'):
142
+ with open(fn) as f:
143
+ dict = json.load(f)[key]
144
+ return dict
145
+
146
+
147
+ def count_parameters(model):
148
+ return sum(p.numel() for p in model.parameters() if p.requires_grad)
149
+
150
+
151
+ def init_weights(m):
152
+ for name, param in m.named_parameters():
153
+ if 'weight' in name:
154
+ nn.init.normal_(param.data, mean=0, std=0.01)
155
+ else:
156
+ nn.init.constant_(param.data, 0)
157
+
158
+ def standard_normal(shape):
159
+ N = Normal(torch.zeros(shape), torch.ones(shape))
160
+ if torch.cuda.is_available():
161
+ N.loc = N.loc.cuda()
162
+ N.scale = N.scale.cuda()
163
+ return N
164
+
165
+
166
+ def loss_function_vae(recon_pitch, pitch, dist, pitch_criterion, normal,
167
+ weights=(1, .1)):
168
+ # bs = dist.mean.size(0)
169
+ #print(recon_pitch.shape, pitch.shape, recon_rhythm.shape, rhythm.shape)
170
+ pitch_loss = pitch_criterion(recon_pitch, pitch)
171
+ kl_div = kl_divergence(dist, normal).mean()
172
+ loss = weights[0] * pitch_loss + weights[1] * kl_div
173
+ return loss, pitch_loss, kl_div
174
+
175
+ def loss_function_discr(recon_mask, mask_gt, dist, mask_criterion, normal,
176
+ weights=(1, .1)):
177
+ # bs = dist.mean.size(0)
178
+ #print(recon_pitch.shape, pitch.shape, recon_rhythm.shape, rhythm.shape)
179
+ mask_loss = mask_criterion(recon_mask, mask_gt)
180
+ kl_div = kl_divergence(dist, normal).mean()
181
+ loss = weights[0] * mask_loss + weights[1] * kl_div
182
+ return loss, mask_loss, kl_div
183
+
184
+ def get_complement(mask_gt):
185
+ #mask_gt: (BT, 128)
186
+ complement = torch.zeros(mask_gt.shape).long().cuda()
187
+ for i in range(mask_gt.shape[0]):
188
+ if random.random() < 0.5:
189
+ low = max(mask_gt[i].max(0)[-1].item() - 5, 0)
190
+ high = min(mask_gt[i].max(0)[-1].item() + 6, 127)
191
+ else:
192
+ low = max(mask_gt[i].max(0)[-1].item() - 6, 0)
193
+ high = min(mask_gt[i].max(0)[-1].item() + 5, 127)
194
+ #print(low, high)
195
+ complement[i, low: high+1] = 1.
196
+ return complement - mask_gt
197
+
198
+
199
+ # Useful function for how long epochs take
200
+ def epoch_time(start_time, end_time):
201
+ elapsed_time = end_time - start_time
202
+ elapsed_mins = int(elapsed_time / 60)
203
+ elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
204
+ return elapsed_mins, elapsed_secs
205
+
206
+
207
+ class MinExponentialLR(ExponentialLR):
208
+ def __init__(self, optimizer, gamma, minimum, last_epoch=-1):
209
+ self.min = minimum
210
+ super(MinExponentialLR, self).__init__(optimizer, gamma, last_epoch=-1)
211
+
212
+ def get_lr(self):
213
+ return [
214
+ max(base_lr * self.gamma ** self.last_epoch, self.min)
215
+ for base_lr in self.base_lrs
216
+ ]
217
+
218
+
219
+ def scheduled_sampling(i, high=0.7, low=0.05):
220
+ x = 10 * (i - 0.5)
221
+ z = 1 / (1 + np.exp(x))
222
+ y = (high - low) * z + low
223
+ return y
224
+
225
+
226
+
227
+ def piano_roll_to_target(pr):
228
+ # pr: (32, 128, 3), dtype=bool
229
+
230
+ # Assume that "not (first_layer or second layer) = third_layer"
231
+ pr[:, :, 1] = np.logical_not(np.logical_or(pr[:, :, 0], pr[:, :, 2]))
232
+ # To int dtype can make addition work
233
+ pr = pr.astype(int)
234
+ # Initialize a matrix to store the duration of a note on the (32, 128) grid
235
+ pr_matrix = np.zeros((32, 128))
236
+
237
+ for i in range(31, -1, -1):
238
+ # At each iteration
239
+ # 1. Assure that the second layer accumulates the note duration
240
+ # 2. collect the onset notes in time step i, and mark it on the matrix.
241
+
242
+ # collect
243
+ onset_idx = np.where(pr[i, :, 0] == 1)[0]
244
+ pr_matrix[i, onset_idx] = pr[i, onset_idx, 1] + 1
245
+ if i == 0:
246
+ break
247
+ # Accumulate
248
+ # pr[i - 1, :, 1] += pr[i, :, 1]
249
+ # pr[i - 1, onset_idx, 1] = 0 # the onset note should be set 0.
250
+ pr[i, onset_idx, 1] = 0 # the onset note should be set 0.
251
+ pr[i - 1, :, 1] += pr[i, :, 1]
252
+
253
+ return pr_matrix
254
+
255
+
256
+ def target_to_3dtarget(pr_mat, max_note_count=11, max_pitch=107, min_pitch=22,
257
+ pitch_pad_ind=88, dur_pad_ind=2,
258
+ pitch_sos_ind=86, pitch_eos_ind=87):
259
+ """
260
+ :param pr_mat: (32, 128) matrix. pr_mat[t, p] indicates a note of pitch p,
261
+ started at time step t, has a duration of pr_mat[t, p] time steps.
262
+ :param max_note_count: the maximum number of notes in a time step,
263
+ including <sos> and <eos> tokens.
264
+ :param max_pitch: the highest pitch in the dataset.
265
+ :param min_pitch: the lowest pitch in the dataset.
266
+ :param pitch_pad_ind: see return value.
267
+ :param dur_pad_ind: see return value.
268
+ :param pitch_sos_ind: sos token.
269
+ :param pitch_eos_ind: eos token.
270
+ :return: pr_mat3d is a (32, max_note_count, 6) matrix. In the last dim,
271
+ the 0th column is for pitch, 1: 6 is for duration in binary repr. Output is
272
+ padded with <sos> and <eos> tokens in the pitch column, but with pad token
273
+ for dur columns.
274
+ """
275
+ pitch_range = max_pitch - min_pitch + 1 # including pad
276
+ pr_mat3d = np.ones((32, max_note_count, 6), dtype=int) * dur_pad_ind
277
+ pr_mat3d[:, :, 0] = pitch_pad_ind
278
+ pr_mat3d[:, 0, 0] = pitch_sos_ind
279
+ cur_idx = np.ones(32, dtype=int)
280
+ for t, p in zip(*np.where(pr_mat != 0)):
281
+ pr_mat3d[t, cur_idx[t], 0] = p - min_pitch
282
+ binary = np.binary_repr(int(pr_mat[t, p]) - 1, width=5)
283
+ pr_mat3d[t, cur_idx[t], 1: 6] = \
284
+ np.fromstring(' '.join(list(binary)), dtype=int, sep=' ')
285
+ cur_idx[t] += 1
286
+ pr_mat3d[np.arange(0, 32), cur_idx, 0] = pitch_eos_ind
287
+ return pr_mat3d
288
+
289
+
290
+ def get_low_high_dur_count(pr_mat):
291
+ # pr_mat (32, 128)
292
+ # return the maximum duration
293
+ # return the pitch range
294
+ # return the number of notes at each column
295
+
296
+ pitch_range = np.where(pr_mat != 0)[1]
297
+ low_pitch = pitch_range.min()
298
+ high_pitch = pitch_range.max()
299
+ pitch_dur = pr_mat.max()
300
+ num_notes = np.count_nonzero(pr_mat, axis=-1)
301
+ return low_pitch, high_pitch, pitch_dur, num_notes
Beat-Transformer/code/visiualize_attention.py ADDED
@@ -0,0 +1,150 @@
1
+ import os
2
+ os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
3
+ import madmom
4
+ import torch
5
+ import numpy as np
6
+ from tqdm import tqdm
7
+ from torch.utils.data import DataLoader
8
+ from DilatedTransformer import Demixed_DilatedTransformerModel
9
+ from spectrogram_dataset import audioDataset
10
+ import scipy
11
+ #import seaborn as sns
12
+ import matplotlib.pyplot as plt
13
+
14
+ from utils import AverageMeter
15
+
16
+ import warnings
17
+ warnings.filterwarnings('ignore')
18
+
19
+
20
+ #data
21
+ SAMPLE_SIZE = None
22
+ FPS = 44100/1024
23
+ NUM_FOLDS = 8
24
+ FOLD = 0
25
+ #model
26
+ DEVICE='cuda:0'
27
+ NORM_FIRST=True
28
+ ATTN_LEN=5
29
+ INSTR=5
30
+ NTOKEN=2
31
+ DMODEL=256
32
+ NHEAD=8
33
+ DHID=1024
34
+ NLAYER=9
35
+ #directories
36
+ DATASET_PATH = './data/demix_spectrogram_data.npz'
37
+ ANNOTATION_PATH = 'data/full_beat_annotation.npz'
38
+ MODEL_PATH = "./checkpoints/fold_6_trf_param.pt"
39
+ DATA_TO_LOAD = ['gtzan'] #'carnetic', 'harmonix' # ballroom, 'gtzan', 'hainsworth', 'smc'
40
+ TEST_ONLY = ['gtzan']
41
+ DEMO_SAVE_ROOT = './save/visualization'
42
+ if not os.path.exists(DEMO_SAVE_ROOT):
43
+ os.mkdir(DEMO_SAVE_ROOT)
44
+
45
+
46
+ model = Demixed_DilatedTransformerModel(attn_len=ATTN_LEN,
47
+ instr=INSTR,
48
+ ntoken=NTOKEN,
49
+ dmodel=DMODEL,
50
+ nhead=NHEAD,
51
+ d_hid=DHID,
52
+ nlayers=NLAYER,
53
+ norm_first=NORM_FIRST
54
+ )
55
+
56
+ model.load_state_dict(torch.load(MODEL_PATH, map_location=torch.device('cpu'))['state_dict'])
57
+ model.to(DEVICE)
58
+ model.eval()
59
+
60
+
61
+ dataset = audioDataset(data_to_load=DATA_TO_LOAD,
62
+ test_only_data = TEST_ONLY,
63
+ data_path = DATASET_PATH,
64
+ annotation_path = ANNOTATION_PATH,
65
+ fps = FPS,
66
+ sample_size = None,
67
+ num_folds = NUM_FOLDS)
68
+
69
+ train_set, val_set, test_set = dataset.get_fold(fold=0)
70
+ loader = DataLoader(test_set, batch_size=1, shuffle=False)
71
+
72
+ beat_tracker = madmom.features.beats.DBNBeatTrackingProcessor(min_bpm=55.0, max_bpm=215.0, fps=FPS, transition_lambda=10, threshold=0.05)
73
+ downbeat_tracker = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], min_bpm=55.0, max_bpm=215.0, fps=FPS, transition_lambda=10)
74
+
75
+ #thresh_beat_meter = AverageMeter()
76
+ #pick_beat_meter = AverageMeter()
77
+ beat_DBN_meter = AverageMeter()
78
+ #thresh_downbeat_meter = AverageMeter()
79
+ #pick_downbeat_meter = AverageMeter()
80
+ downbeat_DBN_meter = AverageMeter()
81
+
82
+ with torch.no_grad():
83
+ for idx, (dataset_key, data, beat, downbeat, tempo, root) in tqdm(enumerate(loader), total=len(loader)):
84
+ #if idx == 0:
85
+ # continue
86
+ data = data.float().to(DEVICE) #(1, 5, T', 128)
87
+
88
+ dataset = dataset_key[0]
89
+ print(root)
90
+
91
+ #inference
92
+ pred, pred_t, attn = model.inference(data)
93
+
94
+ beat_pred = torch.sigmoid(pred[0, :, 0]).detach().cpu().numpy()
95
+ #np.savetxt(os.path.join(save_dir, 'beat_activation.txt'), beat_pred[:, np.newaxis])
96
+ downbeat_pred = torch.sigmoid(pred[0, :, 1]).detach().cpu().numpy()
97
+ #np.savetxt(os.path.join(save_dir, 'downbeat_activation.txt'), downbeat_pred[:, np.newaxis])
98
+
99
+ #gt beat
100
+ beat_gt = torch.nonzero(beat[0]>.5)[:, 0].detach().numpy() / (FPS)
101
+ dnb_beat_pred = beat_tracker(beat_pred)
102
+
103
+ downbeat_gt = torch.nonzero(downbeat[0]>.5)[:, 0].detach().numpy() / (FPS)
104
+ combined_act = np.concatenate((np.maximum(beat_pred - downbeat_pred, np.zeros(beat_pred.shape))[:, np.newaxis], downbeat_pred[:, np.newaxis]), axis=-1) #(T, 2)
105
+ dbn_downbeat_pred = downbeat_tracker(combined_act)
106
+ dbn_downbeat_pred = dbn_downbeat_pred[dbn_downbeat_pred[:, 1]==1][:, 0]
107
+
108
+ beat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dnb_beat_pred, beat_gt)
109
+
110
+ downbeat_score_DBN = madmom.evaluation.beats.BeatEvaluation(dbn_downbeat_pred, downbeat_gt)
111
+
112
+
113
+ fig = plt.figure(figsize=(20, 60))
114
+ for i in range(1, 10):
115
+ layer_attn = attn[i].transpose(-2, -1).squeeze(0).cpu().detach().numpy()
116
+ #layer_attn = np.mean(layer_attn, axis=0)
117
+ layer_attn = layer_attn[2]
118
+ #print(layer_attn.shape)
119
+
120
+ fig.add_subplot(9, 4, 4*i-3)
121
+ plt.imshow(layer_attn[0, :, :], cmap='viridis')
122
+ plt.vlines(torch.nonzero(beat[0, :]>.5)[:, 0].detach().numpy(), 0, layer_attn.shape[-1], label='Beats', color='r', linestyle=':', linewidth=.01)
123
+ plt.hlines(torch.nonzero(beat[0, :]>.5)[:, 0].detach().numpy(), 0, layer_attn.shape[-1], label='Beats', color='g', linestyle=':', linewidth=.01)
124
+
125
+ fig.add_subplot(9, 4, 4*i-2)
126
+ plt.imshow(layer_attn[1, :, :], cmap='viridis')
127
+ plt.vlines(torch.nonzero(beat[0, :]>.5)[:, 0].detach().numpy(), 0, layer_attn.shape[-1], label='Beats', color='r', linestyle=':', linewidth=.01)
128
+ plt.hlines(torch.nonzero(beat[0, :]>.5)[:, 0].detach().numpy(), 0, layer_attn.shape[-1], label='Beats', color='g', linestyle=':', linewidth=.01)
129
+
130
+ fig.add_subplot(9, 4, 4*i-1)
131
+ plt.imshow(layer_attn[2, :, :], cmap='viridis')
132
+ plt.vlines(torch.nonzero(beat[0, :]>.5)[:, 0].detach().numpy(), 0, layer_attn.shape[-1], label='Beats', color='r', linestyle=':', linewidth=.01)
133
+ plt.hlines(torch.nonzero(beat[0, :]>.5)[:, 0].detach().numpy(), 0, layer_attn.shape[-1], label='Beats', color='g', linestyle=':', linewidth=.01)
134
+
135
+ fig.add_subplot(9, 4, 4*i)
136
+ plt.imshow(layer_attn[3, :, :], cmap='viridis')
137
+ plt.vlines(torch.nonzero(beat[0, :]>.5)[:, 0].detach().numpy(), 0, layer_attn.shape[-1], label='Beats', color='r', linestyle=':', linewidth=.01)
138
+ plt.hlines(torch.nonzero(beat[0, :]>.5)[:, 0].detach().numpy(), 0, layer_attn.shape[-1], label='Beats', color='g', linestyle=':', linewidth=.01)
139
+
140
+ plt.show()
141
+ #fig = ax.get_figure()
142
+ print('saving...')
143
+ plt.savefig(f"{DEMO_SAVE_ROOT}/{root[0].split('/')[-1].replace('.wav', '')}_attention_patterns.pdf", format='pdf', dpi=1200)
144
+ #ax = sns.heatmap(attn[1])
145
+ #print(attn)
146
+
147
+
148
+ print('beat accuracy:', beat_score_DBN.fmeasure, beat_score_DBN.cmlt, beat_score_DBN.amlt)
149
+ print('downbeat accuracy:', downbeat_score_DBN.fmeasure, downbeat_score_DBN.cmlt, downbeat_score_DBN.amlt)
150
+ break
Beat-Transformer/data/SMC/.DS_Store ADDED
Binary file (6.15 kB). View file
 
Beat-Transformer/data/SMC/HolzapfelEtAl12-taslp.pdf ADDED
Binary file (914 kB). View file
 
Beat-Transformer/data/SMC/SMC_MIREX_Readme.txt ADDED
@@ -0,0 +1,58 @@
1
+ SMC_MIREX Readme
2
+
3
+ Hi,
4
+
5
+ In the archive there are 4 folders:
6
+
7
+ SMC_MIREX_Annotations
8
+ SMC_MIREX_Annotations_05_08_2014
9
+ SMC_MIREX_Audio
10
+ SMC_MIREX_Tags
11
+
12
+ They contain the following:
13
+
14
+ SMC_MIREX_Annotations
15
+ ---------------------
16
+
17
+ Ground truth beat annotations in seconds
18
+ The filenames are a bit confusing, their structure is not so important, but to explain:
19
+
20
+ SMC_001_2_1_1_a.txt means
21
+
22
+ file: SMC_001
23
+
24
+ other metrical interpretation: 2_1_1 (beats tapped in a 2:1 ratio, starting on beat 1 could also be acceptable) - NOTE this is not tested nor should it be used!
25
+
26
+ a: gives the name of the annotator (again this information is not useful)
27
+
28
+ SMC_MIREX_Annotations_05_08_2014
29
+ --------------------------------
30
+
31
+ The content of this directory is the same as above, except for the annotations for excerpts 056, 137, 153, 203 and 257 which have been updated to remove the final beat annotations which were out of range. Thanks to Andy Lambert for pointing this out.
32
+
33
+
34
+ SMC_MIREX_Audio
35
+ ---------------
36
+
37
+ The audio files (mono .wav at 44.1khz)
38
+ There are 217 in total, but they are numbered running up to 289.
39
+ Note, files 271 - 289 are "easy" compared to the rest which are hard.
40
+
41
+ SMC_MIREX_Tags
42
+ --------------
43
+
44
+ These are text files with tags that correspond to why the annotation was difficult, along with a code, e.g. f1 where 'f' is the name of the annotator and '1' is a confidence level (again this information is not very relevant for you).
45
+ These tags are probably not so useful, but might be good for post-hoc analysis of results.
46
+
47
+ Acknowledgment
48
+ --------------
49
+ If you use the dataset in your work, please cite the following paper:
50
+
51
+ Holzapfel, A.; Davies, M.E.P.; Zapata, J.R.; Oliveira, J.L.; Gouyon, F.; , "Selective Sampling for Beat Tracking Evaluation," Audio, Speech, and Language Processing, IEEE Transactions on , vol.20, no.9, pp.2539-2548, Nov. 2012
52
+ doi: 10.1109/TASL.2012.2205244
53
+ URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6220849&isnumber=6268383
54
+
55
+
56
+ Any questions, send me a mail: mdavies@inesctec.pt
57
+
58
+
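
A small illustrative helper (not part of the SMC distribution) for splitting an annotation filename into the pieces described above and loading the beat times; it assumes one timestamp in seconds per line:

import numpy as np

def parse_smc_annotation(path):
    stem = path.rsplit('/', 1)[-1].rsplit('.', 1)[0]   # e.g. 'SMC_001_2_1_1_a'
    parts = stem.split('_')
    excerpt_id = '_'.join(parts[:2])                   # 'SMC_001'
    metrical_code = '_'.join(parts[2:-1])              # '2_1_1' (not tested; should not be used)
    annotator = parts[-1]                              # 'a'
    beats = np.loadtxt(path)                           # beat annotations in seconds
    return excerpt_id, metrical_code, annotator, beats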
Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_048.tag ADDED
@@ -0,0 +1,3 @@
1
+ quiet accompaniment
2
+ slow tempo
3
+ o1
Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_133.tag ADDED
@@ -0,0 +1,8 @@
 
+ quiet accompaniment
2
+ slow tempo
3
+ expressive timing
4
+ (poor sound quality)
5
+ lack of transient sounds
6
+ excerpt is the introduction of the song
7
+ (ternary meter)
8
+ m2
Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_158.tag ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ slow tempo
2
+ changing time signature
3
+ expressive timing
4
+ (missing bass)
5
+ a3
Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_208.tag ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ expressive timing
2
+ lack of transient sounds
3
+ ternary meter
4
+ (missing bass)
5
+ a2
Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_217.tag ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ slow tempo
2
+ expressive timing
3
+ lack of transient sounds
4
+ (missing bass)
5
+ a1
Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_261.tag ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradual tempo change
2
+ expressive timing
3
+ (missing bass)
4
+ (strong syncopation)
5
+ a2
Beat-Transformer/data/SMC/SMC_MIREX_Tags/SMC_279.tag ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ none
2
+ m1
Beat-Transformer/data/demix_spectrogram_data.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ Download our processed data at https://drive.google.com/file/d/1LamSAEY5QsnY57cF6qH_0niesGGKkHtI/view?usp=sharing
2
+ Each piece is demixed into 5 stems and saved as a spectrogram of shape (time, stem=5, mel_bin=128).
3
+ This file includes full processed spectrogram data from Ballroom, Hainsworth, Carnetic, Harmonix, SMC, and GTZAN.
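A minimal sketch of inspecting the downloaded archive, assuming it unpacks to a NumPy `.npz` file (the local filename and the per-key layout are assumptions; only the (time, 5, 128) shape is stated above):

```python
import numpy as np

# assumed local filename after downloading from the Google Drive link above
data = np.load("demix_spectrogram_data.npz", allow_pickle=True)

# per the note above, each entry should be a (time, stem=5, mel_bin=128) spectrogram
for name in list(data.files)[:3]:
    spec = data[name]
    print(name, spec.shape)  # expected: (T, 5, 128)
```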
Beat-Transformer/gtzan_test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
Beat-Transformer/hainsworth_1train.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
Beat-Transformer/sft.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ from pydub import AudioSegment
4
+ from scipy.ndimage import maximum_filter1d
5
+ import json
6
+ import hashlib
7
+ import tqdm
8
+ import os
9
+
10
+ from scipy.interpolate import interp1d
11
+ from scipy.signal import argrelmax
12
+
13
+
14
+
15
+ def infer_tempo(beats, fps, hist_smooth=4, no_tempo=-1):
16
+ import madmom
17
+ ibis = np.diff(beats) * fps
18
+ bins = np.bincount(np.round(ibis).astype(int))
19
+ if not bins.any():
20
+ return no_tempo
21
+ if hist_smooth > 0:
22
+ bins = madmom.audio.signal.smooth(bins, hist_smooth)
23
+ intervals = np.arange(len(bins))
24
+ interpolation_fn = interp1d(intervals, bins, 'quadratic')
25
+ intervals = np.arange(intervals[0], intervals[-1], 0.001)
26
+ tempi = 60.0 * fps / intervals
27
+ print(tempi)
28
+ bins = interpolation_fn(intervals)
29
+ peaks = argrelmax(bins, mode='wrap')[0]
30
+ if len(peaks) == 0:
31
+ return no_tempo
32
+ else:
33
+ sorted_peaks = peaks[np.argsort(bins[peaks])[::-1]]
34
+ return tempi[sorted_peaks][0]
35
+
36
+
37
+ def quantise(beats):
38
+ return [int(round(b * 25)) / 25 for b in beats]  # snap beat times to a 1/25 s (40 ms) grid
39
+
40
+
41
+ def get_sample(excerpt_path, beats, existed_uuid_list, split="train", key="gtzan", type="beat"):
42
+ # print(f'processing {excerpt_path} ...')
43
+ # print(f'beats: {beats}')
44
+
45
+ data_sample = {
46
+ "instruction": "Identify and list the timestamps of all beats in this audio track. Use the format of `0.0s,0.54s,1.0ss, ...`",
47
+ "input": f"<|SOA|>{excerpt_path[len(PATH)+1:]}<|EOA|>",
48
+ "output": ",".join([f"{b}s" for b in beats]),
49
+ "uuid": "",
50
+ "audioid": excerpt_path[len(PATH)+1:], # exclude the '/' at the beginning, to enable os.join.path
51
+ "split": [split],
52
+ "task_type": {"major": ["global_MIR"], "minor": ["beat_tracking"]},
53
+ "domain": "music",
54
+ "source": key,
55
+ "other": {}
56
+ }
57
+ if type == "downbeat":
58
+ data_sample["instruction"] = "Identify and list the timestamps of all downbeats in this audio track. Use the format of `0.0s,1.54s,3.0s, ...`"
59
+ data_sample["task_type"]["minor"] = ["downbeat_tracking"]
60
+
61
+ # change uuid
62
+ uuid_string = f"{data_sample['instruction']}#{data_sample['input']}#{data_sample['output']}"
63
+ unique_id = hashlib.md5(uuid_string.encode()).hexdigest()[:16] # keep only the first 16 hex characters
64
+ if unique_id in existed_uuid_list:
65
+ sha1_hash = hashlib.sha1(uuid_string.encode()).hexdigest()[:16] # truncated to 16 chars so the lengths match; on a collision, concatenate the MD5 and SHA1 digests and take a new MD5 as the final UUID
66
+ unique_id = hashlib.md5((unique_id + sha1_hash).encode()).hexdigest()[:16]
67
+ existed_uuid_list.add(unique_id)
68
+ data_sample["uuid"] = f"{unique_id}"
69
+ return data_sample
70
+
71
+
72
+ EXCERPT_LENGTH = 30 * 1000 # 30 seconds in milliseconds
73
+ MIN_LENGTH = 5 * 1000 # 5 seconds in milliseconds
74
+
75
+ PATH = '/work/fast_data_yinghao/Beat-Transformer/data'
76
+ load_annotation = np.load(f'{PATH}/full_beat_annotation.npz', allow_pickle=True)
77
+
78
+ for key in ["ballroom"]: #"rwc", "ballroom", "gtzan", "hainsworth", "carnetic", "smc"
79
+ # ballroom, GTZAN 30s, beat & downbeat
80
+ # hainsworth, (RWC,) carnetic: split audio, beat & downbeat
81
+ # smc: split audio, beat
82
+ annotation = load_annotation[key]
83
+
84
+ with open(f'{PATH}/audio_lists/{key}.txt', 'r') as f:
85
+ audio_root = f.readlines()
86
+ audio_root = [item.replace('\n', '') for item in audio_root]
87
+ audio_root = [f'{PATH}/{item[37:]}' for item in audio_root]  # strip the original hard-coded path prefix (first 37 characters) and re-root under PATH
88
+ assert(len(annotation) == len(audio_root))
89
+
90
+ existed_uuid_list = set()
91
+ data_samples = []
92
+ for idx, ann in tqdm.tqdm(enumerate(annotation)):
93
+ # print(f'processing {audio_root[idx]} ...')
94
+ audio_path = audio_root[idx]
95
+ if len(ann.shape) == 1:
96
+ beats = quantise(ann)
97
+ downbeats = None
98
+ elif key != "rwc":
99
+ beats = quantise(ann[:,0])
100
+ downbeats = quantise(ann[ann[:, 1] == 1, 0])
101
+ else:
102
+ raise NotImplementedError
103
+ # beat = madmom.utils.quantize_events(annotation[:, 0], fps=self.fps, length=len(song))
104
+ # beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
105
+ # beat = np.maximum(beat, maximum_filter1d(beat, size=3) * 0.5)
106
+ # downbeat = annotation[annotation[:, 1] == 1][:, 0]
107
+ # downbeat = madmom.utils.quantize_events(downbeat, fps=self.fps, length=len(song))
108
+ # downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
109
+ # downbeat = np.maximum(downbeat, maximum_filter1d(downbeat, size=3) * 0.5)
110
+
111
+ # print(f'tempo: {tempo}')
112
+
113
+ if key =="ballroom":
114
+ # tempo = infer_tempo(beats, fps=100)
115
+ sample = get_sample(audio_path, beats, existed_uuid_list, key=key)
116
+ data_samples.append(sample)
117
+ sample = get_sample(audio_path, downbeats, existed_uuid_list, key=key, type="downbeat")
118
+ data_samples.append(sample)
119
+ elif key == "gtzan":
120
+ if "jazz.00054" in audio_path:
121
+ continue
122
+ sample = get_sample(audio_path, beats, existed_uuid_list, split="test", key=key)
123
+ data_samples.append(sample)
124
+ if downbeats:
125
+ sample = get_sample(audio_path, downbeats, existed_uuid_list, split="test", key=key, type="downbeat")
126
+ data_samples.append(sample)
127
+ else:
128
+ audio = AudioSegment.from_file(audio_path)
129
+ for i in range(0, len(audio), EXCERPT_LENGTH):
130
+ end = i + EXCERPT_LENGTH
131
+ if end < len(audio):
132
+ excerpt = audio[i:end]
133
+ else:
134
+ excerpt = audio[i:]
135
+ # Discard short audio clips
136
+ if len(excerpt) < MIN_LENGTH:
137
+ break
138
+ end = len(audio)
139
+
140
+ # # Save the excerpt to the same directory with a new name
141
+ excerpt_path = f"{audio_path[:-4]}_{i//EXCERPT_LENGTH}.wav"
142
+ if not os.path.exists(excerpt_path):
143
+ excerpt.export(excerpt_path, format="wav")
144
+
145
+ excerpt_beats = [b % 30 for b in beats if i / 1000 <= b <= i / 1000 + 30]  # i is in milliseconds, beat times are in seconds
146
+ if downbeats:
147
+ excerpt_downbeats = [db % 30 for db in downbeats if i / 1000 <= db <= i / 1000 + 30]  # same millisecond-to-second conversion as for beats
148
+ else:
149
+ excerpt_downbeats = None
150
+
151
+ # tempo = infer_tempo(excerpt_beats, fps=100)
152
+ sample = get_sample(excerpt_path, excerpt_beats, existed_uuid_list, key=key)
153
+ data_samples.append(sample)
154
+ if downbeats:
155
+ sample = get_sample(excerpt_path, excerpt_downbeats, existed_uuid_list, key=key, type="downbeat")
156
+ data_samples.append(sample)
157
+ # Remove the original audio file
158
+ # os.remove(audio_path)
159
+
160
+ # break
161
+
162
+
163
+ split = "test" if key == "gtzan" else "train"
164
+ output_file_path = f'{PATH}/../{key}_{split}.jsonl' # Replace with the desired output path
165
+ with open(output_file_path, 'w') as outfile:
166
+ # for sample in data_samples:
167
+ json.dump(data_samples, outfile)
168
+
169
+ # outfile.write('\n')
170
+ outfile.close()
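Note that `json.dump(data_samples, outfile)` writes the whole list as a single JSON array, despite the `.jsonl` extension (the per-line write is commented out). A minimal sketch of reading such a file back; the path below is illustrative, following the script's `f'{key}_{split}.jsonl'` naming:

```python
import json

# illustrative path; the script above names its output f'{key}_{split}.jsonl'
with open("ballroom_train.jsonl") as f:
    samples = json.load(f)  # one JSON array, not one JSON object per line

print(len(samples))
print(samples[0]["instruction"])
print(samples[0]["output"][:60])  # e.g. "0.52s,1.04s,1.56s,..."
```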
Beat-Transformer/smc_1train.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
MSD/1/0/1004941.clip.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:236758527234bf8392340a121b539ac88a7cd29e88e09e401bfba55b1f82a532
3
+ size 1201258
MSD/1/0/1026549.clip.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8bdd76e1cd27cd0a6d0c25c7ff96512f93ba2b23c2e4d568f4982012a3bd963
3
+ size 2880042
MSD/1/0/1036772.clip.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a70d91b427f0455b38f12b53d8641f4164255a8d156e544cdf9a6013f477c20
3
+ size 1437518