hahmadraz committed
Commit 4ef0f28
Parent: 90219d1

Upload 10 files

Browse files
Files changed (10) hide show
  1. CKPT.yaml +4 -0
  2. brain.ckpt +3 -0
  3. counter.ckpt +3 -0
  4. dataloader-TRAIN.ckpt +3 -0
  5. decoder.ckpt +3 -0
  6. encoder.ckpt +3 -0
  7. hyperparams.yaml +199 -0
  8. masknet.ckpt +3 -0
  9. optimizer.ckpt +3 -0
  10. scaler.ckpt +3 -0
CKPT.yaml ADDED
@@ -0,0 +1,4 @@
+ # yamllint disable
+ brain_intra_epoch_ckpt: true
+ end-of-epoch: false
+ unixtime: 1707661105.5904214
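
Note: CKPT.yaml is the small metadata file that SpeechBrain's Checkpointer writes next to the recoverables in each checkpoint directory (here it records a mid-epoch save). A minimal sketch of inspecting it with PyYAML; the relative path assumes you read it from a local clone of this repo:

import yaml

# Checkpoint metadata written by speechbrain.utils.checkpoints.Checkpointer.
with open("CKPT.yaml") as f:
    meta = yaml.safe_load(f)

# e.g. {'brain_intra_epoch_ckpt': True, 'end-of-epoch': False,
#       'unixtime': 1707661105.5904214}
print(meta)
print("saved mid-epoch:", meta["brain_intra_epoch_ckpt"])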
brain.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78e3fa769f51ed705833cacab0a9d4ffa8c57d70d8f0e20386d18993275ab01f
+ size 65
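
Note: this and the other .ckpt diffs in this commit are Git LFS pointer files, not the weights themselves; the binary payloads are fetched with git lfs pull. A small sketch of parsing one pointer into its fields (the helper name and the path are illustrative):

def parse_lfs_pointer(path: str) -> dict:
    """Split a Git LFS pointer file into its space-separated key/value fields."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("brain.ckpt")
print(ptr["oid"], ptr["size"])  # sha256:78e3fa76... 65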
counter.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:535fa30d7e25dd8a49f1536779734ec8286108d115da5045d77f3b4185d8f790
+ size 2
dataloader-TRAIN.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73dfb9292db400ebd8d25044a47f826e5e81381153b2baddfed263a7532c4d6c
+ size 5
decoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ccee83e6a46f547bcd2472a33137bac10e458f0e4da0416c400c9ff8c3d06dd
+ size 17195
encoder.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6810a5a62de05a1323680befdd63897f802031c3c2b25f9022b8dba2a1351fab
+ size 17259
hyperparams.yaml ADDED
@@ -0,0 +1,199 @@
+ # Generated 2024-02-06 from:
+ # /mnt3/speechbrain/recipes/LibriMix/separation/hparams/sepformer-libri3mix.yaml
+ # yamllint disable
+ # ################################
+ # Model: SepFormer for source separation
+ # https://arxiv.org/abs/2010.13154
+ # Dataset: Libri3Mix
+ # ################################
+ #
+ # Basic parameters
+ # Seed needs to be set at top of yaml, before objects with parameters are made
+ #
+ seed: 1234
+ __set_seed: !apply:torch.manual_seed [1234]
+
+ # Data params
+
+ # e.g. '/yourpath/Libri3Mix/train-clean-360/'
+ # the data folder is needed even if dynamic mixing is applied
+ data_folder: /mnt3/Libri3Mix_48k_own/Libri3Mix/
+
+ # This is needed only if dynamic mixing is applied
+ base_folder_dm: /yourpath/LibriSpeech/train-clean-360/
+
+ experiment_name: sepformer-libri3mix-48k
+ output_folder: results/sepformer-libri3mix-48k/1234
+ train_log: results/sepformer-libri3mix-48k/1234/train_log.txt
+ save_folder: results/sepformer-libri3mix-48k/1234/save
+ train_data: results/sepformer-libri3mix-48k/1234/save/libri3mix_train-360.csv
+ valid_data: results/sepformer-libri3mix-48k/1234/save/libri3mix_test.csv
+ test_data: results/sepformer-libri3mix-48k/1234/save/libri3mix_test.csv
+ skip_prep: false
+
+ ckpt_interval_minutes: 60
+
+ # Experiment params
+ precision: fp16 # bf16, fp16 or fp32
+ num_spks: 3
+ noprogressbar: false
+ save_audio: false # Save estimated sources on disk
+ sample_rate: 48000
+
+ # Training parameters
+ N_epochs: 50
+ batch_size: 1
+ lr: 0.0005
+ clip_grad_norm: 5
+ loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
+ # if True, the training sequences are cut to a specified length
+ limit_training_signal_len: true
+ # this is the length of sequences if we choose to limit
+ # the signal length of training sequences
+ training_signal_len: 100000
+
+ # Set it to True to dynamically create mixtures at training time
+ dynamic_mixing: false
+ use_wham_noise: true
+
+ # Parameters for data augmentation
+ use_wavedrop: false
+ use_speedperturb: true
+ use_rand_shift: false
+ min_shift: -8000
+ max_shift: 8000
+
+ # Speed perturbation
+ speed_changes: &id001 [95, 100, 105]
+
+ speed_perturb: !new:speechbrain.augment.time_domain.SpeedPerturb
+     orig_freq: 48000
+     speeds: *id001
+
+ # Frequency drop: randomly drops a number of frequency bands to zero.
+ drop_freq_low: 0 # Min frequency band dropout probability
+ drop_freq_high: 1 # Max frequency band dropout probability
+ drop_freq_count_low: 1 # Min number of frequency bands to drop
+ drop_freq_count_high: 3 # Max number of frequency bands to drop
+ drop_freq_width: 0.05 # Width of frequency bands to drop
+
+ drop_freq: !new:speechbrain.augment.time_domain.DropFreq
+     drop_freq_low: 0
+     drop_freq_high: 1
+     drop_freq_count_low: 1
+     drop_freq_count_high: 3
+     drop_freq_width: 0.05
+
+ # Time drop: randomly drops a number of temporal chunks.
+ drop_chunk_count_low: 1 # Min number of audio chunks to drop
+ drop_chunk_count_high: 5 # Max number of audio chunks to drop
+ drop_chunk_length_low: 1000 # Min length of audio chunks to drop
+ drop_chunk_length_high: 2000 # Max length of audio chunks to drop
+
+ drop_chunk: !new:speechbrain.augment.time_domain.DropChunk
+     drop_length_low: 1000
+     drop_length_high: 2000
+     drop_count_low: 1
+     drop_count_high: 5
+
+ # loss thresholding -- this thresholds the training loss
+ threshold_byloss: true
+ threshold: -30
+
+ # Encoder parameters
+ N_encoder_out: 256
+ out_channels: 256
+ kernel_size: 16
+ kernel_stride: 8
+ d_ffn: 1024
+
+ # Dataloader options
+ dataloader_opts:
+     batch_size: 1
+     num_workers: 3
+
+ # Specifying the network
+ Encoder: &id004 !new:speechbrain.lobes.models.dual_path.Encoder
+     kernel_size: 16
+     out_channels: 256
+
+ SBtfintra: &id002 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
+     num_layers: 8
+     d_model: 256
+     nhead: 8
+     d_ffn: 1024
+     dropout: 0
+     use_positional_encoding: true
+     norm_before: true
+
+ SBtfinter: &id003 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
+     num_layers: 8
+     d_model: 256
+     nhead: 8
+     d_ffn: 1024
+     dropout: 0
+     use_positional_encoding: true
+     norm_before: true
+
+ MaskNet: &id006 !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
+     num_spks: 3
+     in_channels: 256
+     out_channels: 256
+     num_layers: 2
+     K: 250
+     intra_model: *id002
+     inter_model: *id003
+     norm: ln
+     linear_layer_after_inter_intra: false
+     skip_around_intra: true
+
+ Decoder: &id005 !new:speechbrain.lobes.models.dual_path.Decoder
+     in_channels: 256
+     out_channels: 1
+     kernel_size: 16
+     stride: 8
+     bias: false
+
+ optimizer: !name:torch.optim.Adam
+     lr: 0.0005
+     weight_decay: 0
+
+ loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
+
+ lr_scheduler: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
+     factor: 0.5
+     patience: 2
+     dont_halve_until_epoch: 5
+
+ epoch_counter: &id007 !new:speechbrain.utils.epoch_loop.EpochCounter
+     # lr_scheduler: !ref <lr_scheduler>
+     limit: 50
+
+ modules:
+     encoder: *id004
+     decoder: *id005
+     masknet: *id006
+
+ checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
+     checkpoints_dir: results/sepformer-libri3mix-48k/1234/save
+     recoverables:
+         encoder: *id004
+         decoder: *id005
+         masknet: *id006
+         counter: *id007
+
+ train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
+     save_file: results/sepformer-libri3mix-48k/1234/train_log.txt
+
+ # If you do not want to use the pretrained separator, simply delete the pretrained_separator field.
+ pretrained_separator: !new:speechbrain.utils.parameter_transfer.Pretrainer
+     collect_in: results/sepformer-libri3mix-48k/1234/save
+     loadables:
+         encoder: *id004
+         decoder: *id005
+         masknet: *id006
+     paths:
+         encoder: speechbrain/sepformer-wsj03mix/encoder.ckpt
+         decoder: speechbrain/sepformer-wsj03mix/decoder.ckpt
+         masknet: speechbrain/sepformer-wsj03mix/masknet.ckpt
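
Note: for anyone picking these files up, here is a minimal sketch of restoring the weights through the checkpointer defined above. It assumes a compatible SpeechBrain install and that this repo's files are copied into a CKPT+<timestamp> subdirectory of results/sepformer-libri3mix-48k/1234/save, which is the layout SpeechBrain's Checkpointer scans; treat it as illustrative rather than the recipe's official loading path:

import torch
from hyperpyyaml import load_hyperpyyaml

# Instantiate everything the training run used from the resolved config.
with open("hyperparams.yaml") as f:
    hparams = load_hyperpyyaml(f)

# The checkpointer already registers encoder/decoder/masknet/counter as
# recoverables; this reloads the newest checkpoint under checkpoints_dir.
checkpointer = hparams["checkpointer"]
checkpointer.recover_if_possible(device=torch.device("cpu"))

masknet = hparams["MaskNet"].eval()  # separator ready for inference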
masknet.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0380766af47473cf2d44832f6dc486e23e00e9bb975356667a6f5f3bb89c5f12
+ size 113365891
optimizer.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef3da9f2654d3f5f374c1a6e7a5668f03277b0e7f8c7b1ec3014a3132ef0785c
+ size 206306100
scaler.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3533486347c0da0c28d70910a55c5815424eaa310d2a916b487955c09a3a839
+ size 557
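
Note: scaler.ckpt holds the torch GradScaler state that pairs with precision: fp16 in hyperparams.yaml. It appears to be a plain torch.save of the scaler's state_dict, so after git lfs pull it can be inspected as below (a sketch; the exact keys depend on your torch version):

import torch

# GradScaler state dict saved during fp16 mixed-precision training.
state = torch.load("scaler.ckpt", map_location="cpu")
print(state)  # typically: scale, growth_factor, backoff_factor,
              # growth_interval, _growth_tracker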