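# ################################################################################
# Hyperparameters for training an SNR estimator on Libri2Mix mixtures,
# with WHAM! noise and WHAMR! reverberation added through dynamic mixing,
# on top of a pool of pretrained separation models.
# ################################################################################

# Seed needs to be set at the top of the yaml, before objects with parameters
# are instantiated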
seed: 1234
__set_seed: !apply:torch.manual_seed [1234]
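
# Data params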
data_folder: /miniscratch/subakany/LibriMixData_new/Libri2Mix/
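
# Source signals used for dynamic mixing, and the room impulse responses
# used for the reverberation augmentation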
base_folder_dm: /miniscratch/subakany/LibriMixData_new/LibriSpeech/train-clean-360_processed/
rir_path: /miniscratch/subakany/whamr_rirs_wav
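
# Experiment name and output/result paths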
experiment_name: snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators
output_folder: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234
train_log: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/train_log.txt
save_folder: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/save
train_data: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/save/libri2mix_train-360.csv
valid_data: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/save/libri2mix_dev.csv
test_data: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/save/libri2mix_test.csv
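
# WSJ0 / WHAM! data used for the WHAMR! portion of training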
wsj_data_folder: /network/tmp1/subakany/wham_original
train_wsj_data: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/save/wham_tr.csv
test_wsj_data: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/save/wham_tt.csv
base_folder_dm_whamr: /network/tmp1/subakany/wsj0-processed/si_tr_s
use_whamr_train: true
whamr_proportion: 0.6

test_onwsj: false

skip_prep: false

ckpt_interval_minutes: 60
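
# Experiment params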
auto_mix_prec: false
test_only: false
num_spks: 2
progressbar: true
save_audio: false
sample_rate: 8000
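
# Training params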
N_epochs: 200
batch_size: 1
lr: 0.0001
clip_grad_norm: 5
loss_upper_lim: 999999

# If true, training sequences are cut to the length given by training_signal_len
limit_training_signal_len: false
# Length (in samples) of the training sequences when the limit above is enabled
training_signal_len: 32000000
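
# Dynamic mixing and augmentation params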
dynamic_mixing: true
use_wham_noise: true
use_reverb_augment: true

use_wavedrop: false
use_speedperturb: true
use_speedperturb_sameforeachsource: false
use_rand_shift: false
min_shift: -8000
max_shift: 8000
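
# Speed perturbation augmentation (applied in the time domain)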
speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
    perturb_prob: 1.0
    drop_freq_prob: 0.0
    drop_chunk_prob: 0.0
    sample_rate: 8000
    speeds: [95, 100, 105]
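
# Frequency and chunk dropping augmentation (wavedrop)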
wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
    perturb_prob: 0.0
    drop_freq_prob: 1.0
    drop_chunk_prob: 1.0
    sample_rate: 8000
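
# Loss thresholding params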
threshold_byloss: true
threshold: -30
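
# Encoder params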
N_encoder_out: 256
out_channels: 256
kernel_size: 16
kernel_stride: 8
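
# Dataloader options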
dataloader_opts:
    batch_size: 1
    num_workers: 0
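
# Separation network: dual-path encoder, transformer-based masking network, and decoder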
Encoder: &id003 !new:speechbrain.lobes.models.dual_path.Encoder
    kernel_size: 16
    out_channels: 256
SBtfintra: &id001 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
    num_layers: 8
    d_model: 256
    nhead: 8
    d_ffn: 1024
    dropout: 0
    use_positional_encoding: true
    norm_before: true
SBtfinter: &id002 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
    num_layers: 8
    d_model: 256
    nhead: 8
    d_ffn: 1024
    dropout: 0
    use_positional_encoding: true
    norm_before: true
MaskNet: &id005 !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
    num_spks: 2
    in_channels: 256
    out_channels: 256
    num_layers: 2
    K: 250
    intra_model: *id001
    inter_model: *id002
    norm: ln
    linear_layer_after_inter_intra: false
    skip_around_intra: true
Decoder: &id004 !new:speechbrain.lobes.models.dual_path.Decoder
    in_channels: 256
    out_channels: 1
    kernel_size: 16
    stride: 8
    bias: false
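
# SNR estimator params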
snrmin: 0
snrmax: 10
out_n_neurons: 16
use_snr_compression: true
separation_norm_type: stnorm

latent_dim: 128
n_inp: 256
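
# SNR estimator: 1-D convolutional encoder over a 2-channel input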
encoder: &id006 !new:speechbrain.nnet.containers.Sequential
    input_shape: [!!null '', 2, !!null '']
    cnn1: !new:speechbrain.nnet.CNN.Conv1d
        in_channels: 2
        kernel_size: 4
        out_channels: 128
        stride: 1
        skip_transpose: true
        padding: valid
    relu1: !new:torch.nn.ReLU
    cnn2: !new:speechbrain.nnet.CNN.Conv1d
        in_channels: 128
        kernel_size: 4
        out_channels: 128
        stride: 2
        skip_transpose: true
        padding: valid
    relu2: !new:torch.nn.ReLU
    cnn3: !new:speechbrain.nnet.CNN.Conv1d
        in_channels: 128
        kernel_size: 4
        out_channels: 128
        stride: 2
        skip_transpose: true
        padding: valid
    relu3: !new:torch.nn.ReLU
    cnn4: !new:speechbrain.nnet.CNN.Conv1d
        in_channels: 128
        kernel_size: 4
        out_channels: 128
        stride: 2
        skip_transpose: true
        padding: valid
    relu4: !new:torch.nn.ReLU
    cnn5: !new:speechbrain.nnet.CNN.Conv1d
        in_channels: 128
        kernel_size: 4
        out_channels: 128
        stride: 2
        skip_transpose: true
        padding: valid
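
# Statistics pooling over time (mean and std of the 128 CNN channels)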
stat_pooling: !new:speechbrain.nnet.pooling.StatisticsPooling
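
# Output head: two linear layers producing a single sigmoid-normalized score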
encoder_out: &id007 !new:speechbrain.nnet.containers.Sequential
    input_shape: [!!null '', 256]
    layer1: !new:speechbrain.nnet.linear.Linear
        input_size: 256
        n_neurons: 256
    relu: !new:torch.nn.ReLU
    layer2: !new:speechbrain.nnet.linear.Linear
        input_size: 256
        n_neurons: 1
    sigm: !new:torch.nn.Sigmoid
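
# Losses, optimizer, and learning-rate scheduling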
classifier_loss: !new:torch.nn.CrossEntropyLoss

optimizer: !name:torch.optim.Adam
    lr: 0.0001
    weight_decay: 0

loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper

lr_scheduler: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
    factor: 0.5
    patience: 2
    dont_halve_until_epoch: 95

epoch_counter: &id008 !new:speechbrain.utils.epoch_loop.EpochCounter
    limit: 200
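
# Module registration, checkpointing, and logging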
modules:
    # Separation model components
    separation_encoder: *id003
    decoder: *id004
    masknet: *id005
    # SNR estimator components
    encoder: *id006
    encoder_out: *id007
checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
    checkpoints_dir: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/save
    recoverables:
        counter: *id008
        encoder: *id006
        encoder_out: *id007

train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
    save_file: results/snrtrain-timedomain-sbpooling-wwhamr-lessstride-stnorm-manyseparators/1234/train_log.txt
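
# Pool of pretrained separators used to produce the separation estimates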
num_separators_per_model: 3
separator_base_folder: /home/mila/s/subakany/speechbrain_new/recipes/WHAMandWHAMR/separation/results/
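
# Pretrainer for loading the estimator encoder and output head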
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        encoder: !ref <encoder>
        encoder_out: !ref <encoder_out>
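
# A minimal, illustrative sketch (not part of this recipe) of how a file like this
# is typically loaded with HyperPyYAML; the file name below is only an example:
#
#     from hyperpyyaml import load_hyperpyyaml
#     with open("hyperparams.yaml") as fin:
#         hparams = load_hyperpyyaml(fin, overrides={"seed": 1234})
#     snr_encoder = hparams["encoder"]        # the CNN encoder defined above
#     snr_head = hparams["encoder_out"]       # the output head defined above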