# Music-Source-Separation-Training / config_vocals_segm_models.yaml
audio:
  chunk_size: 261632
  dim_f: 4096
  dim_t: 512
  hop_length: 512
  n_fft: 8192
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.001
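# Shape arithmetic (a sketch, not part of the original config): with n_fft: 8192
# the STFT produces 4097 frequency bins, of which dim_f: 4096 are kept; a chunk of
# chunk_size: 261632 = 511 * 512 samples at hop_length: 512 yields dim_t: 512
# centered frames, i.e. ~5.9 s of audio at 44100 Hz. min_mean_abs presumably
# filters out near-silent training chunks (mean absolute amplitude below 0.001).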
model:
  encoder_name: tu-maxvit_large_tf_512 # see the list of available encoders: https://github.com/qubvel/segmentation_models.pytorch#encoders-
  decoder_type: unet # one of: unet, fpn
  act: gelu
  num_channels: 128
  num_subbands: 8
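# Notes (assumptions, not from the original file): the 'tu-' prefix selects a timm
# backbone (MaxViT-Large at 512x512 input) through segmentation_models.pytorch, and
# decoder_type picks the decoder head on top of it. num_subbands: 8 likely splits
# the spectrogram into 8 frequency subbands stacked along the channel axis, so
# dim_f: 4096 becomes 8 subbands of 512 bins each.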
training:
  batch_size: 8
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
  - vocals
  - other
  lr: 5.0e-05
  patience: 2
  reduce_factor: 0.95
  target_instrument: null
  num_epochs: 1000
  num_steps: 2000
  augmentation: false # enable augmentations via the audiomentations and pedalboard libraries
  augmentation_type: simple1
  use_mp3_compress: false # deprecated
  augmentation_mix: true # mix several stems of the same type with some probability
  augmentation_loudness: true # randomly change the loudness of each stem
  augmentation_loudness_type: 1 # type 1 or 2
  augmentation_loudness_min: 0.5
  augmentation_loudness_max: 1.5
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  optimizer: adamw
  other_fix: true # needed when validating on a multisong dataset to check whether 'other' is actually instrumental
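# Scheduler/loss notes (hedged guesses at how the trainer consumes these values):
# patience and reduce_factor look like ReduceLROnPlateau-style arguments, i.e.
# multiply the LR by 0.95 after 2 epochs without validation improvement;
# q: 0.95 together with coarse_loss_clip: true suggests loss values above the
# 0.95 quantile are clipped or masked to suppress outlier segments; ema_momentum
# is the decay for an exponential moving average of the model weights.
# target_instrument: null trains all listed instruments; a single-stem variant
# (hypothetical alternative, not in the original) would instead set, e.g.:
#   target_instrument: vocals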
inference:
  batch_size: 1
  dim_t: 512
  num_overlap: 4
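# Inference note (a sketch of the usual chunked scheme, not confirmed by this
# file): audio is processed in windows of chunk_size samples; num_overlap: 4
# means each sample is covered by about 4 overlapping windows whose predictions
# are averaged, trading inference speed for output quality.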