---
# SpeechBrain hyperparameters: wav2vec2 encoder + Transformer decoder (Persian).
# NOTE(review): the original file had every `!ref <target>` stripped to a bare
# `!ref` (angle-bracket spans lost). Targets below are reconstructed from the
# key names and the standard SpeechBrain wav2vec2 recipe layout — verify
# against the upstream recipe before shipping.

pretrained_path: sinarashidi/10epoch
sample_rate: 16000

# URL for the HuggingFace model we want to load as encoder
wav2vec2_hub: m3hrdadfi/wav2vec2-large-xlsr-persian-v3

# Outputs
vocab_size: 100
blank_index: 99
bos_index: 97
eos_index: 98
pad_index: 99
label_smoothing: 0.0

# Encoder
features_dim: 1024

# Length Regulator
enc_kernel_size: 3
enc_stride: 2

# Transformer decoder
embedding_size: 512
d_model: 512
nhead: 8
num_encoder_layers: 0
num_decoder_layers: 6
d_ffn: 2048
transformer_dropout: 0.1
activation: !name:torch.nn.GELU
output_neurons: !ref <vocab_size>
attention_type: "RelPosMHAXL"

# Decoding parameters
min_decode_ratio: 0.0
max_decode_ratio: 1.0

# Frozen wav2vec2 feature encoder loaded from the HuggingFace hub.
wav2vec2: !new:speechbrain.lobes.models.huggingface_transformers.wav2vec2.Wav2Vec2
  source: !ref <wav2vec2_hub>
  output_norm: True
  freeze: True
  freeze_feature_extractor: True
  apply_spec_augment: True
  save_path: wav2vec2_checkpoints

# Strided Conv1d that downsamples wav2vec2 features to the decoder dimension.
length_regulator: !new:speechbrain.nnet.CNN.Conv1d
  input_shape: [null, null, !ref <features_dim>]
  out_channels: !ref <embedding_size>
  kernel_size: !ref <enc_kernel_size>
  stride: !ref <enc_stride>

# Decoder-only Transformer (num_encoder_layers: 0).
transformer_decoder: !new:speechbrain.lobes.models.transformer.TransformerST.TransformerST # yamllint disable-line rule:line-length
  input_size: !ref <embedding_size>
  tgt_vocab: !ref <vocab_size>
  d_model: !ref <d_model>
  nhead: !ref <nhead>
  num_encoder_layers: !ref <num_encoder_layers>
  num_decoder_layers: !ref <num_decoder_layers>
  d_ffn: !ref <d_ffn>
  dropout: !ref <transformer_dropout>
  activation: !ref <activation>
  attention_type: !ref <attention_type>
  normalize_before: True
  causal: False

log_softmax: !new:speechbrain.nnet.activations.Softmax
  apply_log: True

# Projects decoder states to the output vocabulary.
seq_lin: !new:speechbrain.nnet.linear.Linear
  input_size: !ref <d_model>
  n_neurons: !ref <output_neurons>

# Trainable parameters saved/loaded as one checkpoint (wav2vec2 is separate).
# NOTE(review): the three ref targets in this list were stripped in the
# source; this ordering matches the standard recipe — confirm.
model: !new:torch.nn.ModuleList
  - [!ref <length_regulator>, !ref <transformer_decoder>, !ref <seq_lin>]

encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
  wav2vec2: !ref <wav2vec2>
  length_regulator: !ref <length_regulator>

decoder_beamsearch: !new:speechbrain.decoders.seq2seq.S2STransformerBeamSearcher
  modules: [!ref <transformer_decoder>, !ref <seq_lin>]
  bos_index: !ref <bos_index>
  eos_index: !ref <eos_index>
  min_decode_ratio: !ref <min_decode_ratio>
  max_decode_ratio: !ref <max_decode_ratio>
  beam_size: 10
  temperature: 1.0

modules:
  encoder: !ref <encoder>
  decoder: !ref <decoder_beamsearch>

# Fetches the pretrained checkpoints from <pretrained_path> on the hub.
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
  loadables:
    model: !ref <model>
    wav2vec2: !ref <wav2vec2>
  paths:
    wav2vec2: !ref <pretrained_path>/wav2vec2.ckpt
    model: !ref <pretrained_path>/model.ckpt