---
language:
- en
tags:
- esb
datasets:
- esb/datasets
- speechcolab/gigaspeech
---
To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation). As a rough sketch, an installation at the time of writing looked like the following (package extras and system dependencies may have changed, so defer to the NeMo README):
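```bash
# Illustrative setup following the NeMo README; adjust to your environment.
apt-get update && apt-get install -y libsndfile1 ffmpeg
pip install Cython
pip install nemo_toolkit['asr']
```

With NeMo installed, execute: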
```bash
#!/usr/bin/env bash
CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \
    --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \
    --model_name_or_path="stt_en_conformer_transducer_xlarge" \
    --dataset_name="esb/datasets" \
    --tokenizer_path="tokenizer" \
    --vocab_size="1024" \
    --num_train_epochs="0.88" \
    --dataset_config_name="gigaspeech" \
    --output_dir="./" \
    --run_name="conformer-rnnt-gigaspeech" \
    --wandb_project="rnnt" \
    --per_device_train_batch_size="8" \
    --per_device_eval_batch_size="4" \
    --logging_steps="50" \
    --learning_rate="1e-4" \
    --warmup_steps="500" \
    --save_strategy="steps" \
    --save_steps="20000" \
    --evaluation_strategy="steps" \
    --eval_steps="20000" \
    --report_to="wandb" \
    --preprocessing_num_workers="4" \
    --fused_batch_size="4" \
    --length_column_name="input_lengths" \
    --fuse_loss_wer \
    --group_by_length \
    --overwrite_output_dir \
    --do_train \
    --do_eval \
    --do_predict \
    --use_auth_token
```
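Once training finishes, the resulting model can be loaded with NeMo for inference. The following is a minimal sketch, assuming the run saved a `.nemo` checkpoint under `--output_dir`; the checkpoint filename and audio path are illustrative, not part of the original run:

```python
import nemo.collections.asr as nemo_asr

# Restore the fine-tuned checkpoint produced by the run above
# (hypothetical filename; substitute your actual checkpoint path)...
model = nemo_asr.models.EncDecRNNTBPEModel.restore_from(
    "conformer-rnnt-gigaspeech.nemo"
)
# ...or load the pretrained base model referenced by --model_name_or_path:
# model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained(
#     model_name="stt_en_conformer_transducer_xlarge"
# )

# Transcribe a local audio file (16 kHz mono WAV is the expected input).
transcriptions = model.transcribe(["sample.wav"])
print(transcriptions)
```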