#!/usr/bin/env bash
set -euo pipefail

# Launch Flax CTC speech-recognition training (wav2vec2) on the
# ESC-benchmark GigaSpeech config. Trains for 50k steps, evaluating and
# checkpointing every 10k, logging to the "wav2vec2-ctc" W&B project,
# and pushes the final model to the Hub (requires a valid auth token).
python run_flax_speech_recognition_ctc.py \
  --model_name_or_path="esc-benchmark/wav2vec2-ctc-pretrained" \
  --tokenizer_name="wav2vec2-ctc-gigaspeech-tokenizer" \
  --dataset_name="esc-benchmark/esc-datasets" \
  --dataset_config_name="gigaspeech" \
  --output_dir="./" \
  --wandb_project="wav2vec2-ctc" \
  --wandb_name="wav2vec2-ctc-gigaspeech" \
  --max_steps="50000" \
  --save_steps="10000" \
  --eval_steps="10000" \
  --learning_rate="3e-4" \
  --logging_steps="25" \
  --warmup_steps="5000" \
  --preprocessing_num_workers="1" \
  --do_train \
  --do_eval \
  --do_predict \
  --overwrite_output_dir \
  --gradient_checkpointing \
  --freeze_feature_encoder \
  --push_to_hub \
  --use_auth_token