#export TOKENIZERS_PARALLELISM=0
python ./run_mlm_flax.py \
--push_to_hub \
--output_dir="./" \
--model_type="big_bird" \
--config_name="./" \
--tokenizer_name="./" \
--max_seq_length="4096" \
--weight_decay="0.0095" \
--warmup_steps="5000" \
--overwrite_output_dir \
--adam_beta1="0.9" \
--adam_beta2="0.98" \
--logging_steps="250" \
--eval_steps="500" \
--num_train_epochs="5" \
--preprocessing_num_workers="96" \
--save_steps="500" \
--learning_rate="5e-5" \
--per_device_train_batch_size="2" \
--per_device_eval_batch_size="2" \
--save_total_limit="5" \
--max_eval_samples="500" \
--overwrite_cache False \
--gradient_accumulation_steps="4"
# Optional flags (append to the command above with "\" line continuations):
#--resume_from_checkpoint="./"
#--adafactor
#--dtype="bfloat16"
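
Since --config_name and --tokenizer_name both point at the working directory, a config and a tokenizer are expected to already be saved in "./" before the command is launched. A minimal Python sketch of one way to prepare them, assuming a standard BigBird configuration and the public google/bigbird-roberta-base tokenizer (both are illustrative assumptions, not values taken from this run):

from transformers import AutoTokenizer, BigBirdConfig

# Write a BigBird config into the directory picked up via --config_name="./".
# The values below are assumptions for illustration only.
config = BigBirdConfig(
    max_position_embeddings=4096,   # consistent with --max_seq_length="4096"
    attention_type="block_sparse",
)
config.save_pretrained("./")

# Reuse an existing BigBird tokenizer (hypothetical choice) for --tokenizer_name="./".
tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
tokenizer.save_pretrained("./")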