pretrained_model=microsoft/codebert-base
output_dir=../model
data_size=small

CUDA_VISIBLE_DEVICES=1 python run.py \
	--do_train \
	--do_eval \
	--model_type roberta \
	--model_name_or_path $pretrained_model \
	--config_name roberta-base \
	--tokenizer_name roberta-base \
	--train_filename ../data/$data_size/train.buggy-fixed.buggy,../data/$data_size/train.buggy-fixed.fixed \
	--dev_filename ../data/$data_size/valid.buggy-fixed.buggy,../data/$data_size/valid.buggy-fixed.fixed \
	--output_dir $output_dir \
	--max_source_length 256 \
	--max_target_length 256 \
	--beam_size 5 \
	--train_batch_size 16 \
	--eval_batch_size 16 \
	--learning_rate 5e-5 \
	--train_steps 100000 \
	--eval_steps 5000
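
# A minimal sketch of a matching test-set run after training finishes. It assumes
# this run.py also accepts --do_test, --load_model_path, and --test_filename (as in
# similar CodeBERT fine-tuning scripts) and that the best checkpoint is saved under
# $output_dir/checkpoint-best-bleu/ -- verify both with `python run.py --help` and
# the contents of $output_dir before relying on these names.
test_file=../data/$data_size/test.buggy-fixed.buggy,../data/$data_size/test.buggy-fixed.fixed
load_model_path=$output_dir/checkpoint-best-bleu/pytorch_model.bin   # assumed checkpoint path

CUDA_VISIBLE_DEVICES=1 python run.py \
	--do_test \
	--model_type roberta \
	--model_name_or_path $pretrained_model \
	--config_name roberta-base \
	--tokenizer_name roberta-base \
	--load_model_path $load_model_path \
	--dev_filename ../data/$data_size/valid.buggy-fixed.buggy,../data/$data_size/valid.buggy-fixed.fixed \
	--test_filename $test_file \
	--output_dir $output_dir \
	--max_source_length 256 \
	--max_target_length 256 \
	--beam_size 5 \
	--eval_batch_size 16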