{
  "best_metric": 80.76073976371114,
  "best_model_checkpoint": "/root/turkic_qa/ru_kaz_models/ru_kaz_xlm_roberta_large_model/checkpoint-2208",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 2760,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "step": 552,
      "train_exact_match": 58.94105894105894,
      "train_f1": 78.59308014289637,
      "train_runtime": 23.2375,
      "train_samples_per_second": 43.722,
      "train_steps_per_second": 1.592
    },
    {
      "epoch": 1.0,
      "grad_norm": 83.0545425415039,
      "learning_rate": 1e-05,
      "loss": 2.9982,
      "step": 552
    },
    {
      "epoch": 1.0,
      "eval_exact_match": 57.84375,
      "eval_f1": 76.41032874262423,
      "eval_runtime": 74.7526,
      "eval_samples_per_second": 43.891,
      "eval_steps_per_second": 1.579,
      "step": 552
    },
    {
      "epoch": 2.0,
      "step": 1104,
      "train_exact_match": 73.52647352647352,
      "train_f1": 87.35432090211079,
      "train_runtime": 23.6437,
      "train_samples_per_second": 43.479,
      "train_steps_per_second": 1.565
    },
    {
      "epoch": 2.0,
      "grad_norm": 65.99756622314453,
      "learning_rate": 7.500000000000001e-06,
      "loss": 1.0802,
      "step": 1104
    },
    {
      "epoch": 2.0,
      "eval_exact_match": 62.90625,
      "eval_f1": 79.63501935340823,
      "eval_runtime": 75.6962,
      "eval_samples_per_second": 43.344,
      "eval_steps_per_second": 1.559,
      "step": 1104
    },
    {
      "epoch": 3.0,
      "step": 1656,
      "train_exact_match": 77.22277722277722,
      "train_f1": 90.8541682004209,
      "train_runtime": 23.3536,
      "train_samples_per_second": 43.548,
      "train_steps_per_second": 1.584
    },
    {
      "epoch": 3.0,
      "grad_norm": 94.93318939208984,
      "learning_rate": 5e-06,
      "loss": 0.7755,
      "step": 1656
    },
    {
      "epoch": 3.0,
      "eval_exact_match": 63.40625,
      "eval_f1": 79.91103921274045,
      "eval_runtime": 75.0081,
      "eval_samples_per_second": 43.742,
      "eval_steps_per_second": 1.573,
      "step": 1656
    },
    {
      "epoch": 4.0,
      "step": 2208,
      "train_exact_match": 82.81718281718281,
      "train_f1": 93.85897538338976,
      "train_runtime": 23.3033,
      "train_samples_per_second": 43.513,
      "train_steps_per_second": 1.588
    },
    {
      "epoch": 4.0,
      "grad_norm": 181.9861297607422,
      "learning_rate": 2.5e-06,
      "loss": 0.584,
      "step": 2208
    },
    {
      "epoch": 4.0,
      "eval_exact_match": 63.96875,
      "eval_f1": 80.76073976371114,
      "eval_runtime": 74.9415,
      "eval_samples_per_second": 43.781,
      "eval_steps_per_second": 1.575,
      "step": 2208
    },
    {
      "epoch": 5.0,
      "step": 2760,
      "train_exact_match": 85.81418581418582,
      "train_f1": 95.18484768485531,
      "train_runtime": 23.5964,
      "train_samples_per_second": 43.312,
      "train_steps_per_second": 1.568
    },
    {
      "epoch": 5.0,
      "grad_norm": 45.12015914916992,
      "learning_rate": 0.0,
      "loss": 0.4601,
      "step": 2760
    },
    {
      "epoch": 5.0,
      "eval_exact_match": 64.4375,
      "eval_f1": 80.75596391907597,
      "eval_runtime": 75.7078,
      "eval_samples_per_second": 43.338,
      "eval_steps_per_second": 1.559,
      "step": 2760
    },
    {
      "epoch": 5.0,
      "step": 2760,
      "total_flos": 5.376168689054976e+16,
      "train_loss": 1.1795940786168195,
      "train_runtime": 4973.0116,
      "train_samples_per_second": 15.521,
      "train_steps_per_second": 0.555
    }
  ],
  "logging_steps": 500,
  "max_steps": 2760,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 5.376168689054976e+16,
  "train_batch_size": 28,
  "trial_name": null,
  "trial_params": null
}