{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.0,
  "global_step": 19800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.38,
      "learning_rate": 4.8737373737373736e-05,
      "loss": 3.8927,
      "step": 500
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.7474747474747476e-05,
      "loss": 2.3719,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7787933945655823,
      "eval_loss": 1.1852809190750122,
      "eval_runtime": 2.0844,
      "eval_samples_per_second": 524.845,
      "eval_steps_per_second": 65.726,
      "step": 1320
    },
    {
      "epoch": 1.14,
      "learning_rate": 4.621212121212121e-05,
      "loss": 1.4046,
      "step": 1500
    },
    {
      "epoch": 1.52,
      "learning_rate": 4.494949494949495e-05,
      "loss": 0.7837,
      "step": 2000
    },
    {
      "epoch": 1.89,
      "learning_rate": 4.368686868686869e-05,
      "loss": 0.5941,
      "step": 2500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.880255937576294,
      "eval_loss": 0.5889360308647156,
      "eval_runtime": 2.0716,
      "eval_samples_per_second": 528.095,
      "eval_steps_per_second": 66.133,
      "step": 2640
    },
    {
      "epoch": 2.27,
      "learning_rate": 4.242424242424243e-05,
      "loss": 0.3401,
      "step": 3000
    },
    {
      "epoch": 2.65,
      "learning_rate": 4.116161616161616e-05,
      "loss": 0.2557,
      "step": 3500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8866544961929321,
      "eval_loss": 0.5768323540687561,
      "eval_runtime": 2.1209,
      "eval_samples_per_second": 515.813,
      "eval_steps_per_second": 64.595,
      "step": 3960
    },
    {
      "epoch": 3.03,
      "learning_rate": 3.98989898989899e-05,
      "loss": 0.2031,
      "step": 4000
    },
    {
      "epoch": 3.41,
      "learning_rate": 3.8636363636363636e-05,
      "loss": 0.1193,
      "step": 4500
    },
    {
      "epoch": 3.79,
      "learning_rate": 3.7373737373737376e-05,
      "loss": 0.1178,
      "step": 5000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8811700344085693,
      "eval_loss": 0.6243242621421814,
      "eval_runtime": 2.1461,
      "eval_samples_per_second": 509.767,
      "eval_steps_per_second": 63.837,
      "step": 5280
    },
    {
      "epoch": 4.17,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.1101,
      "step": 5500
    },
    {
      "epoch": 4.55,
      "learning_rate": 3.484848484848485e-05,
      "loss": 0.078,
      "step": 6000
    },
    {
      "epoch": 4.92,
      "learning_rate": 3.358585858585859e-05,
      "loss": 0.0948,
      "step": 6500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8912248611450195,
      "eval_loss": 0.650793194770813,
      "eval_runtime": 2.0892,
      "eval_samples_per_second": 523.641,
      "eval_steps_per_second": 65.575,
      "step": 6600
    },
    {
      "epoch": 5.3,
      "learning_rate": 3.232323232323233e-05,
      "loss": 0.0352,
      "step": 7000
    },
    {
      "epoch": 5.68,
      "learning_rate": 3.106060606060606e-05,
      "loss": 0.079,
      "step": 7500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8930529952049255,
      "eval_loss": 0.6763377785682678,
      "eval_runtime": 2.0996,
      "eval_samples_per_second": 521.052,
      "eval_steps_per_second": 65.251,
      "step": 7920
    },
    {
      "epoch": 6.06,
      "learning_rate": 2.9797979797979796e-05,
      "loss": 0.054,
      "step": 8000
    },
    {
      "epoch": 6.44,
      "learning_rate": 2.8535353535353536e-05,
      "loss": 0.0388,
      "step": 8500
    },
    {
      "epoch": 6.82,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.0413,
      "step": 9000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9021937847137451,
      "eval_loss": 0.6991991400718689,
      "eval_runtime": 2.1868,
      "eval_samples_per_second": 500.277,
      "eval_steps_per_second": 62.649,
      "step": 9240
    },
    {
      "epoch": 7.2,
      "learning_rate": 2.6010101010101013e-05,
      "loss": 0.046,
      "step": 9500
    },
    {
      "epoch": 7.58,
      "learning_rate": 2.474747474747475e-05,
      "loss": 0.0295,
      "step": 10000
    },
    {
      "epoch": 7.95,
      "learning_rate": 2.3484848484848487e-05,
      "loss": 0.0291,
      "step": 10500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8921389579772949,
      "eval_loss": 0.7643230557441711,
      "eval_runtime": 2.2005,
      "eval_samples_per_second": 497.161,
      "eval_steps_per_second": 62.259,
      "step": 10560
    },
    {
      "epoch": 8.33,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.0236,
      "step": 11000
    },
    {
      "epoch": 8.71,
      "learning_rate": 2.095959595959596e-05,
      "loss": 0.032,
      "step": 11500
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9113345742225647,
      "eval_loss": 0.6661025285720825,
      "eval_runtime": 2.0869,
      "eval_samples_per_second": 524.224,
      "eval_steps_per_second": 65.648,
      "step": 11880
    },
    {
      "epoch": 9.09,
      "learning_rate": 1.9696969696969697e-05,
      "loss": 0.0323,
      "step": 12000
    },
    {
      "epoch": 9.47,
      "learning_rate": 1.8434343434343433e-05,
      "loss": 0.0158,
      "step": 12500
    },
    {
      "epoch": 9.85,
      "learning_rate": 1.7171717171717173e-05,
      "loss": 0.027,
      "step": 13000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9085923433303833,
      "eval_loss": 0.6882250308990479,
      "eval_runtime": 2.2029,
      "eval_samples_per_second": 496.616,
      "eval_steps_per_second": 62.19,
      "step": 13200
    },
    {
      "epoch": 10.23,
      "learning_rate": 1.590909090909091e-05,
      "loss": 0.0147,
      "step": 13500
    },
    {
      "epoch": 10.61,
      "learning_rate": 1.4646464646464647e-05,
      "loss": 0.0125,
      "step": 14000
    },
    {
      "epoch": 10.98,
      "learning_rate": 1.3383838383838385e-05,
      "loss": 0.0241,
      "step": 14500
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9040219187736511,
      "eval_loss": 0.7250856757164001,
      "eval_runtime": 2.0725,
      "eval_samples_per_second": 527.862,
      "eval_steps_per_second": 66.103,
      "step": 14520
    },
    {
      "epoch": 11.36,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 0.0108,
      "step": 15000
    },
    {
      "epoch": 11.74,
      "learning_rate": 1.085858585858586e-05,
      "loss": 0.0109,
      "step": 15500
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8976234197616577,
      "eval_loss": 0.7604121565818787,
      "eval_runtime": 2.2757,
      "eval_samples_per_second": 480.738,
      "eval_steps_per_second": 60.202,
      "step": 15840
    },
    {
      "epoch": 12.12,
      "learning_rate": 9.595959595959595e-06,
      "loss": 0.0122,
      "step": 16000
    },
    {
      "epoch": 12.5,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0058,
      "step": 16500
    },
    {
      "epoch": 12.88,
      "learning_rate": 7.0707070707070704e-06,
      "loss": 0.0056,
      "step": 17000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9040219187736511,
      "eval_loss": 0.7356353998184204,
      "eval_runtime": 2.1586,
      "eval_samples_per_second": 506.799,
      "eval_steps_per_second": 63.466,
      "step": 17160
    },
    {
      "epoch": 13.26,
      "learning_rate": 5.808080808080808e-06,
      "loss": 0.0055,
      "step": 17500
    },
    {
      "epoch": 13.64,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.0018,
      "step": 18000
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9076782464981079,
      "eval_loss": 0.7189434170722961,
      "eval_runtime": 2.0825,
      "eval_samples_per_second": 525.332,
      "eval_steps_per_second": 65.787,
      "step": 18480
    },
    {
      "epoch": 14.02,
      "learning_rate": 3.2828282828282835e-06,
      "loss": 0.0067,
      "step": 18500
    },
    {
      "epoch": 14.39,
      "learning_rate": 2.0202020202020206e-06,
      "loss": 0.0023,
      "step": 19000
    },
    {
      "epoch": 14.77,
      "learning_rate": 7.575757575757576e-07,
      "loss": 0.0017,
      "step": 19500
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9067641496658325,
      "eval_loss": 0.7214756608009338,
      "eval_runtime": 2.1084,
      "eval_samples_per_second": 518.881,
      "eval_steps_per_second": 64.979,
      "step": 19800
    },
    {
      "epoch": 15.0,
      "step": 19800,
      "total_flos": 5215995096399360.0,
      "train_loss": 0.27693930435361286,
      "train_runtime": 2167.3602,
      "train_samples_per_second": 73.057,
      "train_steps_per_second": 9.136
    }
  ],
  "max_steps": 19800,
  "num_train_epochs": 15,
  "total_flos": 5215995096399360.0,
  "trial_name": null,
  "trial_params": null
}