|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.0,
  "global_step": 19800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.38,
      "learning_rate": 1.9494949494949496e-05,
      "loss": 4.4759,
      "step": 500
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.8989898989898993e-05,
      "loss": 3.4666,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5767824649810791,
      "eval_loss": 2.3355202674865723,
      "eval_runtime": 2.6806,
      "eval_samples_per_second": 408.112,
      "eval_steps_per_second": 51.107,
      "step": 1320
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.8484848484848487e-05,
      "loss": 2.6641,
      "step": 1500
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.797979797979798e-05,
      "loss": 1.9312,
      "step": 2000
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.7474747474747475e-05,
      "loss": 1.5293,
      "step": 2500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8144423961639404,
      "eval_loss": 1.1118409633636475,
      "eval_runtime": 2.7222,
      "eval_samples_per_second": 401.874,
      "eval_steps_per_second": 50.326,
      "step": 2640
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.6969696969696972e-05,
      "loss": 1.0511,
      "step": 3000
    },
    {
      "epoch": 2.65,
      "learning_rate": 1.6464646464646466e-05,
      "loss": 0.8031,
      "step": 3500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.880255937576294,
      "eval_loss": 0.6362389922142029,
      "eval_runtime": 2.657,
      "eval_samples_per_second": 411.738,
      "eval_steps_per_second": 51.561,
      "step": 3960
    },
    {
      "epoch": 3.03,
      "learning_rate": 1.595959595959596e-05,
      "loss": 0.5864,
      "step": 4000
    },
    {
      "epoch": 3.41,
      "learning_rate": 1.5454545454545454e-05,
      "loss": 0.3632,
      "step": 4500
    },
    {
      "epoch": 3.79,
      "learning_rate": 1.4949494949494952e-05,
      "loss": 0.2985,
      "step": 5000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8957952260971069,
      "eval_loss": 0.5118863582611084,
      "eval_runtime": 2.6864,
      "eval_samples_per_second": 407.231,
      "eval_steps_per_second": 50.997,
      "step": 5280
    },
    {
      "epoch": 4.17,
      "learning_rate": 1.4444444444444446e-05,
      "loss": 0.2188,
      "step": 5500
    },
    {
      "epoch": 4.55,
      "learning_rate": 1.3939393939393942e-05,
      "loss": 0.1478,
      "step": 6000
    },
    {
      "epoch": 4.92,
      "learning_rate": 1.3434343434343436e-05,
      "loss": 0.1284,
      "step": 6500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8930529952049255,
      "eval_loss": 0.5023291707038879,
      "eval_runtime": 2.7918,
      "eval_samples_per_second": 391.858,
      "eval_steps_per_second": 49.072,
      "step": 6600
    },
    {
      "epoch": 5.3,
      "learning_rate": 1.2929292929292931e-05,
      "loss": 0.0608,
      "step": 7000
    },
    {
      "epoch": 5.68,
      "learning_rate": 1.2424242424242425e-05,
      "loss": 0.0842,
      "step": 7500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9021937847137451,
      "eval_loss": 0.5246109962463379,
      "eval_runtime": 2.6464,
      "eval_samples_per_second": 413.391,
      "eval_steps_per_second": 51.768,
      "step": 7920
    },
    {
      "epoch": 6.06,
      "learning_rate": 1.191919191919192e-05,
      "loss": 0.064,
      "step": 8000
    },
    {
      "epoch": 6.44,
      "learning_rate": 1.1414141414141415e-05,
      "loss": 0.0461,
      "step": 8500
    },
    {
      "epoch": 6.82,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 0.0414,
      "step": 9000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9012796878814697,
      "eval_loss": 0.5580916404724121,
      "eval_runtime": 2.6062,
      "eval_samples_per_second": 419.767,
      "eval_steps_per_second": 52.567,
      "step": 9240
    },
    {
      "epoch": 7.2,
      "learning_rate": 1.0404040404040405e-05,
      "loss": 0.0476,
      "step": 9500
    },
    {
      "epoch": 7.58,
      "learning_rate": 9.8989898989899e-06,
      "loss": 0.0333,
      "step": 10000
    },
    {
      "epoch": 7.95,
      "learning_rate": 9.393939393939396e-06,
      "loss": 0.0372,
      "step": 10500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9003656506538391,
      "eval_loss": 0.5721055269241333,
      "eval_runtime": 3.0748,
      "eval_samples_per_second": 355.794,
      "eval_steps_per_second": 44.556,
      "step": 10560
    },
    {
      "epoch": 8.33,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.0276,
      "step": 11000
    },
    {
      "epoch": 8.71,
      "learning_rate": 8.383838383838384e-06,
      "loss": 0.0292,
      "step": 11500
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9140768051147461,
      "eval_loss": 0.5468941926956177,
      "eval_runtime": 3.0391,
      "eval_samples_per_second": 359.977,
      "eval_steps_per_second": 45.079,
      "step": 11880
    },
    {
      "epoch": 9.09,
      "learning_rate": 7.87878787878788e-06,
      "loss": 0.0298,
      "step": 12000
    },
    {
      "epoch": 9.47,
      "learning_rate": 7.373737373737374e-06,
      "loss": 0.0209,
      "step": 12500
    },
    {
      "epoch": 9.85,
      "learning_rate": 6.868686868686869e-06,
      "loss": 0.0257,
      "step": 13000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9058501124382019,
      "eval_loss": 0.5871404409408569,
      "eval_runtime": 2.8242,
      "eval_samples_per_second": 387.373,
      "eval_steps_per_second": 48.51,
      "step": 13200
    },
    {
      "epoch": 10.23,
      "learning_rate": 6.363636363636364e-06,
      "loss": 0.0205,
      "step": 13500
    },
    {
      "epoch": 10.61,
      "learning_rate": 5.858585858585859e-06,
      "loss": 0.0145,
      "step": 14000
    },
    {
      "epoch": 10.98,
      "learning_rate": 5.353535353535354e-06,
      "loss": 0.0189,
      "step": 14500
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9049360156059265,
      "eval_loss": 0.6180957555770874,
      "eval_runtime": 2.9518,
      "eval_samples_per_second": 370.627,
      "eval_steps_per_second": 46.413,
      "step": 14520
    },
    {
      "epoch": 11.36,
      "learning_rate": 4.848484848484849e-06,
      "loss": 0.0117,
      "step": 15000
    },
    {
      "epoch": 11.74,
      "learning_rate": 4.343434343434344e-06,
      "loss": 0.0104,
      "step": 15500
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9067641496658325,
      "eval_loss": 0.618419349193573,
      "eval_runtime": 2.8761,
      "eval_samples_per_second": 380.381,
      "eval_steps_per_second": 47.635,
      "step": 15840
    },
    {
      "epoch": 12.12,
      "learning_rate": 3.8383838383838385e-06,
      "loss": 0.0155,
      "step": 16000
    },
    {
      "epoch": 12.5,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0087,
      "step": 16500
    },
    {
      "epoch": 12.88,
      "learning_rate": 2.8282828282828286e-06,
      "loss": 0.009,
      "step": 17000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9049360156059265,
      "eval_loss": 0.6013244986534119,
      "eval_runtime": 2.7727,
      "eval_samples_per_second": 394.554,
      "eval_steps_per_second": 49.409,
      "step": 17160
    },
    {
      "epoch": 13.26,
      "learning_rate": 2.3232323232323234e-06,
      "loss": 0.0078,
      "step": 17500
    },
    {
      "epoch": 13.64,
      "learning_rate": 1.8181818181818183e-06,
      "loss": 0.0051,
      "step": 18000
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9058501124382019,
      "eval_loss": 0.620483934879303,
      "eval_runtime": 3.0663,
      "eval_samples_per_second": 356.786,
      "eval_steps_per_second": 44.68,
      "step": 18480
    },
    {
      "epoch": 14.02,
      "learning_rate": 1.3131313131313134e-06,
      "loss": 0.0091,
      "step": 18500
    },
    {
      "epoch": 14.39,
      "learning_rate": 8.080808080808082e-07,
      "loss": 0.0046,
      "step": 19000
    },
    {
      "epoch": 14.77,
      "learning_rate": 3.0303030303030305e-07,
      "loss": 0.0035,
      "step": 19500
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9067641496658325,
      "eval_loss": 0.6223405599594116,
      "eval_runtime": 3.0619,
      "eval_samples_per_second": 357.298,
      "eval_steps_per_second": 44.744,
      "step": 19800
    },
    {
      "epoch": 15.0,
      "step": 19800,
      "total_flos": 5215995096399360.0,
      "train_loss": 0.4635289347472817,
      "train_runtime": 2578.0344,
      "train_samples_per_second": 61.419,
      "train_steps_per_second": 7.68
    }
  ],
  "max_steps": 19800,
  "num_train_epochs": 15,
  "total_flos": 5215995096399360.0,
  "trial_name": null,
  "trial_params": null
}
|
|