{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5207485760781123,
  "global_step": 320,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 7.894736842105263e-06,
      "loss": 0.8762,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.5789473684210526e-05,
      "loss": 0.7783,
      "step": 10
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.368421052631579e-05,
      "loss": 0.7497,
      "step": 15
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.9999790913463623e-05,
      "loss": 0.6797,
      "step": 20
    },
    {
      "epoch": 0.03,
      "eval_accuracy": 0.6140000224113464,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6632643938064575,
      "eval_runtime": 64.3151,
      "eval_samples_per_second": 7.774,
      "eval_steps_per_second": 0.249,
      "step": 20
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.9992473496712718e-05,
      "loss": 0.676,
      "step": 25
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.9974707581369814e-05,
      "loss": 0.6647,
      "step": 30
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.994650554879023e-05,
      "loss": 0.6657,
      "step": 35
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.9907887053429107e-05,
      "loss": 0.6742,
      "step": 40
    },
    {
      "epoch": 0.07,
      "eval_accuracy": 0.6060000061988831,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6763938069343567,
      "eval_runtime": 64.3111,
      "eval_samples_per_second": 7.775,
      "eval_steps_per_second": 0.249,
      "step": 40
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.985887900914388e-05,
      "loss": 0.6677,
      "step": 45
    },
    {
      "epoch": 0.08,
      "learning_rate": 2.9799515570437597e-05,
      "loss": 0.6838,
      "step": 50
    },
    {
      "epoch": 0.09,
      "learning_rate": 2.972983810865608e-05,
      "loss": 0.6872,
      "step": 55
    },
    {
      "epoch": 0.1,
      "learning_rate": 2.9649895183155556e-05,
      "loss": 0.6794,
      "step": 60
    },
    {
      "epoch": 0.1,
      "eval_accuracy": 0.5860000252723694,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6799845099449158,
      "eval_runtime": 64.3206,
      "eval_samples_per_second": 7.774,
      "eval_steps_per_second": 0.249,
      "step": 60
    },
    {
      "epoch": 0.11,
      "learning_rate": 2.9559742507460873e-05,
      "loss": 0.6693,
      "step": 65
    },
    {
      "epoch": 0.11,
      "learning_rate": 2.9459442910437798e-05,
      "loss": 0.6751,
      "step": 70
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.9349066292506613e-05,
      "loss": 0.6702,
      "step": 75
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.9228689576927327e-05,
      "loss": 0.6596,
      "step": 80
    },
    {
      "epoch": 0.13,
      "eval_accuracy": 0.5879999995231628,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6791808009147644,
      "eval_runtime": 64.2883,
      "eval_samples_per_second": 7.777,
      "eval_steps_per_second": 0.249,
      "step": 80
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.909839665619062e-05,
      "loss": 0.6523,
      "step": 85
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.8958278333551827e-05,
      "loss": 0.668,
      "step": 90
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.8808432259748648e-05,
      "loss": 0.6351,
      "step": 95
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.864896286494674e-05,
      "loss": 0.6585,
      "step": 100
    },
    {
      "epoch": 0.16,
      "eval_accuracy": 0.5820000171661377,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6792215704917908,
      "eval_runtime": 64.2728,
      "eval_samples_per_second": 7.779,
      "eval_steps_per_second": 0.249,
      "step": 100
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.8479981285960694e-05,
      "loss": 0.697,
      "step": 105
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.830160528880093e-05,
      "loss": 0.6539,
      "step": 110
    },
    {
      "epoch": 0.19,
      "learning_rate": 2.8113959186600674e-05,
      "loss": 0.6727,
      "step": 115
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.7917173752980103e-05,
      "loss": 0.6458,
      "step": 120
    },
    {
      "epoch": 0.2,
      "eval_accuracy": 0.5699999928474426,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6816621422767639,
      "eval_runtime": 64.3027,
      "eval_samples_per_second": 7.776,
      "eval_steps_per_second": 0.249,
      "step": 120
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.77113861309081e-05,
      "loss": 0.682,
      "step": 125
    },
    {
      "epoch": 0.21,
      "learning_rate": 2.7496739737125063e-05,
      "loss": 0.6664,
      "step": 130
    },
    {
      "epoch": 0.22,
      "learning_rate": 2.7273384162193462e-05,
      "loss": 0.6493,
      "step": 135
    },
    {
      "epoch": 0.23,
      "learning_rate": 2.7041475066245742e-05,
      "loss": 0.6585,
      "step": 140
    },
    {
      "epoch": 0.23,
      "eval_accuracy": 0.5659999847412109,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.679412841796875,
      "eval_runtime": 64.3077,
      "eval_samples_per_second": 7.775,
      "eval_steps_per_second": 0.249,
      "step": 140
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.6801174070502248e-05,
      "loss": 0.6326,
      "step": 145
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.6552648644634765e-05,
      "loss": 0.6597,
      "step": 150
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.6296071990054167e-05,
      "loss": 0.6495,
      "step": 155
    },
    {
      "epoch": 0.26,
      "learning_rate": 2.603162291920356e-05,
      "loss": 0.6953,
      "step": 160
    },
    {
      "epoch": 0.26,
      "eval_accuracy": 0.5619999766349792,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6798678636550903,
      "eval_runtime": 64.3074,
      "eval_samples_per_second": 7.775,
      "eval_steps_per_second": 0.249,
      "step": 160
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.575948573094098e-05,
      "loss": 0.6436,
      "step": 165
    },
    {
      "epoch": 0.28,
      "learning_rate": 2.5479850082098485e-05,
      "loss": 0.6527,
      "step": 170
    },
    {
      "epoch": 0.28,
      "learning_rate": 2.5192910855307295e-05,
      "loss": 0.6434,
      "step": 175
    },
    {
      "epoch": 0.29,
      "learning_rate": 2.4898868023180844e-05,
      "loss": 0.6586,
      "step": 180
    },
    {
      "epoch": 0.29,
      "eval_accuracy": 0.5879999995231628,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6790176033973694,
      "eval_runtime": 64.3234,
      "eval_samples_per_second": 7.773,
      "eval_steps_per_second": 0.249,
      "step": 180
    },
    {
      "epoch": 0.3,
      "learning_rate": 2.4597926508950648e-05,
      "loss": 0.6444,
      "step": 185
    },
    {
      "epoch": 0.31,
      "learning_rate": 2.429029604365198e-05,
      "loss": 0.657,
      "step": 190
    },
    {
      "epoch": 0.32,
      "learning_rate": 2.3976191019958896e-05,
      "loss": 0.6346,
      "step": 195
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.3655830342770463e-05,
      "loss": 0.669,
      "step": 200
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.6119999885559082,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6743248105049133,
      "eval_runtime": 64.2981,
      "eval_samples_per_second": 7.776,
      "eval_steps_per_second": 0.249,
      "step": 200
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.3329437276652424e-05,
      "loss": 0.6295,
      "step": 205
    },
    {
      "epoch": 0.34,
      "learning_rate": 2.299723929024046e-05,
      "loss": 0.6244,
      "step": 210
    },
    {
      "epoch": 0.35,
      "learning_rate": 2.2659467897713604e-05,
      "loss": 0.6341,
      "step": 215
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.231635849744825e-05,
      "loss": 0.6376,
      "step": 220
    },
    {
      "epoch": 0.36,
      "eval_accuracy": 0.5839999914169312,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6735034584999084,
      "eval_runtime": 64.3161,
      "eval_samples_per_second": 7.774,
      "eval_steps_per_second": 0.249,
      "step": 220
    },
    {
      "epoch": 0.37,
      "learning_rate": 2.196815020796519e-05,
      "loss": 0.6264,
      "step": 225
    },
    {
      "epoch": 0.37,
      "learning_rate": 2.161508570128403e-05,
      "loss": 0.6178,
      "step": 230
    },
    {
      "epoch": 0.38,
      "learning_rate": 2.1257411033801125e-05,
      "loss": 0.6578,
      "step": 235
    },
    {
      "epoch": 0.39,
      "learning_rate": 2.0895375474808857e-05,
      "loss": 0.6173,
      "step": 240
    },
    {
      "epoch": 0.39,
      "eval_accuracy": 0.5740000009536743,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6771048903465271,
      "eval_runtime": 64.3005,
      "eval_samples_per_second": 7.776,
      "eval_steps_per_second": 0.249,
      "step": 240
    },
    {
      "epoch": 0.4,
      "learning_rate": 2.052923133277581e-05,
      "loss": 0.64,
      "step": 245
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.0159233779508923e-05,
      "loss": 0.6173,
      "step": 250
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.9785640672320074e-05,
      "loss": 0.6104,
      "step": 255
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.9408712374321155e-05,
      "loss": 0.6295,
      "step": 260
    },
    {
      "epoch": 0.42,
      "eval_accuracy": 0.5960000157356262,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6679331064224243,
      "eval_runtime": 64.1981,
      "eval_samples_per_second": 7.788,
      "eval_steps_per_second": 0.249,
      "step": 260
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.9028711572972753e-05,
      "loss": 0.6366,
      "step": 265
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.864590309701302e-05,
      "loss": 0.6567,
      "step": 270
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.82605537318942e-05,
      "loss": 0.6397,
      "step": 275
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.7872932033855518e-05,
      "loss": 0.6311,
      "step": 280
    },
    {
      "epoch": 0.46,
      "eval_accuracy": 0.6000000238418579,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6692066788673401,
      "eval_runtime": 64.3111,
      "eval_samples_per_second": 7.775,
      "eval_steps_per_second": 0.249,
      "step": 280
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.748330814276195e-05,
      "loss": 0.6361,
      "step": 285
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.7091953593839383e-05,
      "loss": 0.6583,
      "step": 290
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.6699141128437286e-05,
      "loss": 0.6321,
      "step": 295
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.630514450395084e-05,
      "loss": 0.6294,
      "step": 300
    },
    {
      "epoch": 0.49,
      "eval_accuracy": 0.5899999737739563,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6700637340545654,
      "eval_runtime": 64.3279,
      "eval_samples_per_second": 7.773,
      "eval_steps_per_second": 0.249,
      "step": 300
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.591023830303493e-05,
      "loss": 0.6479,
      "step": 305
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.5514697742243067e-05,
      "loss": 0.6332,
      "step": 310
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.511879848022446e-05,
      "loss": 0.6076,
      "step": 315
    },
    {
      "epoch": 0.52,
      "learning_rate": 1.4722816425613054e-05,
      "loss": 0.6526,
      "step": 320
    },
    {
      "epoch": 0.52,
      "eval_accuracy": 0.6039999723434448,
      "eval_label_positive_rate": 0.5199999809265137,
      "eval_loss": 0.6673489212989807,
      "eval_runtime": 64.2956,
      "eval_samples_per_second": 7.777,
      "eval_steps_per_second": 0.249,
      "step": 320
    }
  ],
  "max_steps": 614,
  "num_train_epochs": 1,
  "total_flos": 1.1109883301467259e+19,
  "trial_name": null,
  "trial_params": null
}