{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 30.0,
  "global_step": 9900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.22486288845539093,
      "eval_loss": 3.9691851139068604,
      "eval_runtime": 3.5766,
      "eval_samples_per_second": 305.88,
      "eval_steps_per_second": 38.305,
      "step": 330
    },
    {
      "epoch": 1.52,
      "learning_rate": 9.494949494949497e-06,
      "loss": 4.3672,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.4031078517436981,
      "eval_loss": 3.1312456130981445,
      "eval_runtime": 3.5565,
      "eval_samples_per_second": 307.607,
      "eval_steps_per_second": 38.521,
      "step": 660
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5658135414123535,
      "eval_loss": 2.5068106651306152,
      "eval_runtime": 3.6412,
      "eval_samples_per_second": 300.452,
      "eval_steps_per_second": 37.625,
      "step": 990
    },
    {
      "epoch": 3.03,
      "learning_rate": 8.98989898989899e-06,
      "loss": 3.1495,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6599634289741516,
      "eval_loss": 2.029958724975586,
      "eval_runtime": 3.6051,
      "eval_samples_per_second": 303.46,
      "eval_steps_per_second": 38.002,
      "step": 1320
    },
    {
      "epoch": 4.55,
      "learning_rate": 8.484848484848486e-06,
      "loss": 2.2491,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7449725866317749,
      "eval_loss": 1.651670217514038,
      "eval_runtime": 3.5564,
      "eval_samples_per_second": 307.618,
      "eval_steps_per_second": 38.523,
      "step": 1650
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.79433274269104,
      "eval_loss": 1.3604321479797363,
      "eval_runtime": 3.5912,
      "eval_samples_per_second": 304.631,
      "eval_steps_per_second": 38.148,
      "step": 1980
    },
    {
      "epoch": 6.06,
      "learning_rate": 7.97979797979798e-06,
      "loss": 1.622,
      "step": 2000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8327239751815796,
      "eval_loss": 1.132811427116394,
      "eval_runtime": 3.5858,
      "eval_samples_per_second": 305.092,
      "eval_steps_per_second": 38.206,
      "step": 2310
    },
    {
      "epoch": 7.58,
      "learning_rate": 7.474747474747476e-06,
      "loss": 1.1252,
      "step": 2500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8610603213310242,
      "eval_loss": 0.9484481811523438,
      "eval_runtime": 3.5674,
      "eval_samples_per_second": 306.664,
      "eval_steps_per_second": 38.403,
      "step": 2640
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8756855726242065,
      "eval_loss": 0.8212197422981262,
      "eval_runtime": 3.5581,
      "eval_samples_per_second": 307.464,
      "eval_steps_per_second": 38.503,
      "step": 2970
    },
    {
      "epoch": 9.09,
      "learning_rate": 6.969696969696971e-06,
      "loss": 0.7969,
      "step": 3000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8829981684684753,
      "eval_loss": 0.7243201732635498,
      "eval_runtime": 3.5763,
      "eval_samples_per_second": 305.906,
      "eval_steps_per_second": 38.308,
      "step": 3300
    },
    {
      "epoch": 10.61,
      "learning_rate": 6.464646464646466e-06,
      "loss": 0.5348,
      "step": 3500
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.8866544961929321,
      "eval_loss": 0.6596779227256775,
      "eval_runtime": 3.5885,
      "eval_samples_per_second": 304.866,
      "eval_steps_per_second": 38.178,
      "step": 3630
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8857403993606567,
      "eval_loss": 0.598337709903717,
      "eval_runtime": 3.6039,
      "eval_samples_per_second": 303.56,
      "eval_steps_per_second": 38.014,
      "step": 3960
    },
    {
      "epoch": 12.12,
      "learning_rate": 5.95959595959596e-06,
      "loss": 0.3744,
      "step": 4000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.8976234197616577,
      "eval_loss": 0.5634561777114868,
      "eval_runtime": 3.5353,
      "eval_samples_per_second": 309.453,
      "eval_steps_per_second": 38.752,
      "step": 4290
    },
    {
      "epoch": 13.64,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 0.2564,
      "step": 4500
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.8985374569892883,
      "eval_loss": 0.5437070727348328,
      "eval_runtime": 3.5598,
      "eval_samples_per_second": 307.319,
      "eval_steps_per_second": 38.485,
      "step": 4620
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9012796878814697,
      "eval_loss": 0.5124027132987976,
      "eval_runtime": 3.5534,
      "eval_samples_per_second": 307.874,
      "eval_steps_per_second": 38.555,
      "step": 4950
    },
    {
      "epoch": 15.15,
      "learning_rate": 4.94949494949495e-06,
      "loss": 0.1862,
      "step": 5000
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9021937847137451,
      "eval_loss": 0.5074306130409241,
      "eval_runtime": 3.5582,
      "eval_samples_per_second": 307.456,
      "eval_steps_per_second": 38.502,
      "step": 5280
    },
    {
      "epoch": 16.67,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.1349,
      "step": 5500
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.9049360156059265,
      "eval_loss": 0.5027920007705688,
      "eval_runtime": 3.5652,
      "eval_samples_per_second": 306.859,
      "eval_steps_per_second": 38.427,
      "step": 5610
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.9076782464981079,
      "eval_loss": 0.4876061677932739,
      "eval_runtime": 3.5354,
      "eval_samples_per_second": 309.439,
      "eval_steps_per_second": 38.751,
      "step": 5940
    },
    {
      "epoch": 18.18,
      "learning_rate": 3.93939393939394e-06,
      "loss": 0.0979,
      "step": 6000
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.9049360156059265,
      "eval_loss": 0.4970707595348358,
      "eval_runtime": 3.5724,
      "eval_samples_per_second": 306.234,
      "eval_steps_per_second": 38.349,
      "step": 6270
    },
    {
      "epoch": 19.7,
      "learning_rate": 3.4343434343434347e-06,
      "loss": 0.0763,
      "step": 6500
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9021937847137451,
      "eval_loss": 0.4940781891345978,
      "eval_runtime": 3.5051,
      "eval_samples_per_second": 312.116,
      "eval_steps_per_second": 39.086,
      "step": 6600
    },
    {
      "epoch": 21.0,
      "eval_accuracy": 0.9049360156059265,
      "eval_loss": 0.4956739842891693,
      "eval_runtime": 3.5501,
      "eval_samples_per_second": 308.164,
      "eval_steps_per_second": 38.591,
      "step": 6930
    },
    {
      "epoch": 21.21,
      "learning_rate": 2.9292929292929295e-06,
      "loss": 0.0602,
      "step": 7000
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.9049360156059265,
      "eval_loss": 0.4989473819732666,
      "eval_runtime": 3.5171,
      "eval_samples_per_second": 311.053,
      "eval_steps_per_second": 38.953,
      "step": 7260
    },
    {
      "epoch": 22.73,
      "learning_rate": 2.4242424242424244e-06,
      "loss": 0.0504,
      "step": 7500
    },
    {
      "epoch": 23.0,
      "eval_accuracy": 0.9040219187736511,
      "eval_loss": 0.49591735005378723,
      "eval_runtime": 3.5103,
      "eval_samples_per_second": 311.654,
      "eval_steps_per_second": 39.028,
      "step": 7590
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.9031078815460205,
      "eval_loss": 0.4943903982639313,
      "eval_runtime": 3.7255,
      "eval_samples_per_second": 293.651,
      "eval_steps_per_second": 36.774,
      "step": 7920
    },
    {
      "epoch": 24.24,
      "learning_rate": 1.9191919191919192e-06,
      "loss": 0.0422,
      "step": 8000
    },
    {
      "epoch": 25.0,
      "eval_accuracy": 0.9040219187736511,
      "eval_loss": 0.4985043704509735,
      "eval_runtime": 3.6733,
      "eval_samples_per_second": 297.824,
      "eval_steps_per_second": 37.296,
      "step": 8250
    },
    {
      "epoch": 25.76,
      "learning_rate": 1.4141414141414143e-06,
      "loss": 0.0379,
      "step": 8500
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.9049360156059265,
      "eval_loss": 0.4970065951347351,
      "eval_runtime": 3.7215,
      "eval_samples_per_second": 293.971,
      "eval_steps_per_second": 36.814,
      "step": 8580
    },
    {
      "epoch": 27.0,
      "eval_accuracy": 0.9040219187736511,
      "eval_loss": 0.4949011206626892,
      "eval_runtime": 3.6072,
      "eval_samples_per_second": 303.279,
      "eval_steps_per_second": 37.979,
      "step": 8910
    },
    {
      "epoch": 27.27,
      "learning_rate": 9.090909090909091e-07,
      "loss": 0.0351,
      "step": 9000
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.9040219187736511,
      "eval_loss": 0.49713513255119324,
      "eval_runtime": 3.5069,
      "eval_samples_per_second": 311.955,
      "eval_steps_per_second": 39.066,
      "step": 9240
    },
    {
      "epoch": 28.79,
      "learning_rate": 4.040404040404041e-07,
      "loss": 0.0321,
      "step": 9500
    },
    {
      "epoch": 29.0,
      "eval_accuracy": 0.9031078815460205,
      "eval_loss": 0.49672842025756836,
      "eval_runtime": 3.506,
      "eval_samples_per_second": 312.034,
      "eval_steps_per_second": 39.076,
      "step": 9570
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.9031078815460205,
      "eval_loss": 0.4978463649749756,
      "eval_runtime": 3.6907,
      "eval_samples_per_second": 296.421,
      "eval_steps_per_second": 37.12,
      "step": 9900
    },
    {
      "epoch": 30.0,
      "step": 9900,
      "total_flos": 2.086398038559744e+16,
      "train_loss": 0.7703812461429173,
      "train_runtime": 4144.3399,
      "train_samples_per_second": 76.413,
      "train_steps_per_second": 2.389
    }
  ],
  "max_steps": 9900,
  "num_train_epochs": 30,
  "total_flos": 2.086398038559744e+16,
  "trial_name": null,
  "trial_params": null
}