{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.696531791907514,
  "global_step": 26000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "learning_rate": 6.020469596628538e-06,
      "loss": 3.3245,
      "step": 1000
    },
    {
      "epoch": 0.18,
      "eval_exact_match": 69.29990539262063,
      "eval_f1": 79.5326236755824,
      "step": 1000
    },
    {
      "epoch": 0.36,
      "learning_rate": 9.869710596102849e-06,
      "loss": 1.3078,
      "step": 2000
    },
    {
      "epoch": 0.36,
      "eval_exact_match": 79.11069063386944,
      "eval_f1": 87.17731552605335,
      "step": 2000
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.485376071332488e-06,
      "loss": 1.0887,
      "step": 3000
    },
    {
      "epoch": 0.54,
      "eval_exact_match": 81.40964995269631,
      "eval_f1": 88.70193797570302,
      "step": 3000
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.101041546562128e-06,
      "loss": 1.0038,
      "step": 4000
    },
    {
      "epoch": 0.72,
      "eval_exact_match": 82.98959318826869,
      "eval_f1": 90.04823015326417,
      "step": 4000
    },
    {
      "epoch": 0.9,
      "learning_rate": 8.716707021791769e-06,
      "loss": 0.9564,
      "step": 5000
    },
    {
      "epoch": 0.9,
      "eval_exact_match": 83.6802270577105,
      "eval_f1": 90.58225811526646,
      "step": 5000
    },
    {
      "epoch": 1.08,
      "learning_rate": 8.332372497021408e-06,
      "loss": 0.8923,
      "step": 6000
    },
    {
      "epoch": 1.08,
      "eval_exact_match": 83.91674550614948,
      "eval_f1": 90.87384438135182,
      "step": 6000
    },
    {
      "epoch": 1.26,
      "learning_rate": 7.948037972251048e-06,
      "loss": 0.8145,
      "step": 7000
    },
    {
      "epoch": 1.26,
      "eval_exact_match": 84.65468306527909,
      "eval_f1": 91.29700250894122,
      "step": 7000
    },
    {
      "epoch": 1.45,
      "learning_rate": 7.563703447480688e-06,
      "loss": 0.816,
      "step": 8000
    },
    {
      "epoch": 1.45,
      "eval_exact_match": 84.75875118259225,
      "eval_f1": 91.47394559853937,
      "step": 8000
    },
    {
      "epoch": 1.63,
      "learning_rate": 7.179368922710328e-06,
      "loss": 0.7832,
      "step": 9000
    },
    {
      "epoch": 1.63,
      "eval_exact_match": 85.36423841059603,
      "eval_f1": 91.79656306794526,
      "step": 9000
    },
    {
      "epoch": 1.81,
      "learning_rate": 6.795034397939968e-06,
      "loss": 0.8032,
      "step": 10000
    },
    {
      "epoch": 1.81,
      "eval_exact_match": 84.95742667928099,
      "eval_f1": 91.6847055956044,
      "step": 10000
    },
    {
      "epoch": 1.99,
      "learning_rate": 6.410699873169607e-06,
      "loss": 0.7782,
      "step": 11000
    },
    {
      "epoch": 1.99,
      "eval_exact_match": 85.51561021759697,
      "eval_f1": 91.86846688872524,
      "step": 11000
    },
    {
      "epoch": 2.17,
      "learning_rate": 6.026365348399247e-06,
      "loss": 0.6575,
      "step": 12000
    },
    {
      "epoch": 2.17,
      "eval_exact_match": 85.4399243140965,
      "eval_f1": 91.69142033782958,
      "step": 12000
    },
    {
      "epoch": 2.35,
      "learning_rate": 5.6420308236288864e-06,
      "loss": 0.6717,
      "step": 13000
    },
    {
      "epoch": 2.35,
      "eval_exact_match": 85.26963103122044,
      "eval_f1": 91.80508749286186,
      "step": 13000
    },
    {
      "epoch": 2.53,
      "learning_rate": 5.257696298858527e-06,
      "loss": 0.6647,
      "step": 14000
    },
    {
      "epoch": 2.53,
      "eval_exact_match": 85.42100283822138,
      "eval_f1": 91.80456676699409,
      "step": 14000
    },
    {
      "epoch": 2.71,
      "learning_rate": 4.873361774088167e-06,
      "loss": 0.6583,
      "step": 15000
    },
    {
      "epoch": 2.71,
      "eval_exact_match": 85.72374645222327,
      "eval_f1": 92.1390421209737,
      "step": 15000
    },
    {
      "epoch": 2.89,
      "learning_rate": 4.4890272493178065e-06,
      "loss": 0.652,
      "step": 16000
    },
    {
      "epoch": 2.89,
      "eval_exact_match": 85.73320719016083,
      "eval_f1": 92.03736167459341,
      "step": 16000
    },
    {
      "epoch": 3.07,
      "learning_rate": 4.104692724547447e-06,
      "loss": 0.6102,
      "step": 17000
    },
    {
      "epoch": 3.07,
      "eval_exact_match": 85.26017029328288,
      "eval_f1": 91.89783353916026,
      "step": 17000
    },
    {
      "epoch": 3.25,
      "learning_rate": 3.720358199777086e-06,
      "loss": 0.5709,
      "step": 18000
    },
    {
      "epoch": 3.25,
      "eval_exact_match": 85.47776726584674,
      "eval_f1": 91.91999508342143,
      "step": 18000
    },
    {
      "epoch": 3.43,
      "learning_rate": 3.3360236750067258e-06,
      "loss": 0.5745,
      "step": 19000
    },
    {
      "epoch": 3.43,
      "eval_exact_match": 85.3926206244087,
      "eval_f1": 91.91182535908425,
      "step": 19000
    },
    {
      "epoch": 3.61,
      "learning_rate": 2.951689150236366e-06,
      "loss": 0.564,
      "step": 20000
    },
    {
      "epoch": 3.61,
      "eval_exact_match": 85.45884578997162,
      "eval_f1": 91.93228761137887,
      "step": 20000
    },
    {
      "epoch": 3.79,
      "learning_rate": 2.567354625466006e-06,
      "loss": 0.5774,
      "step": 21000
    },
    {
      "epoch": 3.79,
      "eval_exact_match": 85.81835383159887,
      "eval_f1": 92.14088561726169,
      "step": 21000
    },
    {
      "epoch": 3.97,
      "learning_rate": 2.1830201006956455e-06,
      "loss": 0.5667,
      "step": 22000
    },
    {
      "epoch": 3.97,
      "eval_exact_match": 85.71428571428571,
      "eval_f1": 92.11242630306818,
      "step": 22000
    },
    {
      "epoch": 4.15,
      "learning_rate": 1.7986855759252856e-06,
      "loss": 0.514,
      "step": 23000
    },
    {
      "epoch": 4.15,
      "eval_exact_match": 85.88457899716178,
      "eval_f1": 92.24928398942093,
      "step": 23000
    },
    {
      "epoch": 4.34,
      "learning_rate": 1.4143510511549255e-06,
      "loss": 0.5074,
      "step": 24000
    },
    {
      "epoch": 4.34,
      "eval_exact_match": 85.88457899716178,
      "eval_f1": 92.20790215772705,
      "step": 24000
    },
    {
      "epoch": 4.52,
      "learning_rate": 1.0300165263845651e-06,
      "loss": 0.5195,
      "step": 25000
    },
    {
      "epoch": 4.52,
      "eval_exact_match": 85.72374645222327,
      "eval_f1": 92.19634199090316,
      "step": 25000
    },
    {
      "epoch": 4.7,
      "learning_rate": 6.45682001614205e-07,
      "loss": 0.5049,
      "step": 26000
    },
    {
      "epoch": 4.7,
      "eval_exact_match": 85.82781456953643,
      "eval_f1": 92.25386967488927,
      "step": 26000
    }
  ],
  "max_steps": 27680,
  "num_train_epochs": 5,
  "total_flos": 8.151831697947034e+16,
  "trial_name": null,
  "trial_params": null
}