{
  "best_metric": 0.8444921415218445,
  "best_model_checkpoint": "/gscratch/xlab/hallisky/style/models/multilabel/03-21-2023_21:54:15/checkpoint-4272",
  "epoch": 4.0,
  "global_step": 4272,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "learning_rate": 5e-06,
      "loss": 0.3538,
      "step": 267
    },
    {
      "epoch": 0.25,
      "eval_acc": 0.6889150453506889,
      "eval_loss": 0.15054994821548462,
      "eval_runtime": 41.4893,
      "eval_samples_per_second": 348.114,
      "eval_steps_per_second": 1.374,
      "step": 267
    },
    {
      "epoch": 0.5,
      "learning_rate": 1e-05,
      "loss": 0.1314,
      "step": 534
    },
    {
      "epoch": 0.5,
      "eval_acc": 0.7778162431627779,
      "eval_loss": 0.10427295416593552,
      "eval_runtime": 41.0952,
      "eval_samples_per_second": 351.453,
      "eval_steps_per_second": 1.387,
      "step": 534
    },
    {
      "epoch": 0.75,
      "learning_rate": 9.444444444444445e-06,
      "loss": 0.1031,
      "step": 801
    },
    {
      "epoch": 0.75,
      "eval_acc": 0.8037803780378038,
      "eval_loss": 0.0903468206524849,
      "eval_runtime": 41.1937,
      "eval_samples_per_second": 350.612,
      "eval_steps_per_second": 1.384,
      "step": 801
    },
    {
      "epoch": 1.0,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.0916,
      "step": 1068
    },
    {
      "epoch": 1.0,
      "eval_acc": 0.8165893512428166,
      "eval_loss": 0.08489712327718735,
      "eval_runtime": 41.1826,
      "eval_samples_per_second": 350.706,
      "eval_steps_per_second": 1.384,
      "step": 1068
    },
    {
      "epoch": 1.25,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0807,
      "step": 1335
    },
    {
      "epoch": 1.25,
      "eval_acc": 0.8217821782178217,
      "eval_loss": 0.0828377977013588,
      "eval_runtime": 41.1771,
      "eval_samples_per_second": 350.754,
      "eval_steps_per_second": 1.384,
      "step": 1335
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.77777777777778e-06,
      "loss": 0.0777,
      "step": 1602
    },
    {
      "epoch": 1.5,
      "eval_acc": 0.8273904313508273,
      "eval_loss": 0.07905308157205582,
      "eval_runtime": 41.1692,
      "eval_samples_per_second": 350.821,
      "eval_steps_per_second": 1.385,
      "step": 1602
    },
    {
      "epoch": 1.75,
      "learning_rate": 7.222222222222223e-06,
      "loss": 0.0748,
      "step": 1869
    },
    {
      "epoch": 1.75,
      "eval_acc": 0.8333448729488333,
      "eval_loss": 0.07718278467655182,
      "eval_runtime": 41.1712,
      "eval_samples_per_second": 350.804,
      "eval_steps_per_second": 1.384,
      "step": 1869
    },
    {
      "epoch": 2.0,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.0732,
      "step": 2136
    },
    {
      "epoch": 2.0,
      "eval_acc": 0.8379145606868379,
      "eval_loss": 0.07422468066215515,
      "eval_runtime": 41.1823,
      "eval_samples_per_second": 350.709,
      "eval_steps_per_second": 1.384,
      "step": 2136
    },
    {
      "epoch": 2.25,
      "learning_rate": 6.111111111111112e-06,
      "loss": 0.0635,
      "step": 2403
    },
    {
      "epoch": 2.25,
      "eval_acc": 0.8334141106418335,
      "eval_loss": 0.07540296018123627,
      "eval_runtime": 41.1954,
      "eval_samples_per_second": 350.597,
      "eval_steps_per_second": 1.384,
      "step": 2403
    },
    {
      "epoch": 2.5,
      "learning_rate": 5.555555555555557e-06,
      "loss": 0.0627,
      "step": 2670
    },
    {
      "epoch": 2.5,
      "eval_acc": 0.8368759952918369,
      "eval_loss": 0.07453104108572006,
      "eval_runtime": 41.204,
      "eval_samples_per_second": 350.524,
      "eval_steps_per_second": 1.383,
      "step": 2670
    },
    {
      "epoch": 2.75,
      "learning_rate": 5e-06,
      "loss": 0.0621,
      "step": 2937
    },
    {
      "epoch": 2.75,
      "eval_acc": 0.8378453229938378,
      "eval_loss": 0.07528460025787354,
      "eval_runtime": 41.1464,
      "eval_samples_per_second": 351.015,
      "eval_steps_per_second": 1.385,
      "step": 2937
    },
    {
      "epoch": 3.0,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.0616,
      "step": 3204
    },
    {
      "epoch": 3.0,
      "eval_acc": 0.8421380599598421,
      "eval_loss": 0.07257214188575745,
      "eval_runtime": 41.1192,
      "eval_samples_per_second": 351.247,
      "eval_steps_per_second": 1.386,
      "step": 3204
    },
    {
      "epoch": 3.25,
      "learning_rate": 3.88888888888889e-06,
      "loss": 0.0544,
      "step": 3471
    },
    {
      "epoch": 3.25,
      "eval_acc": 0.844007477670844,
      "eval_loss": 0.07361570745706558,
      "eval_runtime": 41.4565,
      "eval_samples_per_second": 348.389,
      "eval_steps_per_second": 1.375,
      "step": 3471
    },
    {
      "epoch": 3.5,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0539,
      "step": 3738
    },
    {
      "epoch": 3.5,
      "eval_acc": 0.8441459530568441,
      "eval_loss": 0.07325488328933716,
      "eval_runtime": 41.1118,
      "eval_samples_per_second": 351.311,
      "eval_steps_per_second": 1.386,
      "step": 3738
    },
    {
      "epoch": 3.75,
      "learning_rate": 2.7777777777777783e-06,
      "loss": 0.0538,
      "step": 4005
    },
    {
      "epoch": 3.75,
      "eval_acc": 0.8426227238108426,
      "eval_loss": 0.07311566174030304,
      "eval_runtime": 41.1396,
      "eval_samples_per_second": 351.073,
      "eval_steps_per_second": 1.386,
      "step": 4005
    },
    {
      "epoch": 4.0,
      "learning_rate": 2.222222222222222e-06,
      "loss": 0.0528,
      "step": 4272
    },
    {
      "epoch": 4.0,
      "eval_acc": 0.8444921415218445,
      "eval_loss": 0.0724639743566513,
      "eval_runtime": 41.153,
      "eval_samples_per_second": 350.959,
      "eval_steps_per_second": 1.385,
      "step": 4272
    }
  ],
  "max_steps": 5340,
  "num_train_epochs": 5,
  "total_flos": 1.758532164135154e+17,
  "trial_name": null,
  "trial_params": null
}