{
  "best_metric": 0.03381425514817238,
  "best_model_checkpoint": "autotrain-pmf0g-rj8fa/checkpoint-330",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "grad_norm": 1.5416135787963867,
      "learning_rate": 9.090909090909091e-06,
      "loss": 1.3908,
      "step": 10
    },
    {
      "epoch": 0.36,
      "grad_norm": 1.7443188428878784,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 1.3651,
      "step": 20
    },
    {
      "epoch": 0.55,
      "grad_norm": 2.275134563446045,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 1.2519,
      "step": 30
    },
    {
      "epoch": 0.73,
      "grad_norm": 2.0600032806396484,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 0.9604,
      "step": 40
    },
    {
      "epoch": 0.91,
      "grad_norm": 1.9959027767181396,
      "learning_rate": 4.454545454545455e-05,
      "loss": 0.5255,
      "step": 50
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9748858447488584,
      "eval_f1_macro": 0.9756765342178342,
      "eval_f1_micro": 0.9748858447488584,
      "eval_f1_weighted": 0.9749751427585824,
      "eval_loss": 0.20833216607570648,
      "eval_precision_macro": 0.9759725400457666,
      "eval_precision_micro": 0.9748858447488584,
      "eval_precision_weighted": 0.9751052736068879,
      "eval_recall_macro": 0.9754215648989755,
      "eval_recall_micro": 0.9748858447488584,
      "eval_recall_weighted": 0.9748858447488584,
      "eval_runtime": 0.1296,
      "eval_samples_per_second": 3380.791,
      "eval_steps_per_second": 54.031,
      "step": 55
    },
    {
      "epoch": 1.09,
      "grad_norm": 2.120863199234009,
      "learning_rate": 4.9595959595959594e-05,
      "loss": 0.2706,
      "step": 60
    },
    {
      "epoch": 1.27,
      "grad_norm": 0.6585132479667664,
      "learning_rate": 4.858585858585859e-05,
      "loss": 0.1525,
      "step": 70
    },
    {
      "epoch": 1.45,
      "grad_norm": 4.553682327270508,
      "learning_rate": 4.7575757575757576e-05,
      "loss": 0.0592,
      "step": 80
    },
    {
      "epoch": 1.64,
      "grad_norm": 0.41076862812042236,
      "learning_rate": 4.656565656565657e-05,
      "loss": 0.0615,
      "step": 90
    },
    {
      "epoch": 1.82,
      "grad_norm": 3.347062110900879,
      "learning_rate": 4.565656565656566e-05,
      "loss": 0.1097,
      "step": 100
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.071051597595215,
      "learning_rate": 4.464646464646465e-05,
      "loss": 0.0851,
      "step": 110
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9748858447488584,
      "eval_f1_macro": 0.9754834367482361,
      "eval_f1_micro": 0.9748858447488584,
      "eval_f1_weighted": 0.9749041744765222,
      "eval_loss": 0.07598946988582611,
      "eval_precision_macro": 0.9755606462503015,
      "eval_precision_micro": 0.9748858447488584,
      "eval_precision_weighted": 0.9752209332157372,
      "eval_recall_macro": 0.9756989697593826,
      "eval_recall_micro": 0.9748858447488584,
      "eval_recall_weighted": 0.9748858447488584,
      "eval_runtime": 0.1241,
      "eval_samples_per_second": 3530.172,
      "eval_steps_per_second": 56.418,
      "step": 110
    },
    {
      "epoch": 2.18,
      "grad_norm": 0.41413745284080505,
      "learning_rate": 4.3636363636363636e-05,
      "loss": 0.0477,
      "step": 120
    },
    {
      "epoch": 2.36,
      "grad_norm": 5.531299591064453,
      "learning_rate": 4.262626262626263e-05,
      "loss": 0.0332,
      "step": 130
    },
    {
      "epoch": 2.55,
      "grad_norm": 0.4154397249221802,
      "learning_rate": 4.161616161616162e-05,
      "loss": 0.0398,
      "step": 140
    },
    {
      "epoch": 2.73,
      "grad_norm": 2.7882883548736572,
      "learning_rate": 4.0606060606060606e-05,
      "loss": 0.027,
      "step": 150
    },
    {
      "epoch": 2.91,
      "grad_norm": 3.329517364501953,
      "learning_rate": 3.9595959595959594e-05,
      "loss": 0.0104,
      "step": 160
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9885844748858448,
      "eval_f1_macro": 0.9888365654549831,
      "eval_f1_micro": 0.9885844748858448,
      "eval_f1_weighted": 0.9886138949055431,
      "eval_loss": 0.04520614817738533,
      "eval_precision_macro": 0.9889521258788082,
      "eval_precision_micro": 0.9885844748858448,
      "eval_precision_weighted": 0.9888428242322514,
      "eval_recall_macro": 0.9889150852693966,
      "eval_recall_micro": 0.9885844748858448,
      "eval_recall_weighted": 0.9885844748858448,
      "eval_runtime": 0.1261,
      "eval_samples_per_second": 3473.478,
      "eval_steps_per_second": 55.512,
      "step": 165
    },
    {
      "epoch": 3.09,
      "grad_norm": 0.08116048574447632,
      "learning_rate": 3.858585858585859e-05,
      "loss": 0.0334,
      "step": 170
    },
    {
      "epoch": 3.27,
      "grad_norm": 0.1431998461484909,
      "learning_rate": 3.757575757575758e-05,
      "loss": 0.0244,
      "step": 180
    },
    {
      "epoch": 3.45,
      "grad_norm": 0.04078197479248047,
      "learning_rate": 3.656565656565657e-05,
      "loss": 0.0069,
      "step": 190
    },
    {
      "epoch": 3.64,
      "grad_norm": 0.03086886927485466,
      "learning_rate": 3.555555555555556e-05,
      "loss": 0.0042,
      "step": 200
    },
    {
      "epoch": 3.82,
      "grad_norm": 0.03221021592617035,
      "learning_rate": 3.454545454545455e-05,
      "loss": 0.0324,
      "step": 210
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.04350810497999191,
      "learning_rate": 3.3535353535353536e-05,
      "loss": 0.0203,
      "step": 220
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9840182648401826,
      "eval_f1_macro": 0.9844112150421562,
      "eval_f1_micro": 0.9840182648401826,
      "eval_f1_weighted": 0.9839758921897677,
      "eval_loss": 0.06017115339636803,
      "eval_precision_macro": 0.9844066713333537,
      "eval_precision_micro": 0.9840182648401826,
      "eval_precision_weighted": 0.984073353425936,
      "eval_recall_macro": 0.9845520881850645,
      "eval_recall_micro": 0.9840182648401826,
      "eval_recall_weighted": 0.9840182648401826,
      "eval_runtime": 0.124,
      "eval_samples_per_second": 3533.459,
      "eval_steps_per_second": 56.471,
      "step": 220
    },
    {
      "epoch": 4.18,
      "grad_norm": 0.09443140029907227,
      "learning_rate": 3.2525252525252524e-05,
      "loss": 0.0034,
      "step": 230
    },
    {
      "epoch": 4.36,
      "grad_norm": 2.7363362312316895,
      "learning_rate": 3.151515151515151e-05,
      "loss": 0.0049,
      "step": 240
    },
    {
      "epoch": 4.55,
      "grad_norm": 0.026695426553487778,
      "learning_rate": 3.050505050505051e-05,
      "loss": 0.0024,
      "step": 250
    },
    {
      "epoch": 4.73,
      "grad_norm": 0.023897232487797737,
      "learning_rate": 2.9494949494949498e-05,
      "loss": 0.0022,
      "step": 260
    },
    {
      "epoch": 4.91,
      "grad_norm": 0.021171841770410538,
      "learning_rate": 2.8484848484848486e-05,
      "loss": 0.0021,
      "step": 270
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9885844748858448,
      "eval_f1_macro": 0.9888140343800068,
      "eval_f1_micro": 0.9885844748858448,
      "eval_f1_weighted": 0.9885396362946075,
      "eval_loss": 0.06403101235628128,
      "eval_precision_macro": 0.9890982392627942,
      "eval_precision_micro": 0.9885844748858448,
      "eval_precision_weighted": 0.9889162145164453,
      "eval_recall_macro": 0.9889380530973452,
      "eval_recall_micro": 0.9885844748858448,
      "eval_recall_weighted": 0.9885844748858448,
      "eval_runtime": 0.1253,
      "eval_samples_per_second": 3496.411,
      "eval_steps_per_second": 55.879,
      "step": 275
    },
    {
      "epoch": 5.09,
      "grad_norm": 0.018654897809028625,
      "learning_rate": 2.7474747474747474e-05,
      "loss": 0.0024,
      "step": 280
    },
    {
      "epoch": 5.27,
      "grad_norm": 0.02241847664117813,
      "learning_rate": 2.6464646464646466e-05,
      "loss": 0.0019,
      "step": 290
    },
    {
      "epoch": 5.45,
      "grad_norm": 0.018668798729777336,
      "learning_rate": 2.5454545454545454e-05,
      "loss": 0.0017,
      "step": 300
    },
    {
      "epoch": 5.64,
      "grad_norm": 0.020943326875567436,
      "learning_rate": 2.4444444444444445e-05,
      "loss": 0.0017,
      "step": 310
    },
    {
      "epoch": 5.82,
      "grad_norm": 0.01833685301244259,
      "learning_rate": 2.3434343434343436e-05,
      "loss": 0.0017,
      "step": 320
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.01634540781378746,
      "learning_rate": 2.2424242424242424e-05,
      "loss": 0.0016,
      "step": 330
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9908675799086758,
      "eval_f1_macro": 0.9910410929202866,
      "eval_f1_micro": 0.9908675799086758,
      "eval_f1_weighted": 0.9908473335613555,
      "eval_loss": 0.03381425514817238,
      "eval_precision_macro": 0.9909727371947719,
      "eval_precision_micro": 0.9908675799086758,
      "eval_precision_weighted": 0.9908883151237302,
      "eval_recall_macro": 0.9911698494022667,
      "eval_recall_micro": 0.9908675799086758,
      "eval_recall_weighted": 0.9908675799086758,
      "eval_runtime": 0.124,
      "eval_samples_per_second": 3531.36,
      "eval_steps_per_second": 56.437,
      "step": 330
    }
  ],
  "logging_steps": 10,
  "max_steps": 550,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 349726407917568.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}