{
"best_metric": 0.890067458152771,
"best_model_checkpoint": "autotrain-okjuj-buuqs/checkpoint-804",
"epoch": 6.0,
"eval_steps": 500,
"global_step": 804,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014925373134328358,
"grad_norm": 3.468337297439575,
"learning_rate": 4.975124378109453e-07,
"loss": 3.5512,
"step": 2
},
{
"epoch": 0.029850746268656716,
"grad_norm": 3.3904953002929688,
"learning_rate": 9.950248756218907e-07,
"loss": 3.5495,
"step": 4
},
{
"epoch": 0.04477611940298507,
"grad_norm": 8.210745811462402,
"learning_rate": 1.4925373134328358e-06,
"loss": 3.5323,
"step": 6
},
{
"epoch": 0.05970149253731343,
"grad_norm": 3.321380376815796,
"learning_rate": 1.9900497512437813e-06,
"loss": 3.5311,
"step": 8
},
{
"epoch": 0.07462686567164178,
"grad_norm": 3.103438377380371,
"learning_rate": 2.4875621890547264e-06,
"loss": 3.5312,
"step": 10
},
{
"epoch": 0.08955223880597014,
"grad_norm": 3.2276968955993652,
"learning_rate": 2.9850746268656716e-06,
"loss": 3.5472,
"step": 12
},
{
"epoch": 0.1044776119402985,
"grad_norm": 3.511948585510254,
"learning_rate": 3.482587064676617e-06,
"loss": 3.5238,
"step": 14
},
{
"epoch": 0.11940298507462686,
"grad_norm": 3.459820508956909,
"learning_rate": 3.980099502487563e-06,
"loss": 3.5457,
"step": 16
},
{
"epoch": 0.13432835820895522,
"grad_norm": 3.187225103378296,
"learning_rate": 4.477611940298508e-06,
"loss": 3.5064,
"step": 18
},
{
"epoch": 0.14925373134328357,
"grad_norm": 3.3685667514801025,
"learning_rate": 4.975124378109453e-06,
"loss": 3.5019,
"step": 20
},
{
"epoch": 0.16417910447761194,
"grad_norm": 3.525916576385498,
"learning_rate": 5.472636815920398e-06,
"loss": 3.5202,
"step": 22
},
{
"epoch": 0.1791044776119403,
"grad_norm": 3.199352979660034,
"learning_rate": 5.970149253731343e-06,
"loss": 3.4878,
"step": 24
},
{
"epoch": 0.19402985074626866,
"grad_norm": 3.151981830596924,
"learning_rate": 6.467661691542288e-06,
"loss": 3.5254,
"step": 26
},
{
"epoch": 0.208955223880597,
"grad_norm": 3.353118896484375,
"learning_rate": 6.965174129353234e-06,
"loss": 3.5179,
"step": 28
},
{
"epoch": 0.22388059701492538,
"grad_norm": 5.352255344390869,
"learning_rate": 7.4626865671641785e-06,
"loss": 3.4951,
"step": 30
},
{
"epoch": 0.23880597014925373,
"grad_norm": 4.2452712059021,
"learning_rate": 7.960199004975125e-06,
"loss": 3.5144,
"step": 32
},
{
"epoch": 0.2537313432835821,
"grad_norm": 3.4914658069610596,
"learning_rate": 8.45771144278607e-06,
"loss": 3.4755,
"step": 34
},
{
"epoch": 0.26865671641791045,
"grad_norm": 3.413722038269043,
"learning_rate": 8.955223880597016e-06,
"loss": 3.5059,
"step": 36
},
{
"epoch": 0.2835820895522388,
"grad_norm": 3.5784690380096436,
"learning_rate": 9.45273631840796e-06,
"loss": 3.503,
"step": 38
},
{
"epoch": 0.29850746268656714,
"grad_norm": 3.3770291805267334,
"learning_rate": 9.950248756218906e-06,
"loss": 3.4968,
"step": 40
},
{
"epoch": 0.31343283582089554,
"grad_norm": 3.709470748901367,
"learning_rate": 1.0447761194029851e-05,
"loss": 3.5453,
"step": 42
},
{
"epoch": 0.3283582089552239,
"grad_norm": 3.529064655303955,
"learning_rate": 1.0945273631840796e-05,
"loss": 3.4461,
"step": 44
},
{
"epoch": 0.34328358208955223,
"grad_norm": 3.4094252586364746,
"learning_rate": 1.1442786069651743e-05,
"loss": 3.483,
"step": 46
},
{
"epoch": 0.3582089552238806,
"grad_norm": 3.385557174682617,
"learning_rate": 1.1940298507462686e-05,
"loss": 3.4522,
"step": 48
},
{
"epoch": 0.373134328358209,
"grad_norm": 3.4028918743133545,
"learning_rate": 1.2437810945273633e-05,
"loss": 3.4814,
"step": 50
},
{
"epoch": 0.3880597014925373,
"grad_norm": 3.482884168624878,
"learning_rate": 1.2935323383084577e-05,
"loss": 3.4917,
"step": 52
},
{
"epoch": 0.40298507462686567,
"grad_norm": 3.4329397678375244,
"learning_rate": 1.3432835820895523e-05,
"loss": 3.4945,
"step": 54
},
{
"epoch": 0.417910447761194,
"grad_norm": 3.6114747524261475,
"learning_rate": 1.3930348258706468e-05,
"loss": 3.3991,
"step": 56
},
{
"epoch": 0.43283582089552236,
"grad_norm": 3.743497848510742,
"learning_rate": 1.4427860696517415e-05,
"loss": 3.4314,
"step": 58
},
{
"epoch": 0.44776119402985076,
"grad_norm": 13.538641929626465,
"learning_rate": 1.4925373134328357e-05,
"loss": 3.4495,
"step": 60
},
{
"epoch": 0.4626865671641791,
"grad_norm": 3.536511182785034,
"learning_rate": 1.5422885572139304e-05,
"loss": 3.4579,
"step": 62
},
{
"epoch": 0.47761194029850745,
"grad_norm": 4.389700889587402,
"learning_rate": 1.592039800995025e-05,
"loss": 3.3908,
"step": 64
},
{
"epoch": 0.4925373134328358,
"grad_norm": 4.821465969085693,
"learning_rate": 1.6417910447761194e-05,
"loss": 3.4305,
"step": 66
},
{
"epoch": 0.5074626865671642,
"grad_norm": 3.5865774154663086,
"learning_rate": 1.691542288557214e-05,
"loss": 3.4297,
"step": 68
},
{
"epoch": 0.5223880597014925,
"grad_norm": 3.3377368450164795,
"learning_rate": 1.7412935323383088e-05,
"loss": 3.5506,
"step": 70
},
{
"epoch": 0.5373134328358209,
"grad_norm": 3.887446880340576,
"learning_rate": 1.791044776119403e-05,
"loss": 3.4623,
"step": 72
},
{
"epoch": 0.5522388059701493,
"grad_norm": 3.546586275100708,
"learning_rate": 1.8407960199004975e-05,
"loss": 3.4059,
"step": 74
},
{
"epoch": 0.5671641791044776,
"grad_norm": 3.8084304332733154,
"learning_rate": 1.890547263681592e-05,
"loss": 3.3708,
"step": 76
},
{
"epoch": 0.582089552238806,
"grad_norm": 3.5616061687469482,
"learning_rate": 1.9402985074626868e-05,
"loss": 3.4297,
"step": 78
},
{
"epoch": 0.5970149253731343,
"grad_norm": 3.7048122882843018,
"learning_rate": 1.990049751243781e-05,
"loss": 3.3541,
"step": 80
},
{
"epoch": 0.6119402985074627,
"grad_norm": 3.7918601036071777,
"learning_rate": 2.0398009950248755e-05,
"loss": 3.3734,
"step": 82
},
{
"epoch": 0.6268656716417911,
"grad_norm": 4.018467903137207,
"learning_rate": 2.0895522388059702e-05,
"loss": 3.3347,
"step": 84
},
{
"epoch": 0.6417910447761194,
"grad_norm": 3.6919217109680176,
"learning_rate": 2.139303482587065e-05,
"loss": 3.4747,
"step": 86
},
{
"epoch": 0.6567164179104478,
"grad_norm": 3.553607940673828,
"learning_rate": 2.1890547263681592e-05,
"loss": 3.346,
"step": 88
},
{
"epoch": 0.6716417910447762,
"grad_norm": 3.717123508453369,
"learning_rate": 2.238805970149254e-05,
"loss": 3.3681,
"step": 90
},
{
"epoch": 0.6865671641791045,
"grad_norm": 4.329309463500977,
"learning_rate": 2.2885572139303486e-05,
"loss": 3.2841,
"step": 92
},
{
"epoch": 0.7014925373134329,
"grad_norm": 3.9699785709381104,
"learning_rate": 2.338308457711443e-05,
"loss": 3.3339,
"step": 94
},
{
"epoch": 0.7164179104477612,
"grad_norm": 3.9800264835357666,
"learning_rate": 2.3880597014925373e-05,
"loss": 3.272,
"step": 96
},
{
"epoch": 0.7313432835820896,
"grad_norm": 3.881544589996338,
"learning_rate": 2.437810945273632e-05,
"loss": 3.3739,
"step": 98
},
{
"epoch": 0.746268656716418,
"grad_norm": 3.7055130004882812,
"learning_rate": 2.4875621890547266e-05,
"loss": 3.2847,
"step": 100
},
{
"epoch": 0.7611940298507462,
"grad_norm": 4.061646461486816,
"learning_rate": 2.537313432835821e-05,
"loss": 3.2573,
"step": 102
},
{
"epoch": 0.7761194029850746,
"grad_norm": 4.569900035858154,
"learning_rate": 2.5870646766169153e-05,
"loss": 3.2594,
"step": 104
},
{
"epoch": 0.7910447761194029,
"grad_norm": 4.053166389465332,
"learning_rate": 2.6368159203980103e-05,
"loss": 3.2739,
"step": 106
},
{
"epoch": 0.8059701492537313,
"grad_norm": 5.338119029998779,
"learning_rate": 2.6865671641791047e-05,
"loss": 3.1965,
"step": 108
},
{
"epoch": 0.8208955223880597,
"grad_norm": 4.987647533416748,
"learning_rate": 2.736318407960199e-05,
"loss": 3.1951,
"step": 110
},
{
"epoch": 0.835820895522388,
"grad_norm": 4.582657814025879,
"learning_rate": 2.7860696517412937e-05,
"loss": 3.1758,
"step": 112
},
{
"epoch": 0.8507462686567164,
"grad_norm": 4.130568027496338,
"learning_rate": 2.835820895522388e-05,
"loss": 3.2563,
"step": 114
},
{
"epoch": 0.8656716417910447,
"grad_norm": 3.6177594661712646,
"learning_rate": 2.885572139303483e-05,
"loss": 3.1394,
"step": 116
},
{
"epoch": 0.8805970149253731,
"grad_norm": 4.4392242431640625,
"learning_rate": 2.935323383084577e-05,
"loss": 2.9928,
"step": 118
},
{
"epoch": 0.8955223880597015,
"grad_norm": 4.2055439949035645,
"learning_rate": 2.9850746268656714e-05,
"loss": 3.0482,
"step": 120
},
{
"epoch": 0.9104477611940298,
"grad_norm": 4.438642978668213,
"learning_rate": 3.0348258706467664e-05,
"loss": 3.024,
"step": 122
},
{
"epoch": 0.9253731343283582,
"grad_norm": 4.41525936126709,
"learning_rate": 3.084577114427861e-05,
"loss": 3.1619,
"step": 124
},
{
"epoch": 0.9402985074626866,
"grad_norm": 4.188882827758789,
"learning_rate": 3.1343283582089554e-05,
"loss": 3.2697,
"step": 126
},
{
"epoch": 0.9552238805970149,
"grad_norm": 3.903090000152588,
"learning_rate": 3.18407960199005e-05,
"loss": 3.0515,
"step": 128
},
{
"epoch": 0.9701492537313433,
"grad_norm": 4.03048849105835,
"learning_rate": 3.233830845771145e-05,
"loss": 2.9312,
"step": 130
},
{
"epoch": 0.9850746268656716,
"grad_norm": 3.842494249343872,
"learning_rate": 3.283582089552239e-05,
"loss": 3.0714,
"step": 132
},
{
"epoch": 1.0,
"grad_norm": 3.9151153564453125,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.8809,
"step": 134
},
{
"epoch": 1.0,
"eval_accuracy": 0.375,
"eval_f1_macro": 0.2794185541385505,
"eval_f1_micro": 0.375,
"eval_f1_weighted": 0.29179203240000456,
"eval_loss": 2.9781627655029297,
"eval_precision_macro": 0.26769010075416033,
"eval_precision_micro": 0.375,
"eval_precision_weighted": 0.28345060265850164,
"eval_recall_macro": 0.3689075630252101,
"eval_recall_micro": 0.375,
"eval_recall_weighted": 0.375,
"eval_runtime": 38.3444,
"eval_samples_per_second": 5.216,
"eval_steps_per_second": 0.183,
"step": 134
},
{
"epoch": 1.0149253731343284,
"grad_norm": 3.894672155380249,
"learning_rate": 3.383084577114428e-05,
"loss": 2.8126,
"step": 136
},
{
"epoch": 1.0298507462686568,
"grad_norm": 4.255545139312744,
"learning_rate": 3.432835820895522e-05,
"loss": 2.9291,
"step": 138
},
{
"epoch": 1.044776119402985,
"grad_norm": 4.275138854980469,
"learning_rate": 3.4825870646766175e-05,
"loss": 2.8673,
"step": 140
},
{
"epoch": 1.0597014925373134,
"grad_norm": 4.796700477600098,
"learning_rate": 3.5323383084577115e-05,
"loss": 2.8431,
"step": 142
},
{
"epoch": 1.0746268656716418,
"grad_norm": 4.217311859130859,
"learning_rate": 3.582089552238806e-05,
"loss": 2.8104,
"step": 144
},
{
"epoch": 1.0895522388059702,
"grad_norm": 4.417973041534424,
"learning_rate": 3.631840796019901e-05,
"loss": 2.7942,
"step": 146
},
{
"epoch": 1.1044776119402986,
"grad_norm": 3.8580806255340576,
"learning_rate": 3.681592039800995e-05,
"loss": 2.6514,
"step": 148
},
{
"epoch": 1.1194029850746268,
"grad_norm": 4.099730014801025,
"learning_rate": 3.73134328358209e-05,
"loss": 2.693,
"step": 150
},
{
"epoch": 1.1343283582089552,
"grad_norm": 4.598573684692383,
"learning_rate": 3.781094527363184e-05,
"loss": 2.7205,
"step": 152
},
{
"epoch": 1.1492537313432836,
"grad_norm": 4.880782604217529,
"learning_rate": 3.830845771144278e-05,
"loss": 2.8268,
"step": 154
},
{
"epoch": 1.164179104477612,
"grad_norm": 3.9455466270446777,
"learning_rate": 3.8805970149253736e-05,
"loss": 2.5482,
"step": 156
},
{
"epoch": 1.1791044776119404,
"grad_norm": 3.5259761810302734,
"learning_rate": 3.9303482587064676e-05,
"loss": 2.4929,
"step": 158
},
{
"epoch": 1.1940298507462686,
"grad_norm": 4.351325035095215,
"learning_rate": 3.980099502487562e-05,
"loss": 2.6261,
"step": 160
},
{
"epoch": 1.208955223880597,
"grad_norm": 3.8126604557037354,
"learning_rate": 4.029850746268657e-05,
"loss": 2.3924,
"step": 162
},
{
"epoch": 1.2238805970149254,
"grad_norm": 5.261939525604248,
"learning_rate": 4.079601990049751e-05,
"loss": 2.3364,
"step": 164
},
{
"epoch": 1.2388059701492538,
"grad_norm": 5.208514213562012,
"learning_rate": 4.1293532338308464e-05,
"loss": 2.3356,
"step": 166
},
{
"epoch": 1.2537313432835822,
"grad_norm": 4.369601249694824,
"learning_rate": 4.1791044776119404e-05,
"loss": 2.4191,
"step": 168
},
{
"epoch": 1.2686567164179103,
"grad_norm": 5.281370639801025,
"learning_rate": 4.228855721393035e-05,
"loss": 2.4544,
"step": 170
},
{
"epoch": 1.2835820895522387,
"grad_norm": 4.296084880828857,
"learning_rate": 4.27860696517413e-05,
"loss": 2.4446,
"step": 172
},
{
"epoch": 1.2985074626865671,
"grad_norm": 3.7815287113189697,
"learning_rate": 4.328358208955224e-05,
"loss": 2.3687,
"step": 174
},
{
"epoch": 1.3134328358208955,
"grad_norm": 7.161779403686523,
"learning_rate": 4.3781094527363184e-05,
"loss": 2.67,
"step": 176
},
{
"epoch": 1.328358208955224,
"grad_norm": 4.957999229431152,
"learning_rate": 4.427860696517413e-05,
"loss": 2.4644,
"step": 178
},
{
"epoch": 1.3432835820895521,
"grad_norm": 4.455275058746338,
"learning_rate": 4.477611940298508e-05,
"loss": 2.4389,
"step": 180
},
{
"epoch": 1.3582089552238805,
"grad_norm": 5.357694625854492,
"learning_rate": 4.5273631840796025e-05,
"loss": 2.1849,
"step": 182
},
{
"epoch": 1.373134328358209,
"grad_norm": 6.346301078796387,
"learning_rate": 4.577114427860697e-05,
"loss": 2.3134,
"step": 184
},
{
"epoch": 1.3880597014925373,
"grad_norm": 6.189851760864258,
"learning_rate": 4.626865671641791e-05,
"loss": 2.322,
"step": 186
},
{
"epoch": 1.4029850746268657,
"grad_norm": 3.729904890060425,
"learning_rate": 4.676616915422886e-05,
"loss": 2.2053,
"step": 188
},
{
"epoch": 1.417910447761194,
"grad_norm": 4.210063457489014,
"learning_rate": 4.7263681592039805e-05,
"loss": 1.9863,
"step": 190
},
{
"epoch": 1.4328358208955223,
"grad_norm": 4.447649002075195,
"learning_rate": 4.7761194029850745e-05,
"loss": 2.0302,
"step": 192
},
{
"epoch": 1.4477611940298507,
"grad_norm": 5.010406970977783,
"learning_rate": 4.82587064676617e-05,
"loss": 2.379,
"step": 194
},
{
"epoch": 1.462686567164179,
"grad_norm": 4.134596824645996,
"learning_rate": 4.875621890547264e-05,
"loss": 2.1104,
"step": 196
},
{
"epoch": 1.4776119402985075,
"grad_norm": 3.678053140640259,
"learning_rate": 4.9253731343283586e-05,
"loss": 2.0437,
"step": 198
},
{
"epoch": 1.4925373134328357,
"grad_norm": 6.969429016113281,
"learning_rate": 4.975124378109453e-05,
"loss": 2.1688,
"step": 200
},
{
"epoch": 1.5074626865671643,
"grad_norm": 5.489563941955566,
"learning_rate": 4.997236042012162e-05,
"loss": 1.9494,
"step": 202
},
{
"epoch": 1.5223880597014925,
"grad_norm": 3.8776581287384033,
"learning_rate": 4.991708126036485e-05,
"loss": 1.7246,
"step": 204
},
{
"epoch": 1.537313432835821,
"grad_norm": 3.915743350982666,
"learning_rate": 4.9861802100608076e-05,
"loss": 2.1206,
"step": 206
},
{
"epoch": 1.5522388059701493,
"grad_norm": 10.498429298400879,
"learning_rate": 4.9806522940851304e-05,
"loss": 1.8089,
"step": 208
},
{
"epoch": 1.5671641791044775,
"grad_norm": 5.074433326721191,
"learning_rate": 4.975124378109453e-05,
"loss": 2.2841,
"step": 210
},
{
"epoch": 1.582089552238806,
"grad_norm": 16.675743103027344,
"learning_rate": 4.969596462133776e-05,
"loss": 1.9404,
"step": 212
},
{
"epoch": 1.5970149253731343,
"grad_norm": 4.692626953125,
"learning_rate": 4.964068546158099e-05,
"loss": 1.9563,
"step": 214
},
{
"epoch": 1.6119402985074627,
"grad_norm": 4.603738307952881,
"learning_rate": 4.958540630182422e-05,
"loss": 2.4004,
"step": 216
},
{
"epoch": 1.626865671641791,
"grad_norm": 29.3167781829834,
"learning_rate": 4.9530127142067445e-05,
"loss": 2.1461,
"step": 218
},
{
"epoch": 1.6417910447761193,
"grad_norm": 4.929641246795654,
"learning_rate": 4.947484798231067e-05,
"loss": 1.6009,
"step": 220
},
{
"epoch": 1.6567164179104479,
"grad_norm": 7.109385967254639,
"learning_rate": 4.94195688225539e-05,
"loss": 1.884,
"step": 222
},
{
"epoch": 1.671641791044776,
"grad_norm": 14.075770378112793,
"learning_rate": 4.936428966279712e-05,
"loss": 1.6543,
"step": 224
},
{
"epoch": 1.6865671641791045,
"grad_norm": 4.957328796386719,
"learning_rate": 4.930901050304035e-05,
"loss": 1.6223,
"step": 226
},
{
"epoch": 1.7014925373134329,
"grad_norm": 6.562300205230713,
"learning_rate": 4.9253731343283586e-05,
"loss": 1.4456,
"step": 228
},
{
"epoch": 1.716417910447761,
"grad_norm": 5.4999237060546875,
"learning_rate": 4.9198452183526814e-05,
"loss": 1.7814,
"step": 230
},
{
"epoch": 1.7313432835820897,
"grad_norm": 5.5423736572265625,
"learning_rate": 4.914317302377004e-05,
"loss": 1.884,
"step": 232
},
{
"epoch": 1.7462686567164178,
"grad_norm": 5.507659435272217,
"learning_rate": 4.908789386401327e-05,
"loss": 1.8302,
"step": 234
},
{
"epoch": 1.7611940298507462,
"grad_norm": 7.983649730682373,
"learning_rate": 4.90326147042565e-05,
"loss": 1.9323,
"step": 236
},
{
"epoch": 1.7761194029850746,
"grad_norm": 4.843520641326904,
"learning_rate": 4.8977335544499726e-05,
"loss": 1.6411,
"step": 238
},
{
"epoch": 1.7910447761194028,
"grad_norm": 4.717188835144043,
"learning_rate": 4.8922056384742954e-05,
"loss": 1.6714,
"step": 240
},
{
"epoch": 1.8059701492537314,
"grad_norm": 4.654526233673096,
"learning_rate": 4.886677722498618e-05,
"loss": 1.6478,
"step": 242
},
{
"epoch": 1.8208955223880596,
"grad_norm": 5.104541301727295,
"learning_rate": 4.881149806522941e-05,
"loss": 1.6125,
"step": 244
},
{
"epoch": 1.835820895522388,
"grad_norm": 4.356812000274658,
"learning_rate": 4.875621890547264e-05,
"loss": 1.9471,
"step": 246
},
{
"epoch": 1.8507462686567164,
"grad_norm": 4.166391849517822,
"learning_rate": 4.870093974571587e-05,
"loss": 1.4783,
"step": 248
},
{
"epoch": 1.8656716417910446,
"grad_norm": 6.2490057945251465,
"learning_rate": 4.8645660585959095e-05,
"loss": 1.8157,
"step": 250
},
{
"epoch": 1.8805970149253732,
"grad_norm": 5.395322322845459,
"learning_rate": 4.859038142620232e-05,
"loss": 1.8702,
"step": 252
},
{
"epoch": 1.8955223880597014,
"grad_norm": 4.278140544891357,
"learning_rate": 4.853510226644555e-05,
"loss": 1.5588,
"step": 254
},
{
"epoch": 1.9104477611940298,
"grad_norm": 5.143709182739258,
"learning_rate": 4.847982310668878e-05,
"loss": 1.5783,
"step": 256
},
{
"epoch": 1.9253731343283582,
"grad_norm": 8.840173721313477,
"learning_rate": 4.842454394693201e-05,
"loss": 1.5149,
"step": 258
},
{
"epoch": 1.9402985074626866,
"grad_norm": 5.001150608062744,
"learning_rate": 4.836926478717524e-05,
"loss": 1.5529,
"step": 260
},
{
"epoch": 1.955223880597015,
"grad_norm": 4.259793281555176,
"learning_rate": 4.831398562741847e-05,
"loss": 1.7178,
"step": 262
},
{
"epoch": 1.9701492537313432,
"grad_norm": 3.2455430030822754,
"learning_rate": 4.82587064676617e-05,
"loss": 1.26,
"step": 264
},
{
"epoch": 1.9850746268656716,
"grad_norm": 3.899599313735962,
"learning_rate": 4.820342730790493e-05,
"loss": 1.5229,
"step": 266
},
{
"epoch": 2.0,
"grad_norm": 6.097535610198975,
"learning_rate": 4.814814814814815e-05,
"loss": 1.6973,
"step": 268
},
{
"epoch": 2.0,
"eval_accuracy": 0.63,
"eval_f1_macro": 0.569843507632617,
"eval_f1_micro": 0.63,
"eval_f1_weighted": 0.5873017265672064,
"eval_loss": 1.6746174097061157,
"eval_precision_macro": 0.5998444366091426,
"eval_precision_micro": 0.63,
"eval_precision_weighted": 0.623490675990676,
"eval_recall_macro": 0.6212885154061624,
"eval_recall_micro": 0.63,
"eval_recall_weighted": 0.63,
"eval_runtime": 35.3294,
"eval_samples_per_second": 5.661,
"eval_steps_per_second": 0.198,
"step": 268
},
{
"epoch": 2.014925373134328,
"grad_norm": 2.5601863861083984,
"learning_rate": 4.8092868988391376e-05,
"loss": 1.0241,
"step": 270
},
{
"epoch": 2.029850746268657,
"grad_norm": 4.754575252532959,
"learning_rate": 4.8037589828634604e-05,
"loss": 1.5737,
"step": 272
},
{
"epoch": 2.044776119402985,
"grad_norm": 4.370357990264893,
"learning_rate": 4.798231066887783e-05,
"loss": 1.4961,
"step": 274
},
{
"epoch": 2.0597014925373136,
"grad_norm": 8.790416717529297,
"learning_rate": 4.792703150912106e-05,
"loss": 1.6075,
"step": 276
},
{
"epoch": 2.074626865671642,
"grad_norm": 4.466557502746582,
"learning_rate": 4.787175234936429e-05,
"loss": 1.4832,
"step": 278
},
{
"epoch": 2.08955223880597,
"grad_norm": 3.5914487838745117,
"learning_rate": 4.781647318960752e-05,
"loss": 1.1338,
"step": 280
},
{
"epoch": 2.1044776119402986,
"grad_norm": 3.8444533348083496,
"learning_rate": 4.7761194029850745e-05,
"loss": 1.2921,
"step": 282
},
{
"epoch": 2.1194029850746268,
"grad_norm": 7.032942295074463,
"learning_rate": 4.770591487009397e-05,
"loss": 1.5322,
"step": 284
},
{
"epoch": 2.1343283582089554,
"grad_norm": 6.563926696777344,
"learning_rate": 4.76506357103372e-05,
"loss": 1.2677,
"step": 286
},
{
"epoch": 2.1492537313432836,
"grad_norm": 3.413158655166626,
"learning_rate": 4.7595356550580436e-05,
"loss": 1.2462,
"step": 288
},
{
"epoch": 2.1641791044776117,
"grad_norm": 4.282972812652588,
"learning_rate": 4.7540077390823664e-05,
"loss": 1.1632,
"step": 290
},
{
"epoch": 2.1791044776119404,
"grad_norm": 11.003588676452637,
"learning_rate": 4.748479823106689e-05,
"loss": 1.2687,
"step": 292
},
{
"epoch": 2.1940298507462686,
"grad_norm": 4.725019931793213,
"learning_rate": 4.742951907131012e-05,
"loss": 1.2352,
"step": 294
},
{
"epoch": 2.208955223880597,
"grad_norm": 3.6079459190368652,
"learning_rate": 4.737423991155335e-05,
"loss": 1.4994,
"step": 296
},
{
"epoch": 2.2238805970149254,
"grad_norm": 3.576037645339966,
"learning_rate": 4.731896075179658e-05,
"loss": 1.1007,
"step": 298
},
{
"epoch": 2.2388059701492535,
"grad_norm": 3.0327272415161133,
"learning_rate": 4.7263681592039805e-05,
"loss": 1.0624,
"step": 300
},
{
"epoch": 2.253731343283582,
"grad_norm": 5.710484504699707,
"learning_rate": 4.720840243228303e-05,
"loss": 1.2262,
"step": 302
},
{
"epoch": 2.2686567164179103,
"grad_norm": 3.059814453125,
"learning_rate": 4.715312327252626e-05,
"loss": 1.0292,
"step": 304
},
{
"epoch": 2.283582089552239,
"grad_norm": 4.122474193572998,
"learning_rate": 4.709784411276949e-05,
"loss": 1.1731,
"step": 306
},
{
"epoch": 2.298507462686567,
"grad_norm": 8.998103141784668,
"learning_rate": 4.704256495301272e-05,
"loss": 1.5075,
"step": 308
},
{
"epoch": 2.3134328358208958,
"grad_norm": 3.947554349899292,
"learning_rate": 4.6987285793255946e-05,
"loss": 1.1681,
"step": 310
},
{
"epoch": 2.328358208955224,
"grad_norm": 6.467041492462158,
"learning_rate": 4.693200663349917e-05,
"loss": 1.2688,
"step": 312
},
{
"epoch": 2.343283582089552,
"grad_norm": 6.714489936828613,
"learning_rate": 4.68767274737424e-05,
"loss": 1.5061,
"step": 314
},
{
"epoch": 2.3582089552238807,
"grad_norm": 4.802642822265625,
"learning_rate": 4.682144831398563e-05,
"loss": 1.0498,
"step": 316
},
{
"epoch": 2.373134328358209,
"grad_norm": 3.7944350242614746,
"learning_rate": 4.676616915422886e-05,
"loss": 1.1943,
"step": 318
},
{
"epoch": 2.388059701492537,
"grad_norm": 6.245696067810059,
"learning_rate": 4.6710889994472086e-05,
"loss": 0.9827,
"step": 320
},
{
"epoch": 2.4029850746268657,
"grad_norm": 3.385528326034546,
"learning_rate": 4.6655610834715315e-05,
"loss": 1.2411,
"step": 322
},
{
"epoch": 2.417910447761194,
"grad_norm": 4.5617828369140625,
"learning_rate": 4.660033167495854e-05,
"loss": 0.947,
"step": 324
},
{
"epoch": 2.4328358208955225,
"grad_norm": 6.033749580383301,
"learning_rate": 4.654505251520177e-05,
"loss": 1.1135,
"step": 326
},
{
"epoch": 2.4477611940298507,
"grad_norm": 4.091092586517334,
"learning_rate": 4.6489773355445e-05,
"loss": 1.2219,
"step": 328
},
{
"epoch": 2.4626865671641793,
"grad_norm": 4.028707027435303,
"learning_rate": 4.643449419568823e-05,
"loss": 0.8016,
"step": 330
},
{
"epoch": 2.4776119402985075,
"grad_norm": 13.044775009155273,
"learning_rate": 4.6379215035931455e-05,
"loss": 1.0265,
"step": 332
},
{
"epoch": 2.4925373134328357,
"grad_norm": 3.4753003120422363,
"learning_rate": 4.632393587617468e-05,
"loss": 1.1795,
"step": 334
},
{
"epoch": 2.5074626865671643,
"grad_norm": 10.754464149475098,
"learning_rate": 4.626865671641791e-05,
"loss": 1.0397,
"step": 336
},
{
"epoch": 2.5223880597014925,
"grad_norm": 5.801613807678223,
"learning_rate": 4.621337755666114e-05,
"loss": 1.0849,
"step": 338
},
{
"epoch": 2.5373134328358207,
"grad_norm": 7.185355186462402,
"learning_rate": 4.615809839690437e-05,
"loss": 1.3039,
"step": 340
},
{
"epoch": 2.5522388059701493,
"grad_norm": 5.9482622146606445,
"learning_rate": 4.6102819237147596e-05,
"loss": 0.9354,
"step": 342
},
{
"epoch": 2.5671641791044775,
"grad_norm": 5.785240173339844,
"learning_rate": 4.6047540077390824e-05,
"loss": 1.1711,
"step": 344
},
{
"epoch": 2.582089552238806,
"grad_norm": 4.588739395141602,
"learning_rate": 4.599226091763406e-05,
"loss": 1.2397,
"step": 346
},
{
"epoch": 2.5970149253731343,
"grad_norm": 9.58119010925293,
"learning_rate": 4.593698175787729e-05,
"loss": 1.0885,
"step": 348
},
{
"epoch": 2.611940298507463,
"grad_norm": 7.116174221038818,
"learning_rate": 4.5881702598120515e-05,
"loss": 1.4472,
"step": 350
},
{
"epoch": 2.626865671641791,
"grad_norm": 4.783851146697998,
"learning_rate": 4.582642343836374e-05,
"loss": 1.0596,
"step": 352
},
{
"epoch": 2.6417910447761193,
"grad_norm": 8.053864479064941,
"learning_rate": 4.577114427860697e-05,
"loss": 1.1109,
"step": 354
},
{
"epoch": 2.656716417910448,
"grad_norm": 7.039917945861816,
"learning_rate": 4.571586511885019e-05,
"loss": 1.0317,
"step": 356
},
{
"epoch": 2.671641791044776,
"grad_norm": 10.328742980957031,
"learning_rate": 4.566058595909342e-05,
"loss": 1.3927,
"step": 358
},
{
"epoch": 2.6865671641791042,
"grad_norm": 6.044647693634033,
"learning_rate": 4.560530679933665e-05,
"loss": 1.6115,
"step": 360
},
{
"epoch": 2.701492537313433,
"grad_norm": 5.099875450134277,
"learning_rate": 4.555002763957988e-05,
"loss": 0.7492,
"step": 362
},
{
"epoch": 2.716417910447761,
"grad_norm": 5.763055324554443,
"learning_rate": 4.5494748479823105e-05,
"loss": 1.2244,
"step": 364
},
{
"epoch": 2.7313432835820897,
"grad_norm": 7.4969987869262695,
"learning_rate": 4.5439469320066333e-05,
"loss": 0.9924,
"step": 366
},
{
"epoch": 2.746268656716418,
"grad_norm": 11.879778861999512,
"learning_rate": 4.538419016030956e-05,
"loss": 1.3022,
"step": 368
},
{
"epoch": 2.7611940298507465,
"grad_norm": 2.6563034057617188,
"learning_rate": 4.532891100055279e-05,
"loss": 1.0468,
"step": 370
},
{
"epoch": 2.7761194029850746,
"grad_norm": 4.352832317352295,
"learning_rate": 4.5273631840796025e-05,
"loss": 0.7805,
"step": 372
},
{
"epoch": 2.791044776119403,
"grad_norm": 5.752820014953613,
"learning_rate": 4.521835268103925e-05,
"loss": 1.0971,
"step": 374
},
{
"epoch": 2.8059701492537314,
"grad_norm": 6.028427600860596,
"learning_rate": 4.516307352128248e-05,
"loss": 0.9887,
"step": 376
},
{
"epoch": 2.8208955223880596,
"grad_norm": 5.541314601898193,
"learning_rate": 4.510779436152571e-05,
"loss": 1.0044,
"step": 378
},
{
"epoch": 2.835820895522388,
"grad_norm": 6.708818435668945,
"learning_rate": 4.505251520176894e-05,
"loss": 0.9756,
"step": 380
},
{
"epoch": 2.8507462686567164,
"grad_norm": 5.191244602203369,
"learning_rate": 4.4997236042012165e-05,
"loss": 1.0194,
"step": 382
},
{
"epoch": 2.8656716417910446,
"grad_norm": 5.064216613769531,
"learning_rate": 4.4941956882255393e-05,
"loss": 1.1003,
"step": 384
},
{
"epoch": 2.8805970149253732,
"grad_norm": 4.351062774658203,
"learning_rate": 4.488667772249862e-05,
"loss": 1.0402,
"step": 386
},
{
"epoch": 2.8955223880597014,
"grad_norm": 9.046034812927246,
"learning_rate": 4.483139856274185e-05,
"loss": 1.0788,
"step": 388
},
{
"epoch": 2.91044776119403,
"grad_norm": 9.672553062438965,
"learning_rate": 4.477611940298508e-05,
"loss": 1.1218,
"step": 390
},
{
"epoch": 2.925373134328358,
"grad_norm": 6.787928581237793,
"learning_rate": 4.4720840243228306e-05,
"loss": 0.9067,
"step": 392
},
{
"epoch": 2.9402985074626864,
"grad_norm": 7.9477152824401855,
"learning_rate": 4.4665561083471534e-05,
"loss": 1.0954,
"step": 394
},
{
"epoch": 2.955223880597015,
"grad_norm": 6.69403600692749,
"learning_rate": 4.461028192371476e-05,
"loss": 1.2409,
"step": 396
},
{
"epoch": 2.970149253731343,
"grad_norm": 6.419997692108154,
"learning_rate": 4.455500276395799e-05,
"loss": 0.7133,
"step": 398
},
{
"epoch": 2.9850746268656714,
"grad_norm": 6.382747173309326,
"learning_rate": 4.449972360420122e-05,
"loss": 1.3368,
"step": 400
},
{
"epoch": 3.0,
"grad_norm": 6.085859298706055,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.8309,
"step": 402
},
{
"epoch": 3.0,
"eval_accuracy": 0.7,
"eval_f1_macro": 0.6561413388812769,
"eval_f1_micro": 0.7,
"eval_f1_weighted": 0.6696798888246258,
"eval_loss": 1.1434708833694458,
"eval_precision_macro": 0.6706204141575659,
"eval_precision_micro": 0.7,
"eval_precision_weighted": 0.6840218575284365,
"eval_recall_macro": 0.6900560224089635,
"eval_recall_micro": 0.7,
"eval_recall_weighted": 0.7,
"eval_runtime": 36.7029,
"eval_samples_per_second": 5.449,
"eval_steps_per_second": 0.191,
"step": 402
},
{
"epoch": 3.014925373134328,
"grad_norm": 8.274943351745605,
"learning_rate": 4.4389165284687675e-05,
"loss": 0.728,
"step": 404
},
{
"epoch": 3.029850746268657,
"grad_norm": 3.896862745285034,
"learning_rate": 4.43338861249309e-05,
"loss": 0.8955,
"step": 406
},
{
"epoch": 3.044776119402985,
"grad_norm": 3.3480184078216553,
"learning_rate": 4.427860696517413e-05,
"loss": 0.84,
"step": 408
},
{
"epoch": 3.0597014925373136,
"grad_norm": 9.12331485748291,
"learning_rate": 4.422332780541736e-05,
"loss": 0.8026,
"step": 410
},
{
"epoch": 3.074626865671642,
"grad_norm": 6.628831386566162,
"learning_rate": 4.416804864566059e-05,
"loss": 1.0466,
"step": 412
},
{
"epoch": 3.08955223880597,
"grad_norm": 2.638613224029541,
"learning_rate": 4.4112769485903815e-05,
"loss": 0.6834,
"step": 414
},
{
"epoch": 3.1044776119402986,
"grad_norm": 3.1941006183624268,
"learning_rate": 4.4057490326147043e-05,
"loss": 0.9236,
"step": 416
},
{
"epoch": 3.1194029850746268,
"grad_norm": 4.337372779846191,
"learning_rate": 4.400221116639027e-05,
"loss": 0.8447,
"step": 418
},
{
"epoch": 3.1343283582089554,
"grad_norm": 8.465522766113281,
"learning_rate": 4.39469320066335e-05,
"loss": 0.8768,
"step": 420
},
{
"epoch": 3.1492537313432836,
"grad_norm": 4.252687454223633,
"learning_rate": 4.389165284687673e-05,
"loss": 0.7148,
"step": 422
},
{
"epoch": 3.1641791044776117,
"grad_norm": 5.22479772567749,
"learning_rate": 4.3836373687119956e-05,
"loss": 0.7375,
"step": 424
},
{
"epoch": 3.1791044776119404,
"grad_norm": 3.702615737915039,
"learning_rate": 4.3781094527363184e-05,
"loss": 0.8488,
"step": 426
},
{
"epoch": 3.1940298507462686,
"grad_norm": 3.2345383167266846,
"learning_rate": 4.372581536760641e-05,
"loss": 0.7861,
"step": 428
},
{
"epoch": 3.208955223880597,
"grad_norm": 7.831496238708496,
"learning_rate": 4.367053620784965e-05,
"loss": 1.117,
"step": 430
},
{
"epoch": 3.2238805970149254,
"grad_norm": 2.40031361579895,
"learning_rate": 4.3615257048092875e-05,
"loss": 0.7079,
"step": 432
},
{
"epoch": 3.2388059701492535,
"grad_norm": 4.2407050132751465,
"learning_rate": 4.3559977888336103e-05,
"loss": 0.7778,
"step": 434
},
{
"epoch": 3.253731343283582,
"grad_norm": 3.885312795639038,
"learning_rate": 4.350469872857933e-05,
"loss": 1.0815,
"step": 436
},
{
"epoch": 3.2686567164179103,
"grad_norm": 3.0180788040161133,
"learning_rate": 4.344941956882256e-05,
"loss": 0.6417,
"step": 438
},
{
"epoch": 3.283582089552239,
"grad_norm": 6.578612804412842,
"learning_rate": 4.339414040906579e-05,
"loss": 0.9152,
"step": 440
},
{
"epoch": 3.298507462686567,
"grad_norm": 4.349058151245117,
"learning_rate": 4.3338861249309016e-05,
"loss": 0.9936,
"step": 442
},
{
"epoch": 3.3134328358208958,
"grad_norm": 7.187576770782471,
"learning_rate": 4.328358208955224e-05,
"loss": 0.9757,
"step": 444
},
{
"epoch": 3.328358208955224,
"grad_norm": 3.981358528137207,
"learning_rate": 4.3228302929795465e-05,
"loss": 0.6527,
"step": 446
},
{
"epoch": 3.343283582089552,
"grad_norm": 4.734504222869873,
"learning_rate": 4.3173023770038694e-05,
"loss": 0.917,
"step": 448
},
{
"epoch": 3.3582089552238807,
"grad_norm": 7.110349655151367,
"learning_rate": 4.311774461028192e-05,
"loss": 0.7495,
"step": 450
},
{
"epoch": 3.373134328358209,
"grad_norm": 4.323439121246338,
"learning_rate": 4.306246545052515e-05,
"loss": 0.7577,
"step": 452
},
{
"epoch": 3.388059701492537,
"grad_norm": 6.730025768280029,
"learning_rate": 4.300718629076838e-05,
"loss": 1.2703,
"step": 454
},
{
"epoch": 3.4029850746268657,
"grad_norm": 10.35930347442627,
"learning_rate": 4.2951907131011606e-05,
"loss": 0.4539,
"step": 456
},
{
"epoch": 3.417910447761194,
"grad_norm": 3.9190192222595215,
"learning_rate": 4.289662797125484e-05,
"loss": 0.8863,
"step": 458
},
{
"epoch": 3.4328358208955225,
"grad_norm": 6.045052528381348,
"learning_rate": 4.284134881149807e-05,
"loss": 0.8221,
"step": 460
},
{
"epoch": 3.4477611940298507,
"grad_norm": 9.53062629699707,
"learning_rate": 4.27860696517413e-05,
"loss": 0.9652,
"step": 462
},
{
"epoch": 3.4626865671641793,
"grad_norm": 5.4133710861206055,
"learning_rate": 4.2730790491984525e-05,
"loss": 0.6179,
"step": 464
},
{
"epoch": 3.4776119402985075,
"grad_norm": 5.08922004699707,
"learning_rate": 4.2675511332227754e-05,
"loss": 0.8349,
"step": 466
},
{
"epoch": 3.4925373134328357,
"grad_norm": 9.440414428710938,
"learning_rate": 4.262023217247098e-05,
"loss": 0.7352,
"step": 468
},
{
"epoch": 3.5074626865671643,
"grad_norm": 5.086453437805176,
"learning_rate": 4.256495301271421e-05,
"loss": 0.6674,
"step": 470
},
{
"epoch": 3.5223880597014925,
"grad_norm": 5.283850193023682,
"learning_rate": 4.250967385295744e-05,
"loss": 0.9129,
"step": 472
},
{
"epoch": 3.5373134328358207,
"grad_norm": 6.586909770965576,
"learning_rate": 4.2454394693200666e-05,
"loss": 0.4512,
"step": 474
},
{
"epoch": 3.5522388059701493,
"grad_norm": 6.328141212463379,
"learning_rate": 4.2399115533443894e-05,
"loss": 0.9986,
"step": 476
},
{
"epoch": 3.5671641791044775,
"grad_norm": 11.822186470031738,
"learning_rate": 4.234383637368712e-05,
"loss": 0.6238,
"step": 478
},
{
"epoch": 3.582089552238806,
"grad_norm": 12.303596496582031,
"learning_rate": 4.228855721393035e-05,
"loss": 0.666,
"step": 480
},
{
"epoch": 3.5970149253731343,
"grad_norm": 10.991721153259277,
"learning_rate": 4.223327805417358e-05,
"loss": 0.8176,
"step": 482
},
{
"epoch": 3.611940298507463,
"grad_norm": 4.936182022094727,
"learning_rate": 4.217799889441681e-05,
"loss": 0.6964,
"step": 484
},
{
"epoch": 3.626865671641791,
"grad_norm": 4.455245494842529,
"learning_rate": 4.2122719734660035e-05,
"loss": 0.8681,
"step": 486
},
{
"epoch": 3.6417910447761193,
"grad_norm": 4.1669206619262695,
"learning_rate": 4.206744057490326e-05,
"loss": 0.8556,
"step": 488
},
{
"epoch": 3.656716417910448,
"grad_norm": 2.757246732711792,
"learning_rate": 4.201216141514649e-05,
"loss": 0.6557,
"step": 490
},
{
"epoch": 3.671641791044776,
"grad_norm": 4.065201282501221,
"learning_rate": 4.195688225538972e-05,
"loss": 0.7899,
"step": 492
},
{
"epoch": 3.6865671641791042,
"grad_norm": 5.666295528411865,
"learning_rate": 4.190160309563295e-05,
"loss": 0.7589,
"step": 494
},
{
"epoch": 3.701492537313433,
"grad_norm": 4.929719924926758,
"learning_rate": 4.1846323935876176e-05,
"loss": 0.6709,
"step": 496
},
{
"epoch": 3.716417910447761,
"grad_norm": 6.261268138885498,
"learning_rate": 4.1791044776119404e-05,
"loss": 0.4849,
"step": 498
},
{
"epoch": 3.7313432835820897,
"grad_norm": 1.9559329748153687,
"learning_rate": 4.173576561636263e-05,
"loss": 0.4897,
"step": 500
},
{
"epoch": 3.746268656716418,
"grad_norm": 3.6171419620513916,
"learning_rate": 4.168048645660586e-05,
"loss": 0.4541,
"step": 502
},
{
"epoch": 3.7611940298507465,
"grad_norm": 5.1620988845825195,
"learning_rate": 4.162520729684909e-05,
"loss": 0.4952,
"step": 504
},
{
"epoch": 3.7761194029850746,
"grad_norm": 6.901045322418213,
"learning_rate": 4.1569928137092316e-05,
"loss": 0.8728,
"step": 506
},
{
"epoch": 3.791044776119403,
"grad_norm": 6.784924507141113,
"learning_rate": 4.1514648977335544e-05,
"loss": 1.0379,
"step": 508
},
{
"epoch": 3.8059701492537314,
"grad_norm": 3.829332113265991,
"learning_rate": 4.145936981757877e-05,
"loss": 0.8488,
"step": 510
},
{
"epoch": 3.8208955223880596,
"grad_norm": 8.228715896606445,
"learning_rate": 4.1404090657822e-05,
"loss": 0.4737,
"step": 512
},
{
"epoch": 3.835820895522388,
"grad_norm": 7.089122295379639,
"learning_rate": 4.134881149806523e-05,
"loss": 0.8644,
"step": 514
},
{
"epoch": 3.8507462686567164,
"grad_norm": 6.432941913604736,
"learning_rate": 4.1293532338308464e-05,
"loss": 0.5153,
"step": 516
},
{
"epoch": 3.8656716417910446,
"grad_norm": 8.107361793518066,
"learning_rate": 4.123825317855169e-05,
"loss": 0.6265,
"step": 518
},
{
"epoch": 3.8805970149253732,
"grad_norm": 4.366127014160156,
"learning_rate": 4.118297401879492e-05,
"loss": 0.9007,
"step": 520
},
{
"epoch": 3.8955223880597014,
"grad_norm": 3.596374988555908,
"learning_rate": 4.112769485903815e-05,
"loss": 0.7653,
"step": 522
},
{
"epoch": 3.91044776119403,
"grad_norm": 6.282411098480225,
"learning_rate": 4.1072415699281376e-05,
"loss": 0.8757,
"step": 524
},
{
"epoch": 3.925373134328358,
"grad_norm": 6.818746566772461,
"learning_rate": 4.1017136539524604e-05,
"loss": 0.7569,
"step": 526
},
{
"epoch": 3.9402985074626864,
"grad_norm": 6.6983113288879395,
"learning_rate": 4.096185737976783e-05,
"loss": 0.79,
"step": 528
},
{
"epoch": 3.955223880597015,
"grad_norm": 3.532710075378418,
"learning_rate": 4.090657822001106e-05,
"loss": 0.6981,
"step": 530
},
{
"epoch": 3.970149253731343,
"grad_norm": 6.195641040802002,
"learning_rate": 4.085129906025428e-05,
"loss": 0.8261,
"step": 532
},
{
"epoch": 3.9850746268656714,
"grad_norm": 8.55378246307373,
"learning_rate": 4.079601990049751e-05,
"loss": 0.7682,
"step": 534
},
{
"epoch": 4.0,
"grad_norm": 7.6657891273498535,
"learning_rate": 4.074074074074074e-05,
"loss": 0.6755,
"step": 536
},
{
"epoch": 4.0,
"eval_accuracy": 0.74,
"eval_f1_macro": 0.6978798162621692,
"eval_f1_micro": 0.74,
"eval_f1_weighted": 0.7101990093240095,
"eval_loss": 0.93646240234375,
"eval_precision_macro": 0.7079046770223241,
"eval_precision_micro": 0.74,
"eval_precision_weighted": 0.719476911976912,
"eval_recall_macro": 0.7278711484593837,
"eval_recall_micro": 0.74,
"eval_recall_weighted": 0.74,
"eval_runtime": 40.7905,
"eval_samples_per_second": 4.903,
"eval_steps_per_second": 0.172,
"step": 536
},
{
"epoch": 4.014925373134329,
"grad_norm": 3.920659065246582,
"learning_rate": 4.0685461580983966e-05,
"loss": 0.5528,
"step": 538
},
{
"epoch": 4.029850746268656,
"grad_norm": 5.147222995758057,
"learning_rate": 4.0630182421227194e-05,
"loss": 1.0055,
"step": 540
},
{
"epoch": 4.044776119402985,
"grad_norm": 2.260542869567871,
"learning_rate": 4.057490326147043e-05,
"loss": 0.6422,
"step": 542
},
{
"epoch": 4.059701492537314,
"grad_norm": 5.286315441131592,
"learning_rate": 4.051962410171366e-05,
"loss": 0.5399,
"step": 544
},
{
"epoch": 4.074626865671641,
"grad_norm": 8.412203788757324,
"learning_rate": 4.0464344941956886e-05,
"loss": 0.6129,
"step": 546
},
{
"epoch": 4.08955223880597,
"grad_norm": 6.18348503112793,
"learning_rate": 4.0409065782200114e-05,
"loss": 0.5879,
"step": 548
},
{
"epoch": 4.104477611940299,
"grad_norm": 2.4661457538604736,
"learning_rate": 4.035378662244334e-05,
"loss": 0.5941,
"step": 550
},
{
"epoch": 4.119402985074627,
"grad_norm": 4.261096477508545,
"learning_rate": 4.029850746268657e-05,
"loss": 0.9773,
"step": 552
},
{
"epoch": 4.134328358208955,
"grad_norm": 6.717421054840088,
"learning_rate": 4.02432283029298e-05,
"loss": 0.7255,
"step": 554
},
{
"epoch": 4.149253731343284,
"grad_norm": 4.847568511962891,
"learning_rate": 4.0187949143173026e-05,
"loss": 0.6345,
"step": 556
},
{
"epoch": 4.164179104477612,
"grad_norm": 3.6066529750823975,
"learning_rate": 4.0132669983416254e-05,
"loss": 0.4784,
"step": 558
},
{
"epoch": 4.17910447761194,
"grad_norm": 1.8522593975067139,
"learning_rate": 4.007739082365948e-05,
"loss": 0.4678,
"step": 560
},
{
"epoch": 4.1940298507462686,
"grad_norm": 7.491433620452881,
"learning_rate": 4.002211166390271e-05,
"loss": 0.6955,
"step": 562
},
{
"epoch": 4.208955223880597,
"grad_norm": 3.9711689949035645,
"learning_rate": 3.996683250414594e-05,
"loss": 0.7432,
"step": 564
},
{
"epoch": 4.223880597014926,
"grad_norm": 3.5602264404296875,
"learning_rate": 3.991155334438917e-05,
"loss": 0.6618,
"step": 566
},
{
"epoch": 4.2388059701492535,
"grad_norm": 3.383795976638794,
"learning_rate": 3.9856274184632395e-05,
"loss": 0.6079,
"step": 568
},
{
"epoch": 4.253731343283582,
"grad_norm": 2.320563554763794,
"learning_rate": 3.980099502487562e-05,
"loss": 0.5248,
"step": 570
},
{
"epoch": 4.268656716417911,
"grad_norm": 3.110429048538208,
"learning_rate": 3.974571586511885e-05,
"loss": 0.3517,
"step": 572
},
{
"epoch": 4.2835820895522385,
"grad_norm": 6.524468421936035,
"learning_rate": 3.9690436705362086e-05,
"loss": 0.6481,
"step": 574
},
{
"epoch": 4.298507462686567,
"grad_norm": 3.3383541107177734,
"learning_rate": 3.9635157545605314e-05,
"loss": 0.6297,
"step": 576
},
{
"epoch": 4.313432835820896,
"grad_norm": 1.686207890510559,
"learning_rate": 3.9579878385848536e-05,
"loss": 0.5142,
"step": 578
},
{
"epoch": 4.3283582089552235,
"grad_norm": 9.0740385055542,
"learning_rate": 3.9524599226091764e-05,
"loss": 0.478,
"step": 580
},
{
"epoch": 4.343283582089552,
"grad_norm": 8.83989143371582,
"learning_rate": 3.946932006633499e-05,
"loss": 0.7348,
"step": 582
},
{
"epoch": 4.358208955223881,
"grad_norm": 9.292248725891113,
"learning_rate": 3.941404090657822e-05,
"loss": 0.6026,
"step": 584
},
{
"epoch": 4.373134328358209,
"grad_norm": 4.79518461227417,
"learning_rate": 3.935876174682145e-05,
"loss": 0.818,
"step": 586
},
{
"epoch": 4.388059701492537,
"grad_norm": 6.292643070220947,
"learning_rate": 3.9303482587064676e-05,
"loss": 0.5522,
"step": 588
},
{
"epoch": 4.402985074626866,
"grad_norm": 4.075753688812256,
"learning_rate": 3.9248203427307905e-05,
"loss": 0.5259,
"step": 590
},
{
"epoch": 4.417910447761194,
"grad_norm": 2.985786199569702,
"learning_rate": 3.919292426755113e-05,
"loss": 0.4218,
"step": 592
},
{
"epoch": 4.432835820895522,
"grad_norm": 4.570261478424072,
"learning_rate": 3.913764510779436e-05,
"loss": 0.4735,
"step": 594
},
{
"epoch": 4.447761194029851,
"grad_norm": 2.8057727813720703,
"learning_rate": 3.908236594803759e-05,
"loss": 0.4241,
"step": 596
},
{
"epoch": 4.462686567164179,
"grad_norm": 7.44076681137085,
"learning_rate": 3.902708678828082e-05,
"loss": 0.4751,
"step": 598
},
{
"epoch": 4.477611940298507,
"grad_norm": 4.970695972442627,
"learning_rate": 3.8971807628524045e-05,
"loss": 0.5297,
"step": 600
},
{
"epoch": 4.492537313432836,
"grad_norm": 3.2911581993103027,
"learning_rate": 3.891652846876728e-05,
"loss": 0.4756,
"step": 602
},
{
"epoch": 4.507462686567164,
"grad_norm": 6.148906230926514,
"learning_rate": 3.886124930901051e-05,
"loss": 0.5251,
"step": 604
},
{
"epoch": 4.522388059701493,
"grad_norm": 4.234598636627197,
"learning_rate": 3.8805970149253736e-05,
"loss": 0.3342,
"step": 606
},
{
"epoch": 4.537313432835821,
"grad_norm": 4.786794662475586,
"learning_rate": 3.8750690989496964e-05,
"loss": 0.6528,
"step": 608
},
{
"epoch": 4.552238805970149,
"grad_norm": 8.322453498840332,
"learning_rate": 3.869541182974019e-05,
"loss": 0.5658,
"step": 610
},
{
"epoch": 4.567164179104478,
"grad_norm": 6.955239772796631,
"learning_rate": 3.864013266998342e-05,
"loss": 0.5496,
"step": 612
},
{
"epoch": 4.582089552238806,
"grad_norm": 4.89069938659668,
"learning_rate": 3.858485351022665e-05,
"loss": 0.4191,
"step": 614
},
{
"epoch": 4.597014925373134,
"grad_norm": 9.381403923034668,
"learning_rate": 3.852957435046988e-05,
"loss": 0.5993,
"step": 616
},
{
"epoch": 4.611940298507463,
"grad_norm": 4.977263450622559,
"learning_rate": 3.8474295190713105e-05,
"loss": 0.569,
"step": 618
},
{
"epoch": 4.6268656716417915,
"grad_norm": 3.183396816253662,
"learning_rate": 3.841901603095633e-05,
"loss": 0.5183,
"step": 620
},
{
"epoch": 4.641791044776119,
"grad_norm": 3.1676368713378906,
"learning_rate": 3.8363736871199555e-05,
"loss": 0.4647,
"step": 622
},
{
"epoch": 4.656716417910448,
"grad_norm": 9.969623565673828,
"learning_rate": 3.830845771144278e-05,
"loss": 0.4468,
"step": 624
},
{
"epoch": 4.6716417910447765,
"grad_norm": 11.773579597473145,
"learning_rate": 3.825317855168601e-05,
"loss": 0.477,
"step": 626
},
{
"epoch": 4.686567164179104,
"grad_norm": 3.361043691635132,
"learning_rate": 3.8197899391929246e-05,
"loss": 0.4533,
"step": 628
},
{
"epoch": 4.701492537313433,
"grad_norm": 6.481675148010254,
"learning_rate": 3.8142620232172474e-05,
"loss": 0.5998,
"step": 630
},
{
"epoch": 4.7164179104477615,
"grad_norm": 10.961061477661133,
"learning_rate": 3.80873410724157e-05,
"loss": 0.4246,
"step": 632
},
{
"epoch": 4.731343283582089,
"grad_norm": 11.879002571105957,
"learning_rate": 3.803206191265893e-05,
"loss": 0.4113,
"step": 634
},
{
"epoch": 4.746268656716418,
"grad_norm": 6.598226547241211,
"learning_rate": 3.797678275290216e-05,
"loss": 0.4986,
"step": 636
},
{
"epoch": 4.7611940298507465,
"grad_norm": 9.366608619689941,
"learning_rate": 3.7921503593145386e-05,
"loss": 0.3439,
"step": 638
},
{
"epoch": 4.776119402985074,
"grad_norm": 11.350964546203613,
"learning_rate": 3.7866224433388615e-05,
"loss": 0.8422,
"step": 640
},
{
"epoch": 4.791044776119403,
"grad_norm": 8.099052429199219,
"learning_rate": 3.781094527363184e-05,
"loss": 0.687,
"step": 642
},
{
"epoch": 4.8059701492537314,
"grad_norm": 5.031801223754883,
"learning_rate": 3.775566611387507e-05,
"loss": 0.484,
"step": 644
},
{
"epoch": 4.82089552238806,
"grad_norm": 3.5851833820343018,
"learning_rate": 3.77003869541183e-05,
"loss": 0.6037,
"step": 646
},
{
"epoch": 4.835820895522388,
"grad_norm": 14.049162864685059,
"learning_rate": 3.764510779436153e-05,
"loss": 0.5521,
"step": 648
},
{
"epoch": 4.850746268656716,
"grad_norm": 5.086363315582275,
"learning_rate": 3.7589828634604755e-05,
"loss": 0.5173,
"step": 650
},
{
"epoch": 4.865671641791045,
"grad_norm": 2.833794355392456,
"learning_rate": 3.753454947484798e-05,
"loss": 0.7762,
"step": 652
},
{
"epoch": 4.880597014925373,
"grad_norm": 2.7670750617980957,
"learning_rate": 3.747927031509121e-05,
"loss": 0.5592,
"step": 654
},
{
"epoch": 4.895522388059701,
"grad_norm": 2.6060335636138916,
"learning_rate": 3.742399115533444e-05,
"loss": 0.2484,
"step": 656
},
{
"epoch": 4.91044776119403,
"grad_norm": 5.139571666717529,
"learning_rate": 3.736871199557767e-05,
"loss": 0.5157,
"step": 658
},
{
"epoch": 4.925373134328359,
"grad_norm": 3.047414779663086,
"learning_rate": 3.73134328358209e-05,
"loss": 0.4967,
"step": 660
},
{
"epoch": 4.940298507462686,
"grad_norm": 7.455503940582275,
"learning_rate": 3.725815367606413e-05,
"loss": 0.4034,
"step": 662
},
{
"epoch": 4.955223880597015,
"grad_norm": 12.100015640258789,
"learning_rate": 3.720287451630736e-05,
"loss": 0.5245,
"step": 664
},
{
"epoch": 4.970149253731344,
"grad_norm": 3.1610209941864014,
"learning_rate": 3.714759535655058e-05,
"loss": 0.6381,
"step": 666
},
{
"epoch": 4.985074626865671,
"grad_norm": 3.781731367111206,
"learning_rate": 3.709231619679381e-05,
"loss": 0.5727,
"step": 668
},
{
"epoch": 5.0,
"grad_norm": 1.7648042440414429,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.3107,
"step": 670
},
{
"epoch": 5.0,
"eval_accuracy": 0.725,
"eval_f1_macro": 0.67604038118744,
"eval_f1_micro": 0.725,
"eval_f1_weighted": 0.6927145770895771,
"eval_loss": 1.0104252099990845,
"eval_precision_macro": 0.7111726508785332,
"eval_precision_micro": 0.725,
"eval_precision_weighted": 0.7247546897546897,
"eval_recall_macro": 0.7092436974789916,
"eval_recall_micro": 0.725,
"eval_recall_weighted": 0.725,
"eval_runtime": 39.3596,
"eval_samples_per_second": 5.081,
"eval_steps_per_second": 0.178,
"step": 670
},
{
"epoch": 5.014925373134329,
"grad_norm": 1.6856744289398193,
"learning_rate": 3.6981757877280265e-05,
"loss": 0.289,
"step": 672
},
{
"epoch": 5.029850746268656,
"grad_norm": 2.8615660667419434,
"learning_rate": 3.692647871752349e-05,
"loss": 0.4746,
"step": 674
},
{
"epoch": 5.044776119402985,
"grad_norm": 6.883438587188721,
"learning_rate": 3.687119955776672e-05,
"loss": 0.6167,
"step": 676
},
{
"epoch": 5.059701492537314,
"grad_norm": 7.769782543182373,
"learning_rate": 3.681592039800995e-05,
"loss": 0.3471,
"step": 678
},
{
"epoch": 5.074626865671641,
"grad_norm": 3.5327107906341553,
"learning_rate": 3.676064123825318e-05,
"loss": 0.4453,
"step": 680
},
{
"epoch": 5.08955223880597,
"grad_norm": 3.9162540435791016,
"learning_rate": 3.6705362078496405e-05,
"loss": 0.4471,
"step": 682
},
{
"epoch": 5.104477611940299,
"grad_norm": 2.9827005863189697,
"learning_rate": 3.6650082918739633e-05,
"loss": 0.344,
"step": 684
},
{
"epoch": 5.119402985074627,
"grad_norm": 5.053178310394287,
"learning_rate": 3.659480375898287e-05,
"loss": 0.3973,
"step": 686
},
{
"epoch": 5.134328358208955,
"grad_norm": 5.1265716552734375,
"learning_rate": 3.6539524599226097e-05,
"loss": 0.3783,
"step": 688
},
{
"epoch": 5.149253731343284,
"grad_norm": 3.368522882461548,
"learning_rate": 3.6484245439469325e-05,
"loss": 0.5253,
"step": 690
},
{
"epoch": 5.164179104477612,
"grad_norm": 5.44252872467041,
"learning_rate": 3.642896627971255e-05,
"loss": 0.54,
"step": 692
},
{
"epoch": 5.17910447761194,
"grad_norm": 7.313326358795166,
"learning_rate": 3.637368711995578e-05,
"loss": 0.3734,
"step": 694
},
{
"epoch": 5.1940298507462686,
"grad_norm": 3.947737693786621,
"learning_rate": 3.631840796019901e-05,
"loss": 0.4204,
"step": 696
},
{
"epoch": 5.208955223880597,
"grad_norm": 2.3334567546844482,
"learning_rate": 3.626312880044224e-05,
"loss": 0.3622,
"step": 698
},
{
"epoch": 5.223880597014926,
"grad_norm": 4.052284240722656,
"learning_rate": 3.6207849640685465e-05,
"loss": 0.2778,
"step": 700
},
{
"epoch": 5.2388059701492535,
"grad_norm": 11.430601119995117,
"learning_rate": 3.6152570480928693e-05,
"loss": 0.6734,
"step": 702
},
{
"epoch": 5.253731343283582,
"grad_norm": 5.606107234954834,
"learning_rate": 3.609729132117192e-05,
"loss": 0.4028,
"step": 704
},
{
"epoch": 5.268656716417911,
"grad_norm": 13.239790916442871,
"learning_rate": 3.604201216141515e-05,
"loss": 0.6932,
"step": 706
},
{
"epoch": 5.2835820895522385,
"grad_norm": 2.727085590362549,
"learning_rate": 3.598673300165838e-05,
"loss": 0.6325,
"step": 708
},
{
"epoch": 5.298507462686567,
"grad_norm": 4.920370101928711,
"learning_rate": 3.59314538419016e-05,
"loss": 0.215,
"step": 710
},
{
"epoch": 5.313432835820896,
"grad_norm": 3.629782199859619,
"learning_rate": 3.5876174682144834e-05,
"loss": 0.5572,
"step": 712
},
{
"epoch": 5.3283582089552235,
"grad_norm": 5.6093668937683105,
"learning_rate": 3.582089552238806e-05,
"loss": 0.7121,
"step": 714
},
{
"epoch": 5.343283582089552,
"grad_norm": 6.661886215209961,
"learning_rate": 3.576561636263129e-05,
"loss": 0.4479,
"step": 716
},
{
"epoch": 5.358208955223881,
"grad_norm": 10.350639343261719,
"learning_rate": 3.571033720287452e-05,
"loss": 0.5578,
"step": 718
},
{
"epoch": 5.373134328358209,
"grad_norm": 6.086325168609619,
"learning_rate": 3.565505804311775e-05,
"loss": 0.4269,
"step": 720
},
{
"epoch": 5.388059701492537,
"grad_norm": 6.263159275054932,
"learning_rate": 3.5599778883360975e-05,
"loss": 0.5466,
"step": 722
},
{
"epoch": 5.402985074626866,
"grad_norm": 6.2910990715026855,
"learning_rate": 3.55444997236042e-05,
"loss": 0.3659,
"step": 724
},
{
"epoch": 5.417910447761194,
"grad_norm": 4.90096378326416,
"learning_rate": 3.548922056384743e-05,
"loss": 0.6204,
"step": 726
},
{
"epoch": 5.432835820895522,
"grad_norm": 3.6040308475494385,
"learning_rate": 3.543394140409066e-05,
"loss": 0.5589,
"step": 728
},
{
"epoch": 5.447761194029851,
"grad_norm": 14.020445823669434,
"learning_rate": 3.537866224433389e-05,
"loss": 0.5483,
"step": 730
},
{
"epoch": 5.462686567164179,
"grad_norm": 2.3177435398101807,
"learning_rate": 3.5323383084577115e-05,
"loss": 0.3787,
"step": 732
},
{
"epoch": 5.477611940298507,
"grad_norm": 2.891357183456421,
"learning_rate": 3.5268103924820344e-05,
"loss": 0.347,
"step": 734
},
{
"epoch": 5.492537313432836,
"grad_norm": 11.490731239318848,
"learning_rate": 3.521282476506357e-05,
"loss": 0.5475,
"step": 736
},
{
"epoch": 5.507462686567164,
"grad_norm": 2.3620495796203613,
"learning_rate": 3.51575456053068e-05,
"loss": 0.4033,
"step": 738
},
{
"epoch": 5.522388059701493,
"grad_norm": 3.9230000972747803,
"learning_rate": 3.510226644555003e-05,
"loss": 0.4246,
"step": 740
},
{
"epoch": 5.537313432835821,
"grad_norm": 3.205672025680542,
"learning_rate": 3.5046987285793256e-05,
"loss": 0.4462,
"step": 742
},
{
"epoch": 5.552238805970149,
"grad_norm": 5.685773849487305,
"learning_rate": 3.499170812603649e-05,
"loss": 0.379,
"step": 744
},
{
"epoch": 5.567164179104478,
"grad_norm": 5.982370376586914,
"learning_rate": 3.493642896627972e-05,
"loss": 0.4962,
"step": 746
},
{
"epoch": 5.582089552238806,
"grad_norm": 13.228385925292969,
"learning_rate": 3.488114980652295e-05,
"loss": 0.3345,
"step": 748
},
{
"epoch": 5.597014925373134,
"grad_norm": 5.765232086181641,
"learning_rate": 3.4825870646766175e-05,
"loss": 0.4973,
"step": 750
},
{
"epoch": 5.611940298507463,
"grad_norm": 1.701096534729004,
"learning_rate": 3.4770591487009404e-05,
"loss": 0.3608,
"step": 752
},
{
"epoch": 5.6268656716417915,
"grad_norm": 14.079666137695312,
"learning_rate": 3.4715312327252625e-05,
"loss": 0.5854,
"step": 754
},
{
"epoch": 5.641791044776119,
"grad_norm": 3.6387109756469727,
"learning_rate": 3.466003316749585e-05,
"loss": 0.2784,
"step": 756
},
{
"epoch": 5.656716417910448,
"grad_norm": 1.6772111654281616,
"learning_rate": 3.460475400773908e-05,
"loss": 0.2205,
"step": 758
},
{
"epoch": 5.6716417910447765,
"grad_norm": 5.0109405517578125,
"learning_rate": 3.454947484798231e-05,
"loss": 0.4189,
"step": 760
},
{
"epoch": 5.686567164179104,
"grad_norm": 4.438418865203857,
"learning_rate": 3.449419568822554e-05,
"loss": 0.1976,
"step": 762
},
{
"epoch": 5.701492537313433,
"grad_norm": 4.297623157501221,
"learning_rate": 3.4438916528468766e-05,
"loss": 0.3569,
"step": 764
},
{
"epoch": 5.7164179104477615,
"grad_norm": 7.163605213165283,
"learning_rate": 3.4383637368711994e-05,
"loss": 0.2972,
"step": 766
},
{
"epoch": 5.731343283582089,
"grad_norm": 3.6532185077667236,
"learning_rate": 3.432835820895522e-05,
"loss": 0.1953,
"step": 768
},
{
"epoch": 5.746268656716418,
"grad_norm": 10.143959045410156,
"learning_rate": 3.427307904919845e-05,
"loss": 0.4292,
"step": 770
},
{
"epoch": 5.7611940298507465,
"grad_norm": 7.471317768096924,
"learning_rate": 3.4217799889441685e-05,
"loss": 0.472,
"step": 772
},
{
"epoch": 5.776119402985074,
"grad_norm": 3.14628529548645,
"learning_rate": 3.416252072968491e-05,
"loss": 0.4863,
"step": 774
},
{
"epoch": 5.791044776119403,
"grad_norm": 2.2698168754577637,
"learning_rate": 3.410724156992814e-05,
"loss": 0.2093,
"step": 776
},
{
"epoch": 5.8059701492537314,
"grad_norm": 3.6853973865509033,
"learning_rate": 3.405196241017137e-05,
"loss": 0.5749,
"step": 778
},
{
"epoch": 5.82089552238806,
"grad_norm": 9.027390480041504,
"learning_rate": 3.39966832504146e-05,
"loss": 0.8224,
"step": 780
},
{
"epoch": 5.835820895522388,
"grad_norm": 4.245992660522461,
"learning_rate": 3.3941404090657825e-05,
"loss": 0.2923,
"step": 782
},
{
"epoch": 5.850746268656716,
"grad_norm": 10.722375869750977,
"learning_rate": 3.3886124930901054e-05,
"loss": 0.7797,
"step": 784
},
{
"epoch": 5.865671641791045,
"grad_norm": 4.297248363494873,
"learning_rate": 3.383084577114428e-05,
"loss": 0.5619,
"step": 786
},
{
"epoch": 5.880597014925373,
"grad_norm": 4.837419509887695,
"learning_rate": 3.377556661138751e-05,
"loss": 0.3356,
"step": 788
},
{
"epoch": 5.895522388059701,
"grad_norm": 1.2375975847244263,
"learning_rate": 3.372028745163074e-05,
"loss": 0.1544,
"step": 790
},
{
"epoch": 5.91044776119403,
"grad_norm": 12.756596565246582,
"learning_rate": 3.3665008291873966e-05,
"loss": 0.3809,
"step": 792
},
{
"epoch": 5.925373134328359,
"grad_norm": 3.4860129356384277,
"learning_rate": 3.3609729132117194e-05,
"loss": 0.3144,
"step": 794
},
{
"epoch": 5.940298507462686,
"grad_norm": 7.757724761962891,
"learning_rate": 3.355444997236042e-05,
"loss": 0.3695,
"step": 796
},
{
"epoch": 5.955223880597015,
"grad_norm": 2.4491782188415527,
"learning_rate": 3.349917081260365e-05,
"loss": 0.275,
"step": 798
},
{
"epoch": 5.970149253731344,
"grad_norm": 3.097658395767212,
"learning_rate": 3.344389165284688e-05,
"loss": 0.5223,
"step": 800
},
{
"epoch": 5.985074626865671,
"grad_norm": 3.9302120208740234,
"learning_rate": 3.338861249309011e-05,
"loss": 0.2202,
"step": 802
},
{
"epoch": 6.0,
"grad_norm": 4.732783794403076,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.4422,
"step": 804
},
{
"epoch": 6.0,
"eval_accuracy": 0.76,
"eval_f1_macro": 0.7178212262554823,
"eval_f1_micro": 0.76,
"eval_f1_weighted": 0.7341365415630122,
"eval_loss": 0.890067458152771,
"eval_precision_macro": 0.7468954248366012,
"eval_precision_micro": 0.76,
"eval_precision_weighted": 0.766861111111111,
"eval_recall_macro": 0.7448179271708684,
"eval_recall_micro": 0.76,
"eval_recall_weighted": 0.76,
"eval_runtime": 37.7852,
"eval_samples_per_second": 5.293,
"eval_steps_per_second": 0.185,
"step": 804
}
],
"logging_steps": 2,
"max_steps": 2010,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 844829150146560.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}