{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1063406312197896,
"eval_steps": 34,
"global_step": 374,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002843332385555871,
"eval_loss": 1.5018354654312134,
"eval_runtime": 166.295,
"eval_samples_per_second": 35.623,
"eval_steps_per_second": 4.456,
"step": 1
},
{
"epoch": 0.0008529997156667614,
"grad_norm": 0.7210565209388733,
"learning_rate": 1.5e-05,
"loss": 1.5223,
"step": 3
},
{
"epoch": 0.0017059994313335229,
"grad_norm": 0.6832892298698425,
"learning_rate": 3e-05,
"loss": 1.5221,
"step": 6
},
{
"epoch": 0.0025589991470002845,
"grad_norm": 0.7228020429611206,
"learning_rate": 4.5e-05,
"loss": 1.408,
"step": 9
},
{
"epoch": 0.0034119988626670457,
"grad_norm": 0.6384875774383545,
"learning_rate": 4.999675562428437e-05,
"loss": 1.4404,
"step": 12
},
{
"epoch": 0.004264998578333807,
"grad_norm": 0.3351251482963562,
"learning_rate": 4.9979724954289244e-05,
"loss": 1.3736,
"step": 15
},
{
"epoch": 0.005117998294000569,
"grad_norm": 0.182565838098526,
"learning_rate": 4.994810682835951e-05,
"loss": 1.3079,
"step": 18
},
{
"epoch": 0.00597099800966733,
"grad_norm": 0.19113591313362122,
"learning_rate": 4.990191971059033e-05,
"loss": 1.316,
"step": 21
},
{
"epoch": 0.0068239977253340914,
"grad_norm": 0.1948336660861969,
"learning_rate": 4.984119057295783e-05,
"loss": 1.2426,
"step": 24
},
{
"epoch": 0.007676997441000853,
"grad_norm": 0.18684643507003784,
"learning_rate": 4.976595487956823e-05,
"loss": 1.2503,
"step": 27
},
{
"epoch": 0.008529997156667614,
"grad_norm": 0.18414735794067383,
"learning_rate": 4.967625656594782e-05,
"loss": 1.2323,
"step": 30
},
{
"epoch": 0.009382996872334376,
"grad_norm": 0.17294025421142578,
"learning_rate": 4.957214801338581e-05,
"loss": 1.1942,
"step": 33
},
{
"epoch": 0.009667330110889964,
"eval_loss": 1.1813108921051025,
"eval_runtime": 167.7438,
"eval_samples_per_second": 35.316,
"eval_steps_per_second": 4.417,
"step": 34
},
{
"epoch": 0.010235996588001138,
"grad_norm": 0.16702412068843842,
"learning_rate": 4.9453690018345144e-05,
"loss": 1.1981,
"step": 36
},
{
"epoch": 0.011088996303667898,
"grad_norm": 0.1968175172805786,
"learning_rate": 4.932095175695911e-05,
"loss": 1.1675,
"step": 39
},
{
"epoch": 0.01194199601933466,
"grad_norm": 0.18244469165802002,
"learning_rate": 4.917401074463441e-05,
"loss": 1.1584,
"step": 42
},
{
"epoch": 0.01279499573500142,
"grad_norm": 0.16749081015586853,
"learning_rate": 4.901295279078431e-05,
"loss": 1.1134,
"step": 45
},
{
"epoch": 0.013647995450668183,
"grad_norm": 0.17398597300052643,
"learning_rate": 4.883787194871841e-05,
"loss": 1.1139,
"step": 48
},
{
"epoch": 0.014500995166334945,
"grad_norm": 0.17164087295532227,
"learning_rate": 4.864887046071813e-05,
"loss": 1.079,
"step": 51
},
{
"epoch": 0.015353994882001705,
"grad_norm": 0.1644001007080078,
"learning_rate": 4.8446058698330115e-05,
"loss": 1.0646,
"step": 54
},
{
"epoch": 0.01620699459766847,
"grad_norm": 0.16490623354911804,
"learning_rate": 4.822955509791233e-05,
"loss": 1.0739,
"step": 57
},
{
"epoch": 0.017059994313335228,
"grad_norm": 0.17708458006381989,
"learning_rate": 4.799948609147061e-05,
"loss": 1.0897,
"step": 60
},
{
"epoch": 0.01791299402900199,
"grad_norm": 0.15597032010555267,
"learning_rate": 4.7755986032825864e-05,
"loss": 1.0566,
"step": 63
},
{
"epoch": 0.018765993744668752,
"grad_norm": 0.17728550732135773,
"learning_rate": 4.74991971191553e-05,
"loss": 1.0275,
"step": 66
},
{
"epoch": 0.019334660221779928,
"eval_loss": 1.0011852979660034,
"eval_runtime": 168.1174,
"eval_samples_per_second": 35.237,
"eval_steps_per_second": 4.408,
"step": 68
},
{
"epoch": 0.019618993460335514,
"grad_norm": 0.16994404792785645,
"learning_rate": 4.7229269307953235e-05,
"loss": 0.9841,
"step": 69
},
{
"epoch": 0.020471993176002276,
"grad_norm": 0.17181497812271118,
"learning_rate": 4.694636022946012e-05,
"loss": 0.9944,
"step": 72
},
{
"epoch": 0.021324992891669035,
"grad_norm": 0.21411621570587158,
"learning_rate": 4.665063509461097e-05,
"loss": 1.0013,
"step": 75
},
{
"epoch": 0.022177992607335797,
"grad_norm": 0.19594340026378632,
"learning_rate": 4.6342266598556814e-05,
"loss": 0.9969,
"step": 78
},
{
"epoch": 0.02303099232300256,
"grad_norm": 0.2057276964187622,
"learning_rate": 4.6021434819815555e-05,
"loss": 0.9808,
"step": 81
},
{
"epoch": 0.02388399203866932,
"grad_norm": 0.21778051555156708,
"learning_rate": 4.568832711511125e-05,
"loss": 0.9456,
"step": 84
},
{
"epoch": 0.024736991754336083,
"grad_norm": 0.21349306404590607,
"learning_rate": 4.534313800996299e-05,
"loss": 0.953,
"step": 87
},
{
"epoch": 0.02558999147000284,
"grad_norm": 0.21818219125270844,
"learning_rate": 4.498606908508754e-05,
"loss": 0.9133,
"step": 90
},
{
"epoch": 0.026442991185669604,
"grad_norm": 0.2510223388671875,
"learning_rate": 4.46173288586818e-05,
"loss": 0.9125,
"step": 93
},
{
"epoch": 0.027295990901336366,
"grad_norm": 0.24031595885753632,
"learning_rate": 4.4237132664654154e-05,
"loss": 0.8784,
"step": 96
},
{
"epoch": 0.028148990617003128,
"grad_norm": 0.2543729543685913,
"learning_rate": 4.384570252687542e-05,
"loss": 0.8984,
"step": 99
},
{
"epoch": 0.02900199033266989,
"grad_norm": 0.27020716667175293,
"learning_rate": 4.344326702952326e-05,
"loss": 0.8719,
"step": 102
},
{
"epoch": 0.02900199033266989,
"eval_loss": 0.8574855923652649,
"eval_runtime": 168.0651,
"eval_samples_per_second": 35.248,
"eval_steps_per_second": 4.409,
"step": 102
},
{
"epoch": 0.029854990048336652,
"grad_norm": 0.2670081853866577,
"learning_rate": 4.303006118359537e-05,
"loss": 0.8789,
"step": 105
},
{
"epoch": 0.03070798976400341,
"grad_norm": 0.3037591874599457,
"learning_rate": 4.260632628966974e-05,
"loss": 0.8526,
"step": 108
},
{
"epoch": 0.031560989479670176,
"grad_norm": 0.28440290689468384,
"learning_rate": 4.217230979699188e-05,
"loss": 0.8274,
"step": 111
},
{
"epoch": 0.03241398919533694,
"grad_norm": 0.49433842301368713,
"learning_rate": 4.172826515897146e-05,
"loss": 0.8104,
"step": 114
},
{
"epoch": 0.03326698891100369,
"grad_norm": 0.2911880314350128,
"learning_rate": 4.12744516851726e-05,
"loss": 0.8392,
"step": 117
},
{
"epoch": 0.034119988626670456,
"grad_norm": 0.3204285502433777,
"learning_rate": 4.0811134389884433e-05,
"loss": 0.788,
"step": 120
},
{
"epoch": 0.03497298834233722,
"grad_norm": 0.35650599002838135,
"learning_rate": 4.0338583837360225e-05,
"loss": 0.8004,
"step": 123
},
{
"epoch": 0.03582598805800398,
"grad_norm": 0.310162216424942,
"learning_rate": 3.985707598381544e-05,
"loss": 0.8085,
"step": 126
},
{
"epoch": 0.03667898777367074,
"grad_norm": 0.3370768129825592,
"learning_rate": 3.9366892016277096e-05,
"loss": 0.7317,
"step": 129
},
{
"epoch": 0.037531987489337504,
"grad_norm": 0.4600171446800232,
"learning_rate": 3.886831818837847e-05,
"loss": 0.7626,
"step": 132
},
{
"epoch": 0.038384987205004266,
"grad_norm": 0.43600377440452576,
"learning_rate": 3.8361645653195026e-05,
"loss": 0.7679,
"step": 135
},
{
"epoch": 0.038669320443559856,
"eval_loss": 0.7138456106185913,
"eval_runtime": 168.0686,
"eval_samples_per_second": 35.248,
"eval_steps_per_second": 4.409,
"step": 136
},
{
"epoch": 0.03923798692067103,
"grad_norm": 0.4538515508174896,
"learning_rate": 3.784717029321922e-05,
"loss": 0.7585,
"step": 138
},
{
"epoch": 0.04009098663633779,
"grad_norm": 0.5923985838890076,
"learning_rate": 3.732519254757344e-05,
"loss": 0.7211,
"step": 141
},
{
"epoch": 0.04094398635200455,
"grad_norm": 0.36592337489128113,
"learning_rate": 3.679601723656205e-05,
"loss": 0.6604,
"step": 144
},
{
"epoch": 0.04179698606767131,
"grad_norm": 0.44677990674972534,
"learning_rate": 3.625995338366492e-05,
"loss": 0.7846,
"step": 147
},
{
"epoch": 0.04264998578333807,
"grad_norm": 0.4921860098838806,
"learning_rate": 3.5717314035076355e-05,
"loss": 0.6709,
"step": 150
},
{
"epoch": 0.04350298549900483,
"grad_norm": 0.5457685589790344,
"learning_rate": 3.516841607689501e-05,
"loss": 0.6974,
"step": 153
},
{
"epoch": 0.044355985214671594,
"grad_norm": 0.46299344301223755,
"learning_rate": 3.461358005007128e-05,
"loss": 0.6643,
"step": 156
},
{
"epoch": 0.045208984930338356,
"grad_norm": 0.44877251982688904,
"learning_rate": 3.405312996322042e-05,
"loss": 0.6035,
"step": 159
},
{
"epoch": 0.04606198464600512,
"grad_norm": 0.3635808527469635,
"learning_rate": 3.348739310341068e-05,
"loss": 0.6781,
"step": 162
},
{
"epoch": 0.04691498436167188,
"grad_norm": 0.4172018766403198,
"learning_rate": 3.2916699845036816e-05,
"loss": 0.6195,
"step": 165
},
{
"epoch": 0.04776798407733864,
"grad_norm": 0.36372795701026917,
"learning_rate": 3.234138345689077e-05,
"loss": 0.6599,
"step": 168
},
{
"epoch": 0.048336650554449814,
"eval_loss": 0.606797456741333,
"eval_runtime": 168.0542,
"eval_samples_per_second": 35.251,
"eval_steps_per_second": 4.409,
"step": 170
},
{
"epoch": 0.048620983793005404,
"grad_norm": 0.4114479720592499,
"learning_rate": 3.17617799075421e-05,
"loss": 0.6287,
"step": 171
},
{
"epoch": 0.049473983508672166,
"grad_norm": 0.36733385920524597,
"learning_rate": 3.1178227669141744e-05,
"loss": 0.621,
"step": 174
},
{
"epoch": 0.05032698322433893,
"grad_norm": 0.3716995418071747,
"learning_rate": 3.0591067519763895e-05,
"loss": 0.5532,
"step": 177
},
{
"epoch": 0.05117998294000568,
"grad_norm": 0.4997945725917816,
"learning_rate": 3.0000642344401113e-05,
"loss": 0.6024,
"step": 180
},
{
"epoch": 0.052032982655672445,
"grad_norm": 0.43555817008018494,
"learning_rate": 2.9407296934729227e-05,
"loss": 0.5092,
"step": 183
},
{
"epoch": 0.05288598237133921,
"grad_norm": 0.49766504764556885,
"learning_rate": 2.8811377787758636e-05,
"loss": 0.6556,
"step": 186
},
{
"epoch": 0.05373898208700597,
"grad_norm": 0.6171467304229736,
"learning_rate": 2.8213232903489865e-05,
"loss": 0.5736,
"step": 189
},
{
"epoch": 0.05459198180267273,
"grad_norm": 0.8651450276374817,
"learning_rate": 2.761321158169134e-05,
"loss": 0.5568,
"step": 192
},
{
"epoch": 0.055444981518339494,
"grad_norm": 0.4486936926841736,
"learning_rate": 2.7011664217918154e-05,
"loss": 0.6145,
"step": 195
},
{
"epoch": 0.056297981234006256,
"grad_norm": 0.5348999500274658,
"learning_rate": 2.6408942098890936e-05,
"loss": 0.591,
"step": 198
},
{
"epoch": 0.05715098094967302,
"grad_norm": 0.4498997628688812,
"learning_rate": 2.580539719735433e-05,
"loss": 0.5572,
"step": 201
},
{
"epoch": 0.05800398066533978,
"grad_norm": 0.5493082404136658,
"learning_rate": 2.5201381966534748e-05,
"loss": 0.5316,
"step": 204
},
{
"epoch": 0.05800398066533978,
"eval_loss": 0.5341343879699707,
"eval_runtime": 167.9533,
"eval_samples_per_second": 35.272,
"eval_steps_per_second": 4.412,
"step": 204
},
{
"epoch": 0.05885698038100654,
"grad_norm": 0.47230827808380127,
"learning_rate": 2.459724913431772e-05,
"loss": 0.5152,
"step": 207
},
{
"epoch": 0.059709980096673304,
"grad_norm": 0.5665333867073059,
"learning_rate": 2.399335149726463e-05,
"loss": 0.5736,
"step": 210
},
{
"epoch": 0.06056297981234006,
"grad_norm": 0.47652244567871094,
"learning_rate": 2.3390041714589514e-05,
"loss": 0.5006,
"step": 213
},
{
"epoch": 0.06141597952800682,
"grad_norm": 0.7172293663024902,
"learning_rate": 2.2787672102216042e-05,
"loss": 0.4619,
"step": 216
},
{
"epoch": 0.06226897924367358,
"grad_norm": 0.5251888036727905,
"learning_rate": 2.2186594427034864e-05,
"loss": 0.4852,
"step": 219
},
{
"epoch": 0.06312197895934035,
"grad_norm": 0.45427563786506653,
"learning_rate": 2.1587159701481716e-05,
"loss": 0.4936,
"step": 222
},
{
"epoch": 0.06397497867500711,
"grad_norm": 0.49813351035118103,
"learning_rate": 2.098971797855599e-05,
"loss": 0.5062,
"step": 225
},
{
"epoch": 0.06482797839067388,
"grad_norm": 0.510427713394165,
"learning_rate": 2.0394618147399713e-05,
"loss": 0.4742,
"step": 228
},
{
"epoch": 0.06568097810634063,
"grad_norm": 0.5860615372657776,
"learning_rate": 1.980220772955602e-05,
"loss": 0.5472,
"step": 231
},
{
"epoch": 0.06653397782200739,
"grad_norm": 0.45956236124038696,
"learning_rate": 1.921283267602643e-05,
"loss": 0.5065,
"step": 234
},
{
"epoch": 0.06738697753767416,
"grad_norm": 0.46069616079330444,
"learning_rate": 1.8626837165245165e-05,
"loss": 0.4309,
"step": 237
},
{
"epoch": 0.06767131077622975,
"eval_loss": 0.4809924364089966,
"eval_runtime": 167.8669,
"eval_samples_per_second": 35.29,
"eval_steps_per_second": 4.414,
"step": 238
},
{
"epoch": 0.06823997725334091,
"grad_norm": 0.4818466603755951,
"learning_rate": 1.8044563402088684e-05,
"loss": 0.5623,
"step": 240
},
{
"epoch": 0.06909297696900768,
"grad_norm": 0.45440998673439026,
"learning_rate": 1.746635141803761e-05,
"loss": 0.5626,
"step": 243
},
{
"epoch": 0.06994597668467444,
"grad_norm": 0.5940297842025757,
"learning_rate": 1.6892538872607937e-05,
"loss": 0.5264,
"step": 246
},
{
"epoch": 0.0707989764003412,
"grad_norm": 0.41404712200164795,
"learning_rate": 1.6323460856167426e-05,
"loss": 0.4484,
"step": 249
},
{
"epoch": 0.07165197611600796,
"grad_norm": 0.44981393218040466,
"learning_rate": 1.5759449694252226e-05,
"loss": 0.473,
"step": 252
},
{
"epoch": 0.07250497583167473,
"grad_norm": 0.6551511883735657,
"learning_rate": 1.5200834753498128e-05,
"loss": 0.4956,
"step": 255
},
{
"epoch": 0.07335797554734148,
"grad_norm": 0.5032558441162109,
"learning_rate": 1.4647942249299707e-05,
"loss": 0.5262,
"step": 258
},
{
"epoch": 0.07421097526300825,
"grad_norm": 0.5430291891098022,
"learning_rate": 1.4101095055309746e-05,
"loss": 0.4899,
"step": 261
},
{
"epoch": 0.07506397497867501,
"grad_norm": 0.5020308494567871,
"learning_rate": 1.356061251489012e-05,
"loss": 0.494,
"step": 264
},
{
"epoch": 0.07591697469434176,
"grad_norm": 0.5282646417617798,
"learning_rate": 1.302681025462424e-05,
"loss": 0.5339,
"step": 267
},
{
"epoch": 0.07676997441000853,
"grad_norm": 0.48478028178215027,
"learning_rate": 1.2500000000000006e-05,
"loss": 0.5133,
"step": 270
},
{
"epoch": 0.07733864088711971,
"eval_loss": 0.4457505941390991,
"eval_runtime": 168.037,
"eval_samples_per_second": 35.254,
"eval_steps_per_second": 4.41,
"step": 272
},
{
"epoch": 0.07762297412567529,
"grad_norm": 0.6064507961273193,
"learning_rate": 1.1980489393370938e-05,
"loss": 0.4708,
"step": 273
},
{
"epoch": 0.07847597384134206,
"grad_norm": 0.7535691261291504,
"learning_rate": 1.1468581814301717e-05,
"loss": 0.426,
"step": 276
},
{
"epoch": 0.07932897355700881,
"grad_norm": 0.5106021165847778,
"learning_rate": 1.096457620240298e-05,
"loss": 0.457,
"step": 279
},
{
"epoch": 0.08018197327267558,
"grad_norm": 0.7610855102539062,
"learning_rate": 1.0468766882759094e-05,
"loss": 0.3867,
"step": 282
},
{
"epoch": 0.08103497298834234,
"grad_norm": 0.57286137342453,
"learning_rate": 9.981443394050525e-06,
"loss": 0.4744,
"step": 285
},
{
"epoch": 0.0818879727040091,
"grad_norm": 0.6350270509719849,
"learning_rate": 9.502890319471491e-06,
"loss": 0.4382,
"step": 288
},
{
"epoch": 0.08274097241967586,
"grad_norm": 0.5878217816352844,
"learning_rate": 9.033387120541306e-06,
"loss": 0.4471,
"step": 291
},
{
"epoch": 0.08359397213534261,
"grad_norm": 0.4584065079689026,
"learning_rate": 8.573207973906735e-06,
"loss": 0.4223,
"step": 294
},
{
"epoch": 0.08444697185100938,
"grad_norm": 0.514761745929718,
"learning_rate": 8.1226216112306e-06,
"loss": 0.4439,
"step": 297
},
{
"epoch": 0.08529997156667614,
"grad_norm": 0.4604704678058624,
"learning_rate": 7.681891162260015e-06,
"loss": 0.4769,
"step": 300
},
{
"epoch": 0.08615297128234291,
"grad_norm": 0.5754848718643188,
"learning_rate": 7.251274001166044e-06,
"loss": 0.4719,
"step": 303
},
{
"epoch": 0.08700597099800966,
"grad_norm": 0.6474661827087402,
"learning_rate": 6.831021596244424e-06,
"loss": 0.3843,
"step": 306
},
{
"epoch": 0.08700597099800966,
"eval_loss": 0.4252224862575531,
"eval_runtime": 168.0333,
"eval_samples_per_second": 35.255,
"eval_steps_per_second": 4.41,
"step": 306
},
{
"epoch": 0.08785897071367643,
"grad_norm": 0.5052304863929749,
"learning_rate": 6.421379363065142e-06,
"loss": 0.3779,
"step": 309
},
{
"epoch": 0.08871197042934319,
"grad_norm": 0.5623005628585815,
"learning_rate": 6.022586521156715e-06,
"loss": 0.5177,
"step": 312
},
{
"epoch": 0.08956497014500996,
"grad_norm": 0.615669846534729,
"learning_rate": 5.634875954308638e-06,
"loss": 0.3902,
"step": 315
},
{
"epoch": 0.09041796986067671,
"grad_norm": 0.5363687872886658,
"learning_rate": 5.258474074573877e-06,
"loss": 0.3245,
"step": 318
},
{
"epoch": 0.09127096957634348,
"grad_norm": 0.45147329568862915,
"learning_rate": 4.893600690050579e-06,
"loss": 0.4372,
"step": 321
},
{
"epoch": 0.09212396929201024,
"grad_norm": 0.42790791392326355,
"learning_rate": 4.540468876520323e-06,
"loss": 0.4359,
"step": 324
},
{
"epoch": 0.09297696900767699,
"grad_norm": 0.512692928314209,
"learning_rate": 4.199284853017896e-06,
"loss": 0.41,
"step": 327
},
{
"epoch": 0.09382996872334376,
"grad_norm": 0.5399787425994873,
"learning_rate": 3.8702478614051355e-06,
"loss": 0.4455,
"step": 330
},
{
"epoch": 0.09468296843901051,
"grad_norm": 0.5106746554374695,
"learning_rate": 3.5535500500193357e-06,
"loss": 0.4301,
"step": 333
},
{
"epoch": 0.09553596815467728,
"grad_norm": 0.6958709955215454,
"learning_rate": 3.249376361464021e-06,
"loss": 0.3602,
"step": 336
},
{
"epoch": 0.09638896787034404,
"grad_norm": 0.5049157738685608,
"learning_rate": 2.957904424607652e-06,
"loss": 0.35,
"step": 339
},
{
"epoch": 0.09667330110889963,
"eval_loss": 0.4159228503704071,
"eval_runtime": 167.9747,
"eval_samples_per_second": 35.267,
"eval_steps_per_second": 4.411,
"step": 340
},
{
"epoch": 0.09724196758601081,
"grad_norm": 0.5438792705535889,
"learning_rate": 2.679304450853401e-06,
"loss": 0.4406,
"step": 342
},
{
"epoch": 0.09809496730167756,
"grad_norm": 0.6174736022949219,
"learning_rate": 2.4137391347404476e-06,
"loss": 0.4503,
"step": 345
},
{
"epoch": 0.09894796701734433,
"grad_norm": 0.5348644256591797,
"learning_rate": 2.1613635589349756e-06,
"loss": 0.5056,
"step": 348
},
{
"epoch": 0.09980096673301109,
"grad_norm": 0.44019100069999695,
"learning_rate": 1.922325103666281e-06,
"loss": 0.3926,
"step": 351
},
{
"epoch": 0.10065396644867786,
"grad_norm": 0.5055895447731018,
"learning_rate": 1.696763360660808e-06,
"loss": 0.5037,
"step": 354
},
{
"epoch": 0.10150696616434461,
"grad_norm": 0.5478758215904236,
"learning_rate": 1.4848100516245717e-06,
"loss": 0.3948,
"step": 357
},
{
"epoch": 0.10235996588001137,
"grad_norm": 0.5242781639099121,
"learning_rate": 1.286588951321363e-06,
"loss": 0.4522,
"step": 360
},
{
"epoch": 0.10321296559567814,
"grad_norm": 0.5367030501365662,
"learning_rate": 1.102215815291774e-06,
"loss": 0.4246,
"step": 363
},
{
"epoch": 0.10406596531134489,
"grad_norm": 0.5049583315849304,
"learning_rate": 9.317983122552332e-07,
"loss": 0.3515,
"step": 366
},
{
"epoch": 0.10491896502701166,
"grad_norm": 0.47986966371536255,
"learning_rate": 7.754359612344859e-07,
"loss": 0.3667,
"step": 369
},
{
"epoch": 0.10577196474267841,
"grad_norm": 0.5948217511177063,
"learning_rate": 6.332200734393057e-07,
"loss": 0.4115,
"step": 372
},
{
"epoch": 0.1063406312197896,
"eval_loss": 0.41257256269454956,
"eval_runtime": 168.1203,
"eval_samples_per_second": 35.237,
"eval_steps_per_second": 4.408,
"step": 374
}
],
"logging_steps": 3,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 34,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.197680065857454e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
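
For reference, here is a minimal sketch of how a trainer state file like the one above can be inspected with Python's standard json module. It assumes the JSON has been saved locally as "trainer_state.json" (the filename and path are assumptions, not part of the checkpoint itself); it separates the training entries (which carry "loss") from the evaluation entries (which carry "eval_loss") in "log_history" and prints the most recent values of each.

# Minimal sketch: inspect a Trainer state file like the one above.
# Assumes it was saved locally as "trainer_state.json" (path is an assumption).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (with "loss") and evaluation entries (with "eval_loss").
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.4f}")
if train_log:
    last = train_log[-1]
    print(f"last training loss at step {last['step']}: {last['loss']} (lr={last['learning_rate']})")
if eval_log:
    last = eval_log[-1]
    print(f"last eval_loss at step {last['step']}: {last['eval_loss']}")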