{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.6286644951140063,
"eval_steps": 42,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003257328990228013,
"eval_loss": 6.045982360839844,
"eval_runtime": 22.8171,
"eval_samples_per_second": 22.658,
"eval_steps_per_second": 5.697,
"step": 1
},
{
"epoch": 0.009771986970684038,
"grad_norm": 7.921604633331299,
"learning_rate": 3e-05,
"loss": 5.6539,
"step": 3
},
{
"epoch": 0.019543973941368076,
"grad_norm": 8.00938606262207,
"learning_rate": 6e-05,
"loss": 4.9183,
"step": 6
},
{
"epoch": 0.029315960912052116,
"grad_norm": 7.276914596557617,
"learning_rate": 9e-05,
"loss": 3.1081,
"step": 9
},
{
"epoch": 0.03908794788273615,
"grad_norm": 4.77871036529541,
"learning_rate": 9.999588943391597e-05,
"loss": 2.5988,
"step": 12
},
{
"epoch": 0.048859934853420196,
"grad_norm": 4.304369926452637,
"learning_rate": 9.99743108100344e-05,
"loss": 2.3816,
"step": 15
},
{
"epoch": 0.05863192182410423,
"grad_norm": 4.264029502868652,
"learning_rate": 9.993424445916923e-05,
"loss": 2.302,
"step": 18
},
{
"epoch": 0.06840390879478828,
"grad_norm": 3.8603391647338867,
"learning_rate": 9.987570520365104e-05,
"loss": 2.205,
"step": 21
},
{
"epoch": 0.0781758957654723,
"grad_norm": 3.673312187194824,
"learning_rate": 9.979871469976196e-05,
"loss": 1.9937,
"step": 24
},
{
"epoch": 0.08794788273615635,
"grad_norm": 3.670795202255249,
"learning_rate": 9.970330142972401e-05,
"loss": 1.9628,
"step": 27
},
{
"epoch": 0.09771986970684039,
"grad_norm": 4.024224281311035,
"learning_rate": 9.95895006911623e-05,
"loss": 1.9128,
"step": 30
},
{
"epoch": 0.10749185667752444,
"grad_norm": 3.246854782104492,
"learning_rate": 9.945735458404681e-05,
"loss": 1.8845,
"step": 33
},
{
"epoch": 0.11726384364820847,
"grad_norm": 3.4581379890441895,
"learning_rate": 9.930691199511775e-05,
"loss": 1.7264,
"step": 36
},
{
"epoch": 0.1270358306188925,
"grad_norm": 3.0552656650543213,
"learning_rate": 9.91382285798002e-05,
"loss": 1.6868,
"step": 39
},
{
"epoch": 0.13680781758957655,
"grad_norm": 2.96836519241333,
"learning_rate": 9.895136674161465e-05,
"loss": 1.7595,
"step": 42
},
{
"epoch": 0.13680781758957655,
"eval_loss": 1.7990862131118774,
"eval_runtime": 22.1331,
"eval_samples_per_second": 23.359,
"eval_steps_per_second": 5.874,
"step": 42
},
{
"epoch": 0.1465798045602606,
"grad_norm": 2.9569833278656006,
"learning_rate": 9.874639560909117e-05,
"loss": 1.6317,
"step": 45
},
{
"epoch": 0.1563517915309446,
"grad_norm": 2.9722719192504883,
"learning_rate": 9.852339101019574e-05,
"loss": 1.4734,
"step": 48
},
{
"epoch": 0.16612377850162866,
"grad_norm": 3.663701295852661,
"learning_rate": 9.828243544427796e-05,
"loss": 1.7673,
"step": 51
},
{
"epoch": 0.1758957654723127,
"grad_norm": 2.7686305046081543,
"learning_rate": 9.802361805155097e-05,
"loss": 1.5428,
"step": 54
},
{
"epoch": 0.18566775244299674,
"grad_norm": 2.781644582748413,
"learning_rate": 9.774703458011453e-05,
"loss": 1.7697,
"step": 57
},
{
"epoch": 0.19543973941368079,
"grad_norm": 2.7254538536071777,
"learning_rate": 9.745278735053343e-05,
"loss": 1.5856,
"step": 60
},
{
"epoch": 0.20521172638436483,
"grad_norm": 2.544527769088745,
"learning_rate": 9.714098521798465e-05,
"loss": 1.5917,
"step": 63
},
{
"epoch": 0.21498371335504887,
"grad_norm": 3.226173162460327,
"learning_rate": 9.681174353198687e-05,
"loss": 1.5432,
"step": 66
},
{
"epoch": 0.2247557003257329,
"grad_norm": 2.7061498165130615,
"learning_rate": 9.64651840937276e-05,
"loss": 1.6754,
"step": 69
},
{
"epoch": 0.23452768729641693,
"grad_norm": 2.9291999340057373,
"learning_rate": 9.610143511100354e-05,
"loss": 1.5638,
"step": 72
},
{
"epoch": 0.24429967426710097,
"grad_norm": 2.4796836376190186,
"learning_rate": 9.572063115079063e-05,
"loss": 1.474,
"step": 75
},
{
"epoch": 0.254071661237785,
"grad_norm": 2.7676734924316406,
"learning_rate": 9.53229130894619e-05,
"loss": 1.5904,
"step": 78
},
{
"epoch": 0.26384364820846906,
"grad_norm": 2.8210036754608154,
"learning_rate": 9.490842806067095e-05,
"loss": 1.6858,
"step": 81
},
{
"epoch": 0.2736156351791531,
"grad_norm": 2.67210054397583,
"learning_rate": 9.44773294009206e-05,
"loss": 1.5434,
"step": 84
},
{
"epoch": 0.2736156351791531,
"eval_loss": 1.6323200464248657,
"eval_runtime": 22.1692,
"eval_samples_per_second": 23.321,
"eval_steps_per_second": 5.864,
"step": 84
},
{
"epoch": 0.28338762214983715,
"grad_norm": 2.819925308227539,
"learning_rate": 9.40297765928369e-05,
"loss": 1.5062,
"step": 87
},
{
"epoch": 0.2931596091205212,
"grad_norm": 2.894965410232544,
"learning_rate": 9.356593520616948e-05,
"loss": 1.5876,
"step": 90
},
{
"epoch": 0.30293159609120524,
"grad_norm": 2.7666544914245605,
"learning_rate": 9.308597683653975e-05,
"loss": 1.4585,
"step": 93
},
{
"epoch": 0.3127035830618892,
"grad_norm": 2.8919670581817627,
"learning_rate": 9.259007904196023e-05,
"loss": 1.5846,
"step": 96
},
{
"epoch": 0.32247557003257327,
"grad_norm": 2.660968542098999,
"learning_rate": 9.207842527714767e-05,
"loss": 1.5343,
"step": 99
},
{
"epoch": 0.3322475570032573,
"grad_norm": 2.652714967727661,
"learning_rate": 9.155120482565521e-05,
"loss": 1.591,
"step": 102
},
{
"epoch": 0.34201954397394135,
"grad_norm": 2.7949202060699463,
"learning_rate": 9.10086127298478e-05,
"loss": 1.4614,
"step": 105
},
{
"epoch": 0.3517915309446254,
"grad_norm": 2.527437686920166,
"learning_rate": 9.045084971874738e-05,
"loss": 1.5641,
"step": 108
},
{
"epoch": 0.36156351791530944,
"grad_norm": 2.352187395095825,
"learning_rate": 8.987812213377424e-05,
"loss": 1.4186,
"step": 111
},
{
"epoch": 0.3713355048859935,
"grad_norm": 2.577479124069214,
"learning_rate": 8.929064185241213e-05,
"loss": 1.4607,
"step": 114
},
{
"epoch": 0.3811074918566775,
"grad_norm": 2.7002623081207275,
"learning_rate": 8.868862620982534e-05,
"loss": 1.6182,
"step": 117
},
{
"epoch": 0.39087947882736157,
"grad_norm": 2.521603584289551,
"learning_rate": 8.807229791845673e-05,
"loss": 1.4524,
"step": 120
},
{
"epoch": 0.4006514657980456,
"grad_norm": 2.723996877670288,
"learning_rate": 8.744188498563641e-05,
"loss": 1.2303,
"step": 123
},
{
"epoch": 0.41042345276872966,
"grad_norm": 2.888556957244873,
"learning_rate": 8.679762062923175e-05,
"loss": 1.376,
"step": 126
},
{
"epoch": 0.41042345276872966,
"eval_loss": 1.5374845266342163,
"eval_runtime": 22.1691,
"eval_samples_per_second": 23.321,
"eval_steps_per_second": 5.864,
"step": 126
},
{
"epoch": 0.4201954397394137,
"grad_norm": 2.6070010662078857,
"learning_rate": 8.613974319136958e-05,
"loss": 1.313,
"step": 129
},
{
"epoch": 0.42996742671009774,
"grad_norm": 2.7287192344665527,
"learning_rate": 8.54684960502629e-05,
"loss": 1.4142,
"step": 132
},
{
"epoch": 0.43973941368078173,
"grad_norm": 2.9047019481658936,
"learning_rate": 8.478412753017433e-05,
"loss": 1.4128,
"step": 135
},
{
"epoch": 0.4495114006514658,
"grad_norm": 3.378373622894287,
"learning_rate": 8.408689080954998e-05,
"loss": 1.5116,
"step": 138
},
{
"epoch": 0.4592833876221498,
"grad_norm": 2.828160285949707,
"learning_rate": 8.33770438273574e-05,
"loss": 1.3085,
"step": 141
},
{
"epoch": 0.46905537459283386,
"grad_norm": 2.577202081680298,
"learning_rate": 8.265484918766243e-05,
"loss": 1.3892,
"step": 144
},
{
"epoch": 0.4788273615635179,
"grad_norm": 2.659550428390503,
"learning_rate": 8.192057406248028e-05,
"loss": 1.4546,
"step": 147
},
{
"epoch": 0.48859934853420195,
"grad_norm": 3.5885229110717773,
"learning_rate": 8.117449009293668e-05,
"loss": 1.2797,
"step": 150
},
{
"epoch": 0.498371335504886,
"grad_norm": 2.8732728958129883,
"learning_rate": 8.041687328877567e-05,
"loss": 1.405,
"step": 153
},
{
"epoch": 0.50814332247557,
"grad_norm": 2.7093594074249268,
"learning_rate": 7.964800392625129e-05,
"loss": 1.4728,
"step": 156
},
{
"epoch": 0.5179153094462541,
"grad_norm": 2.460803747177124,
"learning_rate": 7.886816644444098e-05,
"loss": 1.5428,
"step": 159
},
{
"epoch": 0.5276872964169381,
"grad_norm": 2.5754201412200928,
"learning_rate": 7.807764934001874e-05,
"loss": 1.332,
"step": 162
},
{
"epoch": 0.5374592833876222,
"grad_norm": 2.630692481994629,
"learning_rate": 7.727674506052743e-05,
"loss": 1.4519,
"step": 165
},
{
"epoch": 0.5472312703583062,
"grad_norm": 3.039989948272705,
"learning_rate": 7.646574989618938e-05,
"loss": 1.4669,
"step": 168
},
{
"epoch": 0.5472312703583062,
"eval_loss": 1.471885085105896,
"eval_runtime": 22.1606,
"eval_samples_per_second": 23.33,
"eval_steps_per_second": 5.866,
"step": 168
},
{
"epoch": 0.5570032573289903,
"grad_norm": 2.361525297164917,
"learning_rate": 7.564496387029532e-05,
"loss": 1.3612,
"step": 171
},
{
"epoch": 0.5667752442996743,
"grad_norm": 2.969438076019287,
"learning_rate": 7.481469062821252e-05,
"loss": 1.3524,
"step": 174
},
{
"epoch": 0.5765472312703583,
"grad_norm": 2.654615879058838,
"learning_rate": 7.39752373250527e-05,
"loss": 1.3684,
"step": 177
},
{
"epoch": 0.5863192182410424,
"grad_norm": 2.500892162322998,
"learning_rate": 7.312691451204178e-05,
"loss": 1.2688,
"step": 180
},
{
"epoch": 0.5960912052117264,
"grad_norm": 2.7902750968933105,
"learning_rate": 7.227003602163295e-05,
"loss": 1.213,
"step": 183
},
{
"epoch": 0.6058631921824105,
"grad_norm": 2.477710008621216,
"learning_rate": 7.14049188514063e-05,
"loss": 1.4632,
"step": 186
},
{
"epoch": 0.6156351791530945,
"grad_norm": 2.8017044067382812,
"learning_rate": 7.05318830467969e-05,
"loss": 1.4655,
"step": 189
},
{
"epoch": 0.6254071661237784,
"grad_norm": 2.2910585403442383,
"learning_rate": 6.965125158269619e-05,
"loss": 1.3622,
"step": 192
},
{
"epoch": 0.6351791530944625,
"grad_norm": 2.3485262393951416,
"learning_rate": 6.876335024396872e-05,
"loss": 1.3191,
"step": 195
},
{
"epoch": 0.6449511400651465,
"grad_norm": 2.221445083618164,
"learning_rate": 6.786850750493006e-05,
"loss": 1.2862,
"step": 198
},
{
"epoch": 0.6547231270358306,
"grad_norm": 2.2322473526000977,
"learning_rate": 6.696705440782938e-05,
"loss": 1.2873,
"step": 201
},
{
"epoch": 0.6644951140065146,
"grad_norm": 2.134915828704834,
"learning_rate": 6.605932444038229e-05,
"loss": 1.2457,
"step": 204
},
{
"epoch": 0.6742671009771987,
"grad_norm": 2.2600533962249756,
"learning_rate": 6.514565341239861e-05,
"loss": 1.3544,
"step": 207
},
{
"epoch": 0.6840390879478827,
"grad_norm": 2.104802370071411,
"learning_rate": 6.422637933155162e-05,
"loss": 1.2662,
"step": 210
},
{
"epoch": 0.6840390879478827,
"eval_loss": 1.3924102783203125,
"eval_runtime": 22.1429,
"eval_samples_per_second": 23.348,
"eval_steps_per_second": 5.871,
"step": 210
},
{
"epoch": 0.6938110749185668,
"grad_norm": 2.490628719329834,
"learning_rate": 6.330184227833376e-05,
"loss": 1.4418,
"step": 213
},
{
"epoch": 0.7035830618892508,
"grad_norm": 2.3577187061309814,
"learning_rate": 6.237238428024572e-05,
"loss": 1.2653,
"step": 216
},
{
"epoch": 0.7133550488599348,
"grad_norm": 2.6168723106384277,
"learning_rate": 6.143834918526527e-05,
"loss": 1.4725,
"step": 219
},
{
"epoch": 0.7231270358306189,
"grad_norm": 2.343191146850586,
"learning_rate": 6.0500082534642464e-05,
"loss": 1.3543,
"step": 222
},
{
"epoch": 0.7328990228013029,
"grad_norm": 2.482631206512451,
"learning_rate": 5.955793143506863e-05,
"loss": 1.3523,
"step": 225
},
{
"epoch": 0.742671009771987,
"grad_norm": 2.1662986278533936,
"learning_rate": 5.861224443026595e-05,
"loss": 1.2848,
"step": 228
},
{
"epoch": 0.752442996742671,
"grad_norm": 2.496674060821533,
"learning_rate": 5.766337137204579e-05,
"loss": 1.3919,
"step": 231
},
{
"epoch": 0.762214983713355,
"grad_norm": 2.5266311168670654,
"learning_rate": 5.6711663290882776e-05,
"loss": 1.4481,
"step": 234
},
{
"epoch": 0.7719869706840391,
"grad_norm": 2.154391288757324,
"learning_rate": 5.575747226605298e-05,
"loss": 1.1874,
"step": 237
},
{
"epoch": 0.7817589576547231,
"grad_norm": 2.2282350063323975,
"learning_rate": 5.480115129538409e-05,
"loss": 1.2684,
"step": 240
},
{
"epoch": 0.7915309446254072,
"grad_norm": 2.377781391143799,
"learning_rate": 5.384305416466584e-05,
"loss": 1.2627,
"step": 243
},
{
"epoch": 0.8013029315960912,
"grad_norm": 2.505831480026245,
"learning_rate": 5.288353531676873e-05,
"loss": 1.2982,
"step": 246
},
{
"epoch": 0.8110749185667753,
"grad_norm": 2.424248456954956,
"learning_rate": 5.192294972051992e-05,
"loss": 1.2661,
"step": 249
},
{
"epoch": 0.8208469055374593,
"grad_norm": 2.3504350185394287,
"learning_rate": 5.0961652739384356e-05,
"loss": 1.3146,
"step": 252
},
{
"epoch": 0.8208469055374593,
"eval_loss": 1.3527233600616455,
"eval_runtime": 22.1769,
"eval_samples_per_second": 23.313,
"eval_steps_per_second": 5.862,
"step": 252
},
{
"epoch": 0.8306188925081434,
"grad_norm": 2.505326509475708,
"learning_rate": 5e-05,
"loss": 1.3629,
"step": 255
},
{
"epoch": 0.8403908794788274,
"grad_norm": 2.5464847087860107,
"learning_rate": 4.903834726061565e-05,
"loss": 1.2965,
"step": 258
},
{
"epoch": 0.8501628664495114,
"grad_norm": 2.423649787902832,
"learning_rate": 4.807705027948008e-05,
"loss": 1.1622,
"step": 261
},
{
"epoch": 0.8599348534201955,
"grad_norm": 2.3888437747955322,
"learning_rate": 4.711646468323129e-05,
"loss": 1.2045,
"step": 264
},
{
"epoch": 0.8697068403908795,
"grad_norm": 2.202131509780884,
"learning_rate": 4.6156945835334184e-05,
"loss": 1.2004,
"step": 267
},
{
"epoch": 0.8794788273615635,
"grad_norm": 2.20468807220459,
"learning_rate": 4.5198848704615914e-05,
"loss": 1.2365,
"step": 270
},
{
"epoch": 0.8892508143322475,
"grad_norm": 2.211899995803833,
"learning_rate": 4.424252773394704e-05,
"loss": 1.3499,
"step": 273
},
{
"epoch": 0.8990228013029316,
"grad_norm": 2.4418487548828125,
"learning_rate": 4.328833670911724e-05,
"loss": 1.2556,
"step": 276
},
{
"epoch": 0.9087947882736156,
"grad_norm": 2.3675637245178223,
"learning_rate": 4.23366286279542e-05,
"loss": 1.1791,
"step": 279
},
{
"epoch": 0.9185667752442996,
"grad_norm": 2.2540576457977295,
"learning_rate": 4.138775556973406e-05,
"loss": 1.135,
"step": 282
},
{
"epoch": 0.9283387622149837,
"grad_norm": 2.869304656982422,
"learning_rate": 4.04420685649314e-05,
"loss": 1.3404,
"step": 285
},
{
"epoch": 0.9381107491856677,
"grad_norm": 2.2384979724884033,
"learning_rate": 3.9499917465357534e-05,
"loss": 1.184,
"step": 288
},
{
"epoch": 0.9478827361563518,
"grad_norm": 2.6150434017181396,
"learning_rate": 3.856165081473474e-05,
"loss": 1.398,
"step": 291
},
{
"epoch": 0.9576547231270358,
"grad_norm": 2.1370980739593506,
"learning_rate": 3.762761571975429e-05,
"loss": 1.0922,
"step": 294
},
{
"epoch": 0.9576547231270358,
"eval_loss": 1.2961455583572388,
"eval_runtime": 22.1652,
"eval_samples_per_second": 23.325,
"eval_steps_per_second": 5.865,
"step": 294
},
{
"epoch": 0.9674267100977199,
"grad_norm": 2.2964820861816406,
"learning_rate": 3.6698157721666246e-05,
"loss": 1.2606,
"step": 297
},
{
"epoch": 0.9771986970684039,
"grad_norm": 2.9193806648254395,
"learning_rate": 3.5773620668448384e-05,
"loss": 1.2477,
"step": 300
},
{
"epoch": 0.9869706840390879,
"grad_norm": 2.5898802280426025,
"learning_rate": 3.48543465876014e-05,
"loss": 1.2954,
"step": 303
},
{
"epoch": 0.996742671009772,
"grad_norm": 2.4339256286621094,
"learning_rate": 3.3940675559617724e-05,
"loss": 1.3758,
"step": 306
},
{
"epoch": 1.006514657980456,
"grad_norm": 2.0160953998565674,
"learning_rate": 3.303294559217063e-05,
"loss": 1.0917,
"step": 309
},
{
"epoch": 1.01628664495114,
"grad_norm": 2.0552120208740234,
"learning_rate": 3.213149249506997e-05,
"loss": 0.8775,
"step": 312
},
{
"epoch": 1.0260586319218241,
"grad_norm": 2.050333261489868,
"learning_rate": 3.12366497560313e-05,
"loss": 0.8579,
"step": 315
},
{
"epoch": 1.0358306188925082,
"grad_norm": 2.006868362426758,
"learning_rate": 3.0348748417303823e-05,
"loss": 0.8009,
"step": 318
},
{
"epoch": 1.0456026058631922,
"grad_norm": 2.201401710510254,
"learning_rate": 2.9468116953203107e-05,
"loss": 0.8379,
"step": 321
},
{
"epoch": 1.0553745928338762,
"grad_norm": 2.284052848815918,
"learning_rate": 2.8595081148593738e-05,
"loss": 0.7961,
"step": 324
},
{
"epoch": 1.0651465798045603,
"grad_norm": 2.104940414428711,
"learning_rate": 2.772996397836704e-05,
"loss": 0.8973,
"step": 327
},
{
"epoch": 1.0749185667752443,
"grad_norm": 2.4074976444244385,
"learning_rate": 2.687308548795825e-05,
"loss": 0.8669,
"step": 330
},
{
"epoch": 1.0846905537459284,
"grad_norm": 2.3930447101593018,
"learning_rate": 2.6024762674947313e-05,
"loss": 0.7428,
"step": 333
},
{
"epoch": 1.0944625407166124,
"grad_norm": 2.6202473640441895,
"learning_rate": 2.5185309371787513e-05,
"loss": 0.7865,
"step": 336
},
{
"epoch": 1.0944625407166124,
"eval_loss": 1.344246745109558,
"eval_runtime": 22.1704,
"eval_samples_per_second": 23.319,
"eval_steps_per_second": 5.864,
"step": 336
},
{
"epoch": 1.1042345276872965,
"grad_norm": 2.279667615890503,
"learning_rate": 2.43550361297047e-05,
"loss": 0.8684,
"step": 339
},
{
"epoch": 1.1140065146579805,
"grad_norm": 2.2219250202178955,
"learning_rate": 2.353425010381063e-05,
"loss": 0.8651,
"step": 342
},
{
"epoch": 1.1237785016286646,
"grad_norm": 2.2543816566467285,
"learning_rate": 2.272325493947257e-05,
"loss": 0.8661,
"step": 345
},
{
"epoch": 1.1335504885993486,
"grad_norm": 2.2331888675689697,
"learning_rate": 2.192235065998126e-05,
"loss": 0.7047,
"step": 348
},
{
"epoch": 1.1433224755700326,
"grad_norm": 2.6700170040130615,
"learning_rate": 2.1131833555559037e-05,
"loss": 0.7296,
"step": 351
},
{
"epoch": 1.1530944625407167,
"grad_norm": 2.4180705547332764,
"learning_rate": 2.0351996073748713e-05,
"loss": 0.7674,
"step": 354
},
{
"epoch": 1.1628664495114007,
"grad_norm": 2.7486183643341064,
"learning_rate": 1.9583126711224343e-05,
"loss": 0.9823,
"step": 357
},
{
"epoch": 1.1726384364820848,
"grad_norm": 2.257678747177124,
"learning_rate": 1.8825509907063327e-05,
"loss": 0.8594,
"step": 360
},
{
"epoch": 1.1824104234527688,
"grad_norm": 2.3134396076202393,
"learning_rate": 1.807942593751973e-05,
"loss": 0.8651,
"step": 363
},
{
"epoch": 1.1921824104234529,
"grad_norm": 2.2541582584381104,
"learning_rate": 1.7345150812337564e-05,
"loss": 0.839,
"step": 366
},
{
"epoch": 1.201954397394137,
"grad_norm": 2.1207261085510254,
"learning_rate": 1.66229561726426e-05,
"loss": 0.7365,
"step": 369
},
{
"epoch": 1.211726384364821,
"grad_norm": 2.189333915710449,
"learning_rate": 1.5913109190450032e-05,
"loss": 0.7796,
"step": 372
},
{
"epoch": 1.221498371335505,
"grad_norm": 2.466726541519165,
"learning_rate": 1.5215872469825682e-05,
"loss": 0.8155,
"step": 375
},
{
"epoch": 1.231270358306189,
"grad_norm": 2.5175869464874268,
"learning_rate": 1.4531503949737108e-05,
"loss": 0.705,
"step": 378
},
{
"epoch": 1.231270358306189,
"eval_loss": 1.314825177192688,
"eval_runtime": 22.1791,
"eval_samples_per_second": 23.31,
"eval_steps_per_second": 5.861,
"step": 378
},
{
"epoch": 1.241042345276873,
"grad_norm": 2.3673043251037598,
"learning_rate": 1.3860256808630428e-05,
"loss": 0.8592,
"step": 381
},
{
"epoch": 1.2508143322475571,
"grad_norm": 2.451141357421875,
"learning_rate": 1.3202379370768252e-05,
"loss": 0.7932,
"step": 384
},
{
"epoch": 1.2605863192182412,
"grad_norm": 2.4431982040405273,
"learning_rate": 1.2558115014363592e-05,
"loss": 0.7265,
"step": 387
},
{
"epoch": 1.2703583061889252,
"grad_norm": 2.317182779312134,
"learning_rate": 1.1927702081543279e-05,
"loss": 0.7116,
"step": 390
},
{
"epoch": 1.2801302931596092,
"grad_norm": 2.2101731300354004,
"learning_rate": 1.1311373790174657e-05,
"loss": 0.7642,
"step": 393
},
{
"epoch": 1.2899022801302933,
"grad_norm": 2.8971657752990723,
"learning_rate": 1.0709358147587884e-05,
"loss": 0.7537,
"step": 396
},
{
"epoch": 1.2996742671009773,
"grad_norm": 2.7442216873168945,
"learning_rate": 1.0121877866225781e-05,
"loss": 0.765,
"step": 399
},
{
"epoch": 1.3094462540716614,
"grad_norm": 2.479196310043335,
"learning_rate": 9.549150281252633e-06,
"loss": 0.8282,
"step": 402
},
{
"epoch": 1.3192182410423452,
"grad_norm": 2.463113307952881,
"learning_rate": 8.991387270152201e-06,
"loss": 0.8097,
"step": 405
},
{
"epoch": 1.3289902280130292,
"grad_norm": 2.4927496910095215,
"learning_rate": 8.448795174344804e-06,
"loss": 0.8298,
"step": 408
},
{
"epoch": 1.3387622149837133,
"grad_norm": 2.711042881011963,
"learning_rate": 7.921574722852343e-06,
"loss": 0.7205,
"step": 411
},
{
"epoch": 1.3485342019543973,
"grad_norm": 2.803149938583374,
"learning_rate": 7.409920958039795e-06,
"loss": 0.7575,
"step": 414
},
{
"epoch": 1.3583061889250814,
"grad_norm": 2.474118232727051,
"learning_rate": 6.9140231634602485e-06,
"loss": 0.7231,
"step": 417
},
{
"epoch": 1.3680781758957654,
"grad_norm": 2.5949273109436035,
"learning_rate": 6.43406479383053e-06,
"loss": 0.8594,
"step": 420
},
{
"epoch": 1.3680781758957654,
"eval_loss": 1.3078192472457886,
"eval_runtime": 22.1879,
"eval_samples_per_second": 23.301,
"eval_steps_per_second": 5.859,
"step": 420
},
{
"epoch": 1.3778501628664495,
"grad_norm": 2.6252212524414062,
"learning_rate": 5.9702234071631e-06,
"loss": 0.694,
"step": 423
},
{
"epoch": 1.3876221498371335,
"grad_norm": 2.314903497695923,
"learning_rate": 5.5226705990794155e-06,
"loss": 0.7619,
"step": 426
},
{
"epoch": 1.3973941368078175,
"grad_norm": 2.5821728706359863,
"learning_rate": 5.091571939329048e-06,
"loss": 0.8219,
"step": 429
},
{
"epoch": 1.4071661237785016,
"grad_norm": 2.4021191596984863,
"learning_rate": 4.677086910538092e-06,
"loss": 0.6944,
"step": 432
},
{
"epoch": 1.4169381107491856,
"grad_norm": 2.5493106842041016,
"learning_rate": 4.279368849209381e-06,
"loss": 0.7945,
"step": 435
},
{
"epoch": 1.4267100977198697,
"grad_norm": 2.4301791191101074,
"learning_rate": 3.898564888996476e-06,
"loss": 0.6487,
"step": 438
},
{
"epoch": 1.4364820846905537,
"grad_norm": 2.63675856590271,
"learning_rate": 3.534815906272404e-06,
"loss": 0.8646,
"step": 441
},
{
"epoch": 1.4462540716612378,
"grad_norm": 2.5258288383483887,
"learning_rate": 3.18825646801314e-06,
"loss": 0.7682,
"step": 444
},
{
"epoch": 1.4560260586319218,
"grad_norm": 2.3839597702026367,
"learning_rate": 2.8590147820153513e-06,
"loss": 0.8041,
"step": 447
},
{
"epoch": 1.4657980456026058,
"grad_norm": 2.62052583694458,
"learning_rate": 2.547212649466568e-06,
"loss": 0.806,
"step": 450
},
{
"epoch": 1.47557003257329,
"grad_norm": 2.2883756160736084,
"learning_rate": 2.2529654198854835e-06,
"loss": 0.7404,
"step": 453
},
{
"epoch": 1.485342019543974,
"grad_norm": 2.6609508991241455,
"learning_rate": 1.9763819484490355e-06,
"loss": 0.7702,
"step": 456
},
{
"epoch": 1.495114006514658,
"grad_norm": 2.525723695755005,
"learning_rate": 1.7175645557220566e-06,
"loss": 0.769,
"step": 459
},
{
"epoch": 1.504885993485342,
"grad_norm": 2.766369581222534,
"learning_rate": 1.4766089898042678e-06,
"loss": 0.7819,
"step": 462
},
{
"epoch": 1.504885993485342,
"eval_loss": 1.304705262184143,
"eval_runtime": 22.2456,
"eval_samples_per_second": 23.241,
"eval_steps_per_second": 5.844,
"step": 462
},
{
"epoch": 1.514657980456026,
"grad_norm": 2.5057575702667236,
"learning_rate": 1.2536043909088191e-06,
"loss": 0.8524,
"step": 465
},
{
"epoch": 1.52442996742671,
"grad_norm": 2.9005579948425293,
"learning_rate": 1.0486332583853563e-06,
"loss": 0.8315,
"step": 468
},
{
"epoch": 1.5342019543973942,
"grad_norm": 2.7489733695983887,
"learning_rate": 8.617714201998084e-07,
"loss": 0.7929,
"step": 471
},
{
"epoch": 1.5439739413680782,
"grad_norm": 2.5570104122161865,
"learning_rate": 6.93088004882253e-07,
"loss": 0.9023,
"step": 474
},
{
"epoch": 1.5537459283387622,
"grad_norm": 2.6289138793945312,
"learning_rate": 5.426454159531913e-07,
"loss": 0.861,
"step": 477
},
{
"epoch": 1.5635179153094463,
"grad_norm": 2.8074800968170166,
"learning_rate": 4.104993088376974e-07,
"loss": 0.6996,
"step": 480
},
{
"epoch": 1.5732899022801303,
"grad_norm": 2.5680346488952637,
"learning_rate": 2.966985702759828e-07,
"loss": 0.9349,
"step": 483
},
{
"epoch": 1.5830618892508144,
"grad_norm": 2.4893345832824707,
"learning_rate": 2.012853002380466e-07,
"loss": 0.7033,
"step": 486
},
{
"epoch": 1.5928338762214984,
"grad_norm": 2.6643295288085938,
"learning_rate": 1.2429479634897267e-07,
"loss": 0.7915,
"step": 489
},
{
"epoch": 1.6026058631921825,
"grad_norm": 2.4373981952667236,
"learning_rate": 6.575554083078084e-08,
"loss": 0.803,
"step": 492
},
{
"epoch": 1.6123778501628665,
"grad_norm": 2.700291872024536,
"learning_rate": 2.568918996560532e-08,
"loss": 0.8002,
"step": 495
},
{
"epoch": 1.6221498371335505,
"grad_norm": 2.6518638134002686,
"learning_rate": 4.110566084036816e-09,
"loss": 0.7295,
"step": 498
}
],
"logging_steps": 3,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 42,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.770812536371282e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}