Training in progress, step 500, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.026574188490718964,
"eval_steps": 42,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 5.314837698143793e-05,
"eval_loss": NaN,
"eval_runtime": 2370.1618,
"eval_samples_per_second": 13.37,
"eval_steps_per_second": 1.672,
"step": 1
},
{
"epoch": 0.0001594451309443138,
"grad_norm": NaN,
"learning_rate": 3e-05,
"loss": 0.0,
"step": 3
},
{
"epoch": 0.0003188902618886276,
"grad_norm": NaN,
"learning_rate": 6e-05,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.00047833539283294134,
"grad_norm": NaN,
"learning_rate": 9e-05,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.0006377805237772552,
"grad_norm": NaN,
"learning_rate": 9.999588943391597e-05,
"loss": 0.0,
"step": 12
},
{
"epoch": 0.000797225654721569,
"grad_norm": NaN,
"learning_rate": 9.99743108100344e-05,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.0009566707856658827,
"grad_norm": NaN,
"learning_rate": 9.993424445916923e-05,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.0011161159166101966,
"grad_norm": NaN,
"learning_rate": 9.987570520365104e-05,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.0012755610475545104,
"grad_norm": NaN,
"learning_rate": 9.979871469976196e-05,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.0014350061784988242,
"grad_norm": NaN,
"learning_rate": 9.970330142972401e-05,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.001594451309443138,
"grad_norm": NaN,
"learning_rate": 9.95895006911623e-05,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.0017538964403874516,
"grad_norm": NaN,
"learning_rate": 9.945735458404681e-05,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.0019133415713317654,
"grad_norm": NaN,
"learning_rate": 9.930691199511775e-05,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.0020727867022760794,
"grad_norm": NaN,
"learning_rate": 9.91382285798002e-05,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.002232231833220393,
"grad_norm": NaN,
"learning_rate": 9.895136674161465e-05,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.002232231833220393,
"eval_loss": NaN,
"eval_runtime": 2370.4375,
"eval_samples_per_second": 13.368,
"eval_steps_per_second": 1.671,
"step": 42
},
{
"epoch": 0.002391676964164707,
"grad_norm": NaN,
"learning_rate": 9.874639560909117e-05,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.0025511220951090208,
"grad_norm": NaN,
"learning_rate": 9.852339101019574e-05,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.0027105672260533346,
"grad_norm": NaN,
"learning_rate": 9.828243544427796e-05,
"loss": 0.0,
"step": 51
},
{
"epoch": 0.0028700123569976484,
"grad_norm": NaN,
"learning_rate": 9.802361805155097e-05,
"loss": 0.0,
"step": 54
},
{
"epoch": 0.003029457487941962,
"grad_norm": NaN,
"learning_rate": 9.774703458011453e-05,
"loss": 0.0,
"step": 57
},
{
"epoch": 0.003188902618886276,
"grad_norm": NaN,
"learning_rate": 9.745278735053343e-05,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.0033483477498305898,
"grad_norm": NaN,
"learning_rate": 9.714098521798465e-05,
"loss": 0.0,
"step": 63
},
{
"epoch": 0.003507792880774903,
"grad_norm": NaN,
"learning_rate": 9.681174353198687e-05,
"loss": 0.0,
"step": 66
},
{
"epoch": 0.003667238011719217,
"grad_norm": NaN,
"learning_rate": 9.64651840937276e-05,
"loss": 0.0,
"step": 69
},
{
"epoch": 0.0038266831426635307,
"grad_norm": NaN,
"learning_rate": 9.610143511100354e-05,
"loss": 0.0,
"step": 72
},
{
"epoch": 0.003986128273607845,
"grad_norm": NaN,
"learning_rate": 9.572063115079063e-05,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.004145573404552159,
"grad_norm": NaN,
"learning_rate": 9.53229130894619e-05,
"loss": 0.0,
"step": 78
},
{
"epoch": 0.0043050185354964726,
"grad_norm": NaN,
"learning_rate": 9.490842806067095e-05,
"loss": 0.0,
"step": 81
},
{
"epoch": 0.004464463666440786,
"grad_norm": NaN,
"learning_rate": 9.44773294009206e-05,
"loss": 0.0,
"step": 84
},
{
"epoch": 0.004464463666440786,
"eval_loss": NaN,
"eval_runtime": 2370.6582,
"eval_samples_per_second": 13.367,
"eval_steps_per_second": 1.671,
"step": 84
},
{
"epoch": 0.0046239087973851,
"grad_norm": NaN,
"learning_rate": 9.40297765928369e-05,
"loss": 0.0,
"step": 87
},
{
"epoch": 0.004783353928329414,
"grad_norm": NaN,
"learning_rate": 9.356593520616948e-05,
"loss": 0.0,
"step": 90
},
{
"epoch": 0.004942799059273728,
"grad_norm": NaN,
"learning_rate": 9.308597683653975e-05,
"loss": 0.0,
"step": 93
},
{
"epoch": 0.0051022441902180415,
"grad_norm": NaN,
"learning_rate": 9.259007904196023e-05,
"loss": 0.0,
"step": 96
},
{
"epoch": 0.005261689321162355,
"grad_norm": NaN,
"learning_rate": 9.207842527714767e-05,
"loss": 0.0,
"step": 99
},
{
"epoch": 0.005421134452106669,
"grad_norm": NaN,
"learning_rate": 9.155120482565521e-05,
"loss": 0.0,
"step": 102
},
{
"epoch": 0.005580579583050983,
"grad_norm": NaN,
"learning_rate": 9.10086127298478e-05,
"loss": 0.0,
"step": 105
},
{
"epoch": 0.005740024713995297,
"grad_norm": NaN,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0,
"step": 108
},
{
"epoch": 0.0058994698449396105,
"grad_norm": NaN,
"learning_rate": 8.987812213377424e-05,
"loss": 0.0,
"step": 111
},
{
"epoch": 0.006058914975883924,
"grad_norm": NaN,
"learning_rate": 8.929064185241213e-05,
"loss": 0.0,
"step": 114
},
{
"epoch": 0.006218360106828238,
"grad_norm": NaN,
"learning_rate": 8.868862620982534e-05,
"loss": 0.0,
"step": 117
},
{
"epoch": 0.006377805237772552,
"grad_norm": NaN,
"learning_rate": 8.807229791845673e-05,
"loss": 0.0,
"step": 120
},
{
"epoch": 0.006537250368716866,
"grad_norm": NaN,
"learning_rate": 8.744188498563641e-05,
"loss": 0.0,
"step": 123
},
{
"epoch": 0.0066966954996611795,
"grad_norm": NaN,
"learning_rate": 8.679762062923175e-05,
"loss": 0.0,
"step": 126
},
{
"epoch": 0.0066966954996611795,
"eval_loss": NaN,
"eval_runtime": 2369.9319,
"eval_samples_per_second": 13.371,
"eval_steps_per_second": 1.672,
"step": 126
},
{
"epoch": 0.0068561406306054925,
"grad_norm": NaN,
"learning_rate": 8.613974319136958e-05,
"loss": 0.0,
"step": 129
},
{
"epoch": 0.007015585761549806,
"grad_norm": NaN,
"learning_rate": 8.54684960502629e-05,
"loss": 0.0,
"step": 132
},
{
"epoch": 0.00717503089249412,
"grad_norm": NaN,
"learning_rate": 8.478412753017433e-05,
"loss": 0.0,
"step": 135
},
{
"epoch": 0.007334476023438434,
"grad_norm": NaN,
"learning_rate": 8.408689080954998e-05,
"loss": 0.0,
"step": 138
},
{
"epoch": 0.007493921154382748,
"grad_norm": NaN,
"learning_rate": 8.33770438273574e-05,
"loss": 0.0,
"step": 141
},
{
"epoch": 0.0076533662853270614,
"grad_norm": NaN,
"learning_rate": 8.265484918766243e-05,
"loss": 0.0,
"step": 144
},
{
"epoch": 0.007812811416271376,
"grad_norm": NaN,
"learning_rate": 8.192057406248028e-05,
"loss": 0.0,
"step": 147
},
{
"epoch": 0.00797225654721569,
"grad_norm": NaN,
"learning_rate": 8.117449009293668e-05,
"loss": 0.0,
"step": 150
},
{
"epoch": 0.008131701678160004,
"grad_norm": NaN,
"learning_rate": 8.041687328877567e-05,
"loss": 0.0,
"step": 153
},
{
"epoch": 0.008291146809104318,
"grad_norm": NaN,
"learning_rate": 7.964800392625129e-05,
"loss": 0.0,
"step": 156
},
{
"epoch": 0.008450591940048631,
"grad_norm": NaN,
"learning_rate": 7.886816644444098e-05,
"loss": 0.0,
"step": 159
},
{
"epoch": 0.008610037070992945,
"grad_norm": NaN,
"learning_rate": 7.807764934001874e-05,
"loss": 0.0,
"step": 162
},
{
"epoch": 0.008769482201937259,
"grad_norm": NaN,
"learning_rate": 7.727674506052743e-05,
"loss": 0.0,
"step": 165
},
{
"epoch": 0.008928927332881573,
"grad_norm": NaN,
"learning_rate": 7.646574989618938e-05,
"loss": 0.0,
"step": 168
},
{
"epoch": 0.008928927332881573,
"eval_loss": NaN,
"eval_runtime": 2369.2438,
"eval_samples_per_second": 13.375,
"eval_steps_per_second": 1.672,
"step": 168
},
{
"epoch": 0.009088372463825886,
"grad_norm": NaN,
"learning_rate": 7.564496387029532e-05,
"loss": 0.0,
"step": 171
},
{
"epoch": 0.0092478175947702,
"grad_norm": NaN,
"learning_rate": 7.481469062821252e-05,
"loss": 0.0,
"step": 174
},
{
"epoch": 0.009407262725714514,
"grad_norm": NaN,
"learning_rate": 7.39752373250527e-05,
"loss": 0.0,
"step": 177
},
{
"epoch": 0.009566707856658828,
"grad_norm": NaN,
"learning_rate": 7.312691451204178e-05,
"loss": 0.0,
"step": 180
},
{
"epoch": 0.009726152987603142,
"grad_norm": NaN,
"learning_rate": 7.227003602163295e-05,
"loss": 0.0,
"step": 183
},
{
"epoch": 0.009885598118547455,
"grad_norm": NaN,
"learning_rate": 7.14049188514063e-05,
"loss": 0.0,
"step": 186
},
{
"epoch": 0.01004504324949177,
"grad_norm": NaN,
"learning_rate": 7.05318830467969e-05,
"loss": 0.0,
"step": 189
},
{
"epoch": 0.010204488380436083,
"grad_norm": NaN,
"learning_rate": 6.965125158269619e-05,
"loss": 0.0,
"step": 192
},
{
"epoch": 0.010363933511380397,
"grad_norm": NaN,
"learning_rate": 6.876335024396872e-05,
"loss": 0.0,
"step": 195
},
{
"epoch": 0.01052337864232471,
"grad_norm": NaN,
"learning_rate": 6.786850750493006e-05,
"loss": 0.0,
"step": 198
},
{
"epoch": 0.010682823773269024,
"grad_norm": NaN,
"learning_rate": 6.696705440782938e-05,
"loss": 0.0,
"step": 201
},
{
"epoch": 0.010842268904213338,
"grad_norm": NaN,
"learning_rate": 6.605932444038229e-05,
"loss": 0.0,
"step": 204
},
{
"epoch": 0.011001714035157652,
"grad_norm": NaN,
"learning_rate": 6.514565341239861e-05,
"loss": 0.0,
"step": 207
},
{
"epoch": 0.011161159166101966,
"grad_norm": NaN,
"learning_rate": 6.422637933155162e-05,
"loss": 0.0,
"step": 210
},
{
"epoch": 0.011161159166101966,
"eval_loss": NaN,
"eval_runtime": 2369.0806,
"eval_samples_per_second": 13.376,
"eval_steps_per_second": 1.672,
"step": 210
},
{
"epoch": 0.01132060429704628,
"grad_norm": NaN,
"learning_rate": 6.330184227833376e-05,
"loss": 0.0,
"step": 213
},
{
"epoch": 0.011480049427990593,
"grad_norm": NaN,
"learning_rate": 6.237238428024572e-05,
"loss": 0.0,
"step": 216
},
{
"epoch": 0.011639494558934907,
"grad_norm": NaN,
"learning_rate": 6.143834918526527e-05,
"loss": 0.0,
"step": 219
},
{
"epoch": 0.011798939689879221,
"grad_norm": NaN,
"learning_rate": 6.0500082534642464e-05,
"loss": 0.0,
"step": 222
},
{
"epoch": 0.011958384820823535,
"grad_norm": NaN,
"learning_rate": 5.955793143506863e-05,
"loss": 0.0,
"step": 225
},
{
"epoch": 0.012117829951767849,
"grad_norm": NaN,
"learning_rate": 5.861224443026595e-05,
"loss": 0.0,
"step": 228
},
{
"epoch": 0.012277275082712162,
"grad_norm": NaN,
"learning_rate": 5.766337137204579e-05,
"loss": 0.0,
"step": 231
},
{
"epoch": 0.012436720213656476,
"grad_norm": NaN,
"learning_rate": 5.6711663290882776e-05,
"loss": 0.0,
"step": 234
},
{
"epoch": 0.01259616534460079,
"grad_norm": NaN,
"learning_rate": 5.575747226605298e-05,
"loss": 0.0,
"step": 237
},
{
"epoch": 0.012755610475545104,
"grad_norm": NaN,
"learning_rate": 5.480115129538409e-05,
"loss": 0.0,
"step": 240
},
{
"epoch": 0.012915055606489418,
"grad_norm": NaN,
"learning_rate": 5.384305416466584e-05,
"loss": 0.0,
"step": 243
},
{
"epoch": 0.013074500737433731,
"grad_norm": NaN,
"learning_rate": 5.288353531676873e-05,
"loss": 0.0,
"step": 246
},
{
"epoch": 0.013233945868378045,
"grad_norm": NaN,
"learning_rate": 5.192294972051992e-05,
"loss": 0.0,
"step": 249
},
{
"epoch": 0.013393390999322359,
"grad_norm": NaN,
"learning_rate": 5.0961652739384356e-05,
"loss": 0.0,
"step": 252
},
{
"epoch": 0.013393390999322359,
"eval_loss": NaN,
"eval_runtime": 2368.9779,
"eval_samples_per_second": 13.377,
"eval_steps_per_second": 1.672,
"step": 252
},
{
"epoch": 0.013552836130266673,
"grad_norm": NaN,
"learning_rate": 5e-05,
"loss": 0.0,
"step": 255
},
{
"epoch": 0.013712281261210985,
"grad_norm": NaN,
"learning_rate": 4.903834726061565e-05,
"loss": 0.0,
"step": 258
},
{
"epoch": 0.013871726392155299,
"grad_norm": NaN,
"learning_rate": 4.807705027948008e-05,
"loss": 0.0,
"step": 261
},
{
"epoch": 0.014031171523099613,
"grad_norm": NaN,
"learning_rate": 4.711646468323129e-05,
"loss": 0.0,
"step": 264
},
{
"epoch": 0.014190616654043926,
"grad_norm": NaN,
"learning_rate": 4.6156945835334184e-05,
"loss": 0.0,
"step": 267
},
{
"epoch": 0.01435006178498824,
"grad_norm": NaN,
"learning_rate": 4.5198848704615914e-05,
"loss": 0.0,
"step": 270
},
{
"epoch": 0.014509506915932554,
"grad_norm": NaN,
"learning_rate": 4.424252773394704e-05,
"loss": 0.0,
"step": 273
},
{
"epoch": 0.014668952046876868,
"grad_norm": NaN,
"learning_rate": 4.328833670911724e-05,
"loss": 0.0,
"step": 276
},
{
"epoch": 0.014828397177821181,
"grad_norm": NaN,
"learning_rate": 4.23366286279542e-05,
"loss": 0.0,
"step": 279
},
{
"epoch": 0.014987842308765495,
"grad_norm": NaN,
"learning_rate": 4.138775556973406e-05,
"loss": 0.0,
"step": 282
},
{
"epoch": 0.015147287439709809,
"grad_norm": NaN,
"learning_rate": 4.04420685649314e-05,
"loss": 0.0,
"step": 285
},
{
"epoch": 0.015306732570654123,
"grad_norm": NaN,
"learning_rate": 3.9499917465357534e-05,
"loss": 0.0,
"step": 288
},
{
"epoch": 0.015466177701598437,
"grad_norm": NaN,
"learning_rate": 3.856165081473474e-05,
"loss": 0.0,
"step": 291
},
{
"epoch": 0.015625622832542752,
"grad_norm": NaN,
"learning_rate": 3.762761571975429e-05,
"loss": 0.0,
"step": 294
},
{
"epoch": 0.015625622832542752,
"eval_loss": NaN,
"eval_runtime": 2367.8949,
"eval_samples_per_second": 13.383,
"eval_steps_per_second": 1.673,
"step": 294
},
{
"epoch": 0.015785067963487064,
"grad_norm": NaN,
"learning_rate": 3.6698157721666246e-05,
"loss": 0.0,
"step": 297
},
{
"epoch": 0.01594451309443138,
"grad_norm": NaN,
"learning_rate": 3.5773620668448384e-05,
"loss": 0.0,
"step": 300
},
{
"epoch": 0.016103958225375692,
"grad_norm": NaN,
"learning_rate": 3.48543465876014e-05,
"loss": 0.0,
"step": 303
},
{
"epoch": 0.016263403356320007,
"grad_norm": NaN,
"learning_rate": 3.3940675559617724e-05,
"loss": 0.0,
"step": 306
},
{
"epoch": 0.01642284848726432,
"grad_norm": NaN,
"learning_rate": 3.303294559217063e-05,
"loss": 0.0,
"step": 309
},
{
"epoch": 0.016582293618208635,
"grad_norm": NaN,
"learning_rate": 3.213149249506997e-05,
"loss": 0.0,
"step": 312
},
{
"epoch": 0.016741738749152947,
"grad_norm": NaN,
"learning_rate": 3.12366497560313e-05,
"loss": 0.0,
"step": 315
},
{
"epoch": 0.016901183880097263,
"grad_norm": NaN,
"learning_rate": 3.0348748417303823e-05,
"loss": 0.0,
"step": 318
},
{
"epoch": 0.017060629011041575,
"grad_norm": NaN,
"learning_rate": 2.9468116953203107e-05,
"loss": 0.0,
"step": 321
},
{
"epoch": 0.01722007414198589,
"grad_norm": NaN,
"learning_rate": 2.8595081148593738e-05,
"loss": 0.0,
"step": 324
},
{
"epoch": 0.017379519272930202,
"grad_norm": NaN,
"learning_rate": 2.772996397836704e-05,
"loss": 0.0,
"step": 327
},
{
"epoch": 0.017538964403874518,
"grad_norm": NaN,
"learning_rate": 2.687308548795825e-05,
"loss": 0.0,
"step": 330
},
{
"epoch": 0.01769840953481883,
"grad_norm": NaN,
"learning_rate": 2.6024762674947313e-05,
"loss": 0.0,
"step": 333
},
{
"epoch": 0.017857854665763145,
"grad_norm": NaN,
"learning_rate": 2.5185309371787513e-05,
"loss": 0.0,
"step": 336
},
{
"epoch": 0.017857854665763145,
"eval_loss": NaN,
"eval_runtime": 2368.7091,
"eval_samples_per_second": 13.378,
"eval_steps_per_second": 1.673,
"step": 336
},
{
"epoch": 0.018017299796707457,
"grad_norm": NaN,
"learning_rate": 2.43550361297047e-05,
"loss": 0.0,
"step": 339
},
{
"epoch": 0.018176744927651773,
"grad_norm": NaN,
"learning_rate": 2.353425010381063e-05,
"loss": 0.0,
"step": 342
},
{
"epoch": 0.018336190058596085,
"grad_norm": NaN,
"learning_rate": 2.272325493947257e-05,
"loss": 0.0,
"step": 345
},
{
"epoch": 0.0184956351895404,
"grad_norm": NaN,
"learning_rate": 2.192235065998126e-05,
"loss": 0.0,
"step": 348
},
{
"epoch": 0.018655080320484713,
"grad_norm": NaN,
"learning_rate": 2.1131833555559037e-05,
"loss": 0.0,
"step": 351
},
{
"epoch": 0.018814525451429028,
"grad_norm": NaN,
"learning_rate": 2.0351996073748713e-05,
"loss": 0.0,
"step": 354
},
{
"epoch": 0.01897397058237334,
"grad_norm": NaN,
"learning_rate": 1.9583126711224343e-05,
"loss": 0.0,
"step": 357
},
{
"epoch": 0.019133415713317656,
"grad_norm": NaN,
"learning_rate": 1.8825509907063327e-05,
"loss": 0.0,
"step": 360
},
{
"epoch": 0.019292860844261968,
"grad_norm": NaN,
"learning_rate": 1.807942593751973e-05,
"loss": 0.0,
"step": 363
},
{
"epoch": 0.019452305975206283,
"grad_norm": NaN,
"learning_rate": 1.7345150812337564e-05,
"loss": 0.0,
"step": 366
},
{
"epoch": 0.019611751106150595,
"grad_norm": NaN,
"learning_rate": 1.66229561726426e-05,
"loss": 0.0,
"step": 369
},
{
"epoch": 0.01977119623709491,
"grad_norm": NaN,
"learning_rate": 1.5913109190450032e-05,
"loss": 0.0,
"step": 372
},
{
"epoch": 0.019930641368039223,
"grad_norm": NaN,
"learning_rate": 1.5215872469825682e-05,
"loss": 0.0,
"step": 375
},
{
"epoch": 0.02009008649898354,
"grad_norm": NaN,
"learning_rate": 1.4531503949737108e-05,
"loss": 0.0,
"step": 378
},
{
"epoch": 0.02009008649898354,
"eval_loss": NaN,
"eval_runtime": 2368.4689,
"eval_samples_per_second": 13.38,
"eval_steps_per_second": 1.673,
"step": 378
},
{
"epoch": 0.02024953162992785,
"grad_norm": NaN,
"learning_rate": 1.3860256808630428e-05,
"loss": 0.0,
"step": 381
},
{
"epoch": 0.020408976760872166,
"grad_norm": NaN,
"learning_rate": 1.3202379370768252e-05,
"loss": 0.0,
"step": 384
},
{
"epoch": 0.020568421891816478,
"grad_norm": NaN,
"learning_rate": 1.2558115014363592e-05,
"loss": 0.0,
"step": 387
},
{
"epoch": 0.020727867022760794,
"grad_norm": NaN,
"learning_rate": 1.1927702081543279e-05,
"loss": 0.0,
"step": 390
},
{
"epoch": 0.020887312153705106,
"grad_norm": NaN,
"learning_rate": 1.1311373790174657e-05,
"loss": 0.0,
"step": 393
},
{
"epoch": 0.02104675728464942,
"grad_norm": NaN,
"learning_rate": 1.0709358147587884e-05,
"loss": 0.0,
"step": 396
},
{
"epoch": 0.021206202415593733,
"grad_norm": NaN,
"learning_rate": 1.0121877866225781e-05,
"loss": 0.0,
"step": 399
},
{
"epoch": 0.02136564754653805,
"grad_norm": NaN,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0,
"step": 402
},
{
"epoch": 0.02152509267748236,
"grad_norm": NaN,
"learning_rate": 8.991387270152201e-06,
"loss": 0.0,
"step": 405
},
{
"epoch": 0.021684537808426677,
"grad_norm": NaN,
"learning_rate": 8.448795174344804e-06,
"loss": 0.0,
"step": 408
},
{
"epoch": 0.02184398293937099,
"grad_norm": NaN,
"learning_rate": 7.921574722852343e-06,
"loss": 0.0,
"step": 411
},
{
"epoch": 0.022003428070315304,
"grad_norm": NaN,
"learning_rate": 7.409920958039795e-06,
"loss": 0.0,
"step": 414
},
{
"epoch": 0.022162873201259616,
"grad_norm": NaN,
"learning_rate": 6.9140231634602485e-06,
"loss": 0.0,
"step": 417
},
{
"epoch": 0.022322318332203932,
"grad_norm": NaN,
"learning_rate": 6.43406479383053e-06,
"loss": 0.0,
"step": 420
},
{
"epoch": 0.022322318332203932,
"eval_loss": NaN,
"eval_runtime": 2368.3875,
"eval_samples_per_second": 13.38,
"eval_steps_per_second": 1.673,
"step": 420
},
{
"epoch": 0.022481763463148244,
"grad_norm": NaN,
"learning_rate": 5.9702234071631e-06,
"loss": 0.0,
"step": 423
},
{
"epoch": 0.02264120859409256,
"grad_norm": NaN,
"learning_rate": 5.5226705990794155e-06,
"loss": 0.0,
"step": 426
},
{
"epoch": 0.02280065372503687,
"grad_norm": NaN,
"learning_rate": 5.091571939329048e-06,
"loss": 0.0,
"step": 429
},
{
"epoch": 0.022960098855981187,
"grad_norm": NaN,
"learning_rate": 4.677086910538092e-06,
"loss": 0.0,
"step": 432
},
{
"epoch": 0.0231195439869255,
"grad_norm": NaN,
"learning_rate": 4.279368849209381e-06,
"loss": 0.0,
"step": 435
},
{
"epoch": 0.023278989117869815,
"grad_norm": NaN,
"learning_rate": 3.898564888996476e-06,
"loss": 0.0,
"step": 438
},
{
"epoch": 0.023438434248814127,
"grad_norm": NaN,
"learning_rate": 3.534815906272404e-06,
"loss": 0.0,
"step": 441
},
{
"epoch": 0.023597879379758442,
"grad_norm": NaN,
"learning_rate": 3.18825646801314e-06,
"loss": 0.0,
"step": 444
},
{
"epoch": 0.023757324510702754,
"grad_norm": NaN,
"learning_rate": 2.8590147820153513e-06,
"loss": 0.0,
"step": 447
},
{
"epoch": 0.02391676964164707,
"grad_norm": NaN,
"learning_rate": 2.547212649466568e-06,
"loss": 0.0,
"step": 450
},
{
"epoch": 0.024076214772591382,
"grad_norm": NaN,
"learning_rate": 2.2529654198854835e-06,
"loss": 0.0,
"step": 453
},
{
"epoch": 0.024235659903535697,
"grad_norm": NaN,
"learning_rate": 1.9763819484490355e-06,
"loss": 0.0,
"step": 456
},
{
"epoch": 0.02439510503448001,
"grad_norm": NaN,
"learning_rate": 1.7175645557220566e-06,
"loss": 0.0,
"step": 459
},
{
"epoch": 0.024554550165424325,
"grad_norm": NaN,
"learning_rate": 1.4766089898042678e-06,
"loss": 0.0,
"step": 462
},
{
"epoch": 0.024554550165424325,
"eval_loss": NaN,
"eval_runtime": 2367.962,
"eval_samples_per_second": 13.382,
"eval_steps_per_second": 1.673,
"step": 462
},
{
"epoch": 0.024713995296368637,
"grad_norm": NaN,
"learning_rate": 1.2536043909088191e-06,
"loss": 0.0,
"step": 465
},
{
"epoch": 0.024873440427312953,
"grad_norm": NaN,
"learning_rate": 1.0486332583853563e-06,
"loss": 0.0,
"step": 468
},
{
"epoch": 0.025032885558257265,
"grad_norm": NaN,
"learning_rate": 8.617714201998084e-07,
"loss": 0.0,
"step": 471
},
{
"epoch": 0.02519233068920158,
"grad_norm": NaN,
"learning_rate": 6.93088004882253e-07,
"loss": 0.0,
"step": 474
},
{
"epoch": 0.025351775820145892,
"grad_norm": NaN,
"learning_rate": 5.426454159531913e-07,
"loss": 0.0,
"step": 477
},
{
"epoch": 0.025511220951090208,
"grad_norm": NaN,
"learning_rate": 4.104993088376974e-07,
"loss": 0.0,
"step": 480
},
{
"epoch": 0.02567066608203452,
"grad_norm": NaN,
"learning_rate": 2.966985702759828e-07,
"loss": 0.0,
"step": 483
},
{
"epoch": 0.025830111212978835,
"grad_norm": NaN,
"learning_rate": 2.012853002380466e-07,
"loss": 0.0,
"step": 486
},
{
"epoch": 0.025989556343923147,
"grad_norm": NaN,
"learning_rate": 1.2429479634897267e-07,
"loss": 0.0,
"step": 489
},
{
"epoch": 0.026149001474867463,
"grad_norm": NaN,
"learning_rate": 6.575554083078084e-08,
"loss": 0.0,
"step": 492
},
{
"epoch": 0.026308446605811775,
"grad_norm": NaN,
"learning_rate": 2.568918996560532e-08,
"loss": 0.0,
"step": 495
},
{
"epoch": 0.02646789173675609,
"grad_norm": NaN,
"learning_rate": 4.110566084036816e-09,
"loss": 0.0,
"step": 498
}
],
"logging_steps": 3,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 42,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.670791547922678e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}