REDL_LLM1_0 / checkpoint-10626 /trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 76.61883408071749,
"eval_steps": 500,
"global_step": 10626,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"learning_rate": 2.561117578579744e-06,
"loss": 1.3932,
"step": 44
},
{
"epoch": 0.39,
"learning_rate": 5.122235157159488e-06,
"loss": 1.3231,
"step": 88
},
{
"epoch": 0.59,
"learning_rate": 7.683352735739232e-06,
"loss": 1.3235,
"step": 132
},
{
"epoch": 1.17,
"learning_rate": 1.0244470314318976e-05,
"loss": 1.2242,
"step": 176
},
{
"epoch": 1.37,
"learning_rate": 1.2805587892898722e-05,
"loss": 1.1413,
"step": 220
},
{
"epoch": 1.57,
"learning_rate": 1.5366705471478464e-05,
"loss": 1.0589,
"step": 264
},
{
"epoch": 2.14,
"learning_rate": 1.7927823050058208e-05,
"loss": 0.8778,
"step": 308
},
{
"epoch": 2.34,
"learning_rate": 2.0488940628637952e-05,
"loss": 0.9339,
"step": 352
},
{
"epoch": 2.54,
"learning_rate": 2.3050058207217696e-05,
"loss": 0.8231,
"step": 396
},
{
"epoch": 3.12,
"learning_rate": 2.5611175785797444e-05,
"loss": 0.7623,
"step": 440
},
{
"epoch": 3.31,
"learning_rate": 2.8172293364377184e-05,
"loss": 0.7676,
"step": 484
},
{
"epoch": 3.51,
"learning_rate": 3.073341094295693e-05,
"loss": 0.7446,
"step": 528
},
{
"epoch": 4.09,
"learning_rate": 3.329452852153667e-05,
"loss": 0.7007,
"step": 572
},
{
"epoch": 4.29,
"learning_rate": 3.5855646100116416e-05,
"loss": 0.6539,
"step": 616
},
{
"epoch": 4.48,
"learning_rate": 3.841676367869616e-05,
"loss": 0.6692,
"step": 660
},
{
"epoch": 5.06,
"learning_rate": 4.0977881257275904e-05,
"loss": 0.6142,
"step": 704
},
{
"epoch": 5.26,
"learning_rate": 4.353899883585565e-05,
"loss": 0.5731,
"step": 748
},
{
"epoch": 5.46,
"learning_rate": 4.60419091967404e-05,
"loss": 0.6002,
"step": 792
},
{
"epoch": 6.04,
"learning_rate": 4.860302677532014e-05,
"loss": 0.6326,
"step": 836
},
{
"epoch": 6.23,
"learning_rate": 5.116414435389989e-05,
"loss": 0.5305,
"step": 880
},
{
"epoch": 6.43,
"learning_rate": 5.372526193247963e-05,
"loss": 0.5346,
"step": 924
},
{
"epoch": 7.01,
"learning_rate": 5.6286379511059375e-05,
"loss": 0.5423,
"step": 968
},
{
"epoch": 7.21,
"learning_rate": 5.884749708963911e-05,
"loss": 0.4862,
"step": 1012
},
{
"epoch": 7.4,
"learning_rate": 6.140861466821886e-05,
"loss": 0.5019,
"step": 1056
},
{
"epoch": 7.6,
"learning_rate": 6.396973224679861e-05,
"loss": 0.4905,
"step": 1100
},
{
"epoch": 8.18,
"learning_rate": 6.653084982537835e-05,
"loss": 0.4358,
"step": 1144
},
{
"epoch": 8.38,
"learning_rate": 6.90919674039581e-05,
"loss": 0.4369,
"step": 1188
},
{
"epoch": 8.57,
"learning_rate": 7.165308498253784e-05,
"loss": 0.4566,
"step": 1232
},
{
"epoch": 9.15,
"learning_rate": 7.421420256111758e-05,
"loss": 0.4201,
"step": 1276
},
{
"epoch": 9.35,
"learning_rate": 7.677532013969733e-05,
"loss": 0.3598,
"step": 1320
},
{
"epoch": 9.55,
"learning_rate": 7.933643771827707e-05,
"loss": 0.4108,
"step": 1364
},
{
"epoch": 10.13,
"learning_rate": 8.189755529685681e-05,
"loss": 0.3463,
"step": 1408
},
{
"epoch": 10.32,
"learning_rate": 8.445867287543656e-05,
"loss": 0.3552,
"step": 1452
},
{
"epoch": 10.52,
"learning_rate": 8.70197904540163e-05,
"loss": 0.3687,
"step": 1496
},
{
"epoch": 11.1,
"learning_rate": 8.958090803259605e-05,
"loss": 0.3342,
"step": 1540
},
{
"epoch": 11.3,
"learning_rate": 9.214202561117579e-05,
"loss": 0.31,
"step": 1584
},
{
"epoch": 11.49,
"learning_rate": 9.470314318975554e-05,
"loss": 0.2917,
"step": 1628
},
{
"epoch": 12.07,
"learning_rate": 9.726426076833528e-05,
"loss": 0.3055,
"step": 1672
},
{
"epoch": 12.27,
"learning_rate": 9.982537834691502e-05,
"loss": 0.2541,
"step": 1716
},
{
"epoch": 12.47,
"learning_rate": 9.973467935028797e-05,
"loss": 0.277,
"step": 1760
},
{
"epoch": 13.04,
"learning_rate": 9.944994499449946e-05,
"loss": 0.2611,
"step": 1804
},
{
"epoch": 13.24,
"learning_rate": 9.916521063871093e-05,
"loss": 0.208,
"step": 1848
},
{
"epoch": 13.44,
"learning_rate": 9.888047628292242e-05,
"loss": 0.2238,
"step": 1892
},
{
"epoch": 14.02,
"learning_rate": 9.85957419271339e-05,
"loss": 0.2341,
"step": 1936
},
{
"epoch": 14.22,
"learning_rate": 9.831100757134537e-05,
"loss": 0.1703,
"step": 1980
},
{
"epoch": 14.41,
"learning_rate": 9.802627321555685e-05,
"loss": 0.2005,
"step": 2024
},
{
"epoch": 14.61,
"learning_rate": 9.774153885976833e-05,
"loss": 0.1763,
"step": 2068
},
{
"epoch": 15.19,
"learning_rate": 9.745680450397982e-05,
"loss": 0.1496,
"step": 2112
},
{
"epoch": 15.39,
"learning_rate": 9.71720701481913e-05,
"loss": 0.1414,
"step": 2156
},
{
"epoch": 15.58,
"learning_rate": 9.688733579240278e-05,
"loss": 0.1597,
"step": 2200
},
{
"epoch": 16.16,
"learning_rate": 9.660260143661426e-05,
"loss": 0.1234,
"step": 2244
},
{
"epoch": 16.36,
"learning_rate": 9.631786708082572e-05,
"loss": 0.1305,
"step": 2288
},
{
"epoch": 16.56,
"learning_rate": 9.603313272503721e-05,
"loss": 0.1265,
"step": 2332
},
{
"epoch": 17.13,
"learning_rate": 9.574839836924869e-05,
"loss": 0.1109,
"step": 2376
},
{
"epoch": 17.33,
"learning_rate": 9.546366401346017e-05,
"loss": 0.0975,
"step": 2420
},
{
"epoch": 17.53,
"learning_rate": 9.517892965767166e-05,
"loss": 0.1044,
"step": 2464
},
{
"epoch": 18.11,
"learning_rate": 9.489419530188314e-05,
"loss": 0.1009,
"step": 2508
},
{
"epoch": 18.3,
"learning_rate": 9.460946094609462e-05,
"loss": 0.0852,
"step": 2552
},
{
"epoch": 18.5,
"learning_rate": 9.432472659030608e-05,
"loss": 0.0989,
"step": 2596
},
{
"epoch": 19.08,
"learning_rate": 9.403999223451757e-05,
"loss": 0.0802,
"step": 2640
},
{
"epoch": 19.28,
"learning_rate": 9.375525787872906e-05,
"loss": 0.0675,
"step": 2684
},
{
"epoch": 19.48,
"learning_rate": 9.347052352294053e-05,
"loss": 0.0803,
"step": 2728
},
{
"epoch": 20.05,
"learning_rate": 9.318578916715202e-05,
"loss": 0.0766,
"step": 2772
},
{
"epoch": 20.25,
"learning_rate": 9.29010548113635e-05,
"loss": 0.0601,
"step": 2816
},
{
"epoch": 20.45,
"learning_rate": 9.261632045557498e-05,
"loss": 0.0699,
"step": 2860
},
{
"epoch": 21.03,
"learning_rate": 9.233158609978645e-05,
"loss": 0.0648,
"step": 2904
},
{
"epoch": 21.22,
"learning_rate": 9.204685174399793e-05,
"loss": 0.0517,
"step": 2948
},
{
"epoch": 21.42,
"learning_rate": 9.176211738820942e-05,
"loss": 0.0609,
"step": 2992
},
{
"epoch": 21.62,
"learning_rate": 9.147738303242089e-05,
"loss": 0.0569,
"step": 3036
},
{
"epoch": 22.2,
"learning_rate": 9.119264867663238e-05,
"loss": 0.0463,
"step": 3080
},
{
"epoch": 22.39,
"learning_rate": 9.090791432084385e-05,
"loss": 0.051,
"step": 3124
},
{
"epoch": 22.59,
"learning_rate": 9.062317996505534e-05,
"loss": 0.0512,
"step": 3168
},
{
"epoch": 23.17,
"learning_rate": 9.033844560926681e-05,
"loss": 0.0422,
"step": 3212
},
{
"epoch": 23.37,
"learning_rate": 9.005371125347829e-05,
"loss": 0.0415,
"step": 3256
},
{
"epoch": 23.57,
"learning_rate": 8.976897689768977e-05,
"loss": 0.0466,
"step": 3300
},
{
"epoch": 24.14,
"learning_rate": 8.948424254190125e-05,
"loss": 0.0401,
"step": 3344
},
{
"epoch": 24.34,
"learning_rate": 8.919950818611274e-05,
"loss": 0.0386,
"step": 3388
},
{
"epoch": 24.54,
"learning_rate": 8.891477383032421e-05,
"loss": 0.0394,
"step": 3432
},
{
"epoch": 25.12,
"learning_rate": 8.86300394745357e-05,
"loss": 0.0368,
"step": 3476
},
{
"epoch": 25.31,
"learning_rate": 8.834530511874717e-05,
"loss": 0.0357,
"step": 3520
},
{
"epoch": 25.51,
"learning_rate": 8.806057076295865e-05,
"loss": 0.0372,
"step": 3564
},
{
"epoch": 26.09,
"learning_rate": 8.777583640717013e-05,
"loss": 0.0314,
"step": 3608
},
{
"epoch": 26.29,
"learning_rate": 8.749110205138161e-05,
"loss": 0.032,
"step": 3652
},
{
"epoch": 26.48,
"learning_rate": 8.72063676955931e-05,
"loss": 0.0324,
"step": 3696
},
{
"epoch": 27.06,
"learning_rate": 8.692163333980458e-05,
"loss": 0.0309,
"step": 3740
},
{
"epoch": 27.26,
"learning_rate": 8.663689898401606e-05,
"loss": 0.0296,
"step": 3784
},
{
"epoch": 27.46,
"learning_rate": 8.635216462822753e-05,
"loss": 0.0311,
"step": 3828
},
{
"epoch": 28.04,
"learning_rate": 8.6067430272439e-05,
"loss": 0.0304,
"step": 3872
},
{
"epoch": 28.23,
"learning_rate": 8.578269591665049e-05,
"loss": 0.0249,
"step": 3916
},
{
"epoch": 28.43,
"learning_rate": 8.549796156086198e-05,
"loss": 0.0267,
"step": 3960
},
{
"epoch": 29.01,
"learning_rate": 8.521322720507345e-05,
"loss": 0.0299,
"step": 4004
},
{
"epoch": 29.21,
"learning_rate": 8.492849284928494e-05,
"loss": 0.0222,
"step": 4048
},
{
"epoch": 29.4,
"learning_rate": 8.464375849349641e-05,
"loss": 0.0266,
"step": 4092
},
{
"epoch": 29.6,
"learning_rate": 8.435902413770789e-05,
"loss": 0.0293,
"step": 4136
},
{
"epoch": 30.18,
"learning_rate": 8.407428978191936e-05,
"loss": 0.0203,
"step": 4180
},
{
"epoch": 30.38,
"learning_rate": 8.378955542613085e-05,
"loss": 0.0239,
"step": 4224
},
{
"epoch": 30.57,
"learning_rate": 8.350482107034234e-05,
"loss": 0.0263,
"step": 4268
},
{
"epoch": 31.15,
"learning_rate": 8.322008671455381e-05,
"loss": 0.0197,
"step": 4312
},
{
"epoch": 31.35,
"learning_rate": 8.29353523587653e-05,
"loss": 0.0221,
"step": 4356
},
{
"epoch": 31.55,
"learning_rate": 8.265061800297677e-05,
"loss": 0.0225,
"step": 4400
},
{
"epoch": 32.13,
"learning_rate": 8.236588364718825e-05,
"loss": 0.0201,
"step": 4444
},
{
"epoch": 32.32,
"learning_rate": 8.208114929139973e-05,
"loss": 0.0204,
"step": 4488
},
{
"epoch": 32.52,
"learning_rate": 8.179641493561121e-05,
"loss": 0.0213,
"step": 4532
},
{
"epoch": 33.1,
"learning_rate": 8.15116805798227e-05,
"loss": 0.0189,
"step": 4576
},
{
"epoch": 33.3,
"learning_rate": 8.122694622403417e-05,
"loss": 0.0197,
"step": 4620
},
{
"epoch": 33.49,
"learning_rate": 8.094221186824566e-05,
"loss": 0.0201,
"step": 4664
},
{
"epoch": 34.07,
"learning_rate": 8.065747751245713e-05,
"loss": 0.0204,
"step": 4708
},
{
"epoch": 34.27,
"learning_rate": 8.037921439202744e-05,
"loss": 0.0176,
"step": 4752
},
{
"epoch": 34.47,
"learning_rate": 8.009448003623892e-05,
"loss": 0.0206,
"step": 4796
},
{
"epoch": 35.04,
"learning_rate": 7.980974568045039e-05,
"loss": 0.0175,
"step": 4840
},
{
"epoch": 35.24,
"learning_rate": 7.952501132466188e-05,
"loss": 0.017,
"step": 4884
},
{
"epoch": 35.44,
"learning_rate": 7.924027696887337e-05,
"loss": 0.0185,
"step": 4928
},
{
"epoch": 36.02,
"learning_rate": 7.895554261308484e-05,
"loss": 0.0168,
"step": 4972
},
{
"epoch": 36.22,
"learning_rate": 7.867080825729633e-05,
"loss": 0.0161,
"step": 5016
},
{
"epoch": 36.41,
"learning_rate": 7.83860739015078e-05,
"loss": 0.0166,
"step": 5060
},
{
"epoch": 36.61,
"learning_rate": 7.810133954571928e-05,
"loss": 0.0178,
"step": 5104
},
{
"epoch": 37.19,
"learning_rate": 7.781660518993076e-05,
"loss": 0.0151,
"step": 5148
},
{
"epoch": 37.39,
"learning_rate": 7.753187083414224e-05,
"loss": 0.0152,
"step": 5192
},
{
"epoch": 37.58,
"learning_rate": 7.724713647835373e-05,
"loss": 0.0167,
"step": 5236
},
{
"epoch": 38.16,
"learning_rate": 7.69624021225652e-05,
"loss": 0.0133,
"step": 5280
},
{
"epoch": 38.36,
"learning_rate": 7.667766776677669e-05,
"loss": 0.0164,
"step": 5324
},
{
"epoch": 38.56,
"learning_rate": 7.639293341098816e-05,
"loss": 0.016,
"step": 5368
},
{
"epoch": 39.13,
"learning_rate": 7.610819905519963e-05,
"loss": 0.0144,
"step": 5412
},
{
"epoch": 39.33,
"learning_rate": 7.582346469941112e-05,
"loss": 0.0133,
"step": 5456
},
{
"epoch": 39.53,
"learning_rate": 7.55387303436226e-05,
"loss": 0.0156,
"step": 5500
},
{
"epoch": 40.11,
"learning_rate": 7.525399598783408e-05,
"loss": 0.0142,
"step": 5544
},
{
"epoch": 40.3,
"learning_rate": 7.496926163204556e-05,
"loss": 0.0143,
"step": 5588
},
{
"epoch": 40.5,
"learning_rate": 7.468452727625704e-05,
"loss": 0.0128,
"step": 5632
},
{
"epoch": 41.08,
"learning_rate": 7.439979292046852e-05,
"loss": 0.0135,
"step": 5676
},
{
"epoch": 41.28,
"learning_rate": 7.411505856467999e-05,
"loss": 0.011,
"step": 5720
},
{
"epoch": 41.48,
"learning_rate": 7.383032420889148e-05,
"loss": 0.0132,
"step": 5764
},
{
"epoch": 42.05,
"learning_rate": 7.354558985310295e-05,
"loss": 0.0136,
"step": 5808
},
{
"epoch": 42.25,
"learning_rate": 7.326085549731444e-05,
"loss": 0.0112,
"step": 5852
},
{
"epoch": 42.45,
"learning_rate": 7.297612114152592e-05,
"loss": 0.0122,
"step": 5896
},
{
"epoch": 43.03,
"learning_rate": 7.26913867857374e-05,
"loss": 0.0138,
"step": 5940
},
{
"epoch": 43.22,
"learning_rate": 7.240665242994889e-05,
"loss": 0.0108,
"step": 5984
},
{
"epoch": 43.42,
"learning_rate": 7.212191807416036e-05,
"loss": 0.0119,
"step": 6028
},
{
"epoch": 43.62,
"learning_rate": 7.183718371837184e-05,
"loss": 0.0108,
"step": 6072
},
{
"epoch": 44.2,
"learning_rate": 7.155244936258331e-05,
"loss": 0.0105,
"step": 6116
},
{
"epoch": 44.39,
"learning_rate": 7.12677150067948e-05,
"loss": 0.0108,
"step": 6160
},
{
"epoch": 44.59,
"learning_rate": 7.098298065100629e-05,
"loss": 0.0117,
"step": 6204
},
{
"epoch": 45.17,
"learning_rate": 7.069824629521776e-05,
"loss": 0.0109,
"step": 6248
},
{
"epoch": 45.37,
"learning_rate": 7.041998317478807e-05,
"loss": 0.0108,
"step": 6292
},
{
"epoch": 45.57,
"learning_rate": 7.013524881899955e-05,
"loss": 0.012,
"step": 6336
},
{
"epoch": 46.14,
"learning_rate": 6.985051446321102e-05,
"loss": 0.0101,
"step": 6380
},
{
"epoch": 46.34,
"learning_rate": 6.956578010742251e-05,
"loss": 0.0096,
"step": 6424
},
{
"epoch": 46.54,
"learning_rate": 6.928104575163398e-05,
"loss": 0.0102,
"step": 6468
},
{
"epoch": 47.12,
"learning_rate": 6.899631139584547e-05,
"loss": 0.0091,
"step": 6512
},
{
"epoch": 47.31,
"learning_rate": 6.871157704005695e-05,
"loss": 0.0092,
"step": 6556
},
{
"epoch": 47.51,
"learning_rate": 6.842684268426843e-05,
"loss": 0.0096,
"step": 6600
},
{
"epoch": 48.09,
"learning_rate": 6.814210832847992e-05,
"loss": 0.0091,
"step": 6644
},
{
"epoch": 48.29,
"learning_rate": 6.785737397269138e-05,
"loss": 0.0091,
"step": 6688
},
{
"epoch": 48.48,
"learning_rate": 6.757263961690287e-05,
"loss": 0.0097,
"step": 6732
},
{
"epoch": 49.06,
"learning_rate": 6.728790526111434e-05,
"loss": 0.0104,
"step": 6776
},
{
"epoch": 49.26,
"learning_rate": 6.700317090532583e-05,
"loss": 0.0091,
"step": 6820
},
{
"epoch": 49.46,
"learning_rate": 6.671843654953732e-05,
"loss": 0.0087,
"step": 6864
},
{
"epoch": 50.04,
"learning_rate": 6.643370219374879e-05,
"loss": 0.0097,
"step": 6908
},
{
"epoch": 50.23,
"learning_rate": 6.614896783796028e-05,
"loss": 0.009,
"step": 6952
},
{
"epoch": 50.43,
"learning_rate": 6.586423348217174e-05,
"loss": 0.0087,
"step": 6996
},
{
"epoch": 51.01,
"learning_rate": 6.557949912638323e-05,
"loss": 0.009,
"step": 7040
},
{
"epoch": 51.21,
"learning_rate": 6.529476477059471e-05,
"loss": 0.0076,
"step": 7084
},
{
"epoch": 51.4,
"learning_rate": 6.501003041480619e-05,
"loss": 0.0083,
"step": 7128
},
{
"epoch": 51.6,
"learning_rate": 6.472529605901768e-05,
"loss": 0.0079,
"step": 7172
},
{
"epoch": 52.18,
"learning_rate": 6.444056170322915e-05,
"loss": 0.0075,
"step": 7216
},
{
"epoch": 52.38,
"learning_rate": 6.415582734744064e-05,
"loss": 0.0077,
"step": 7260
},
{
"epoch": 52.57,
"learning_rate": 6.38710929916521e-05,
"loss": 0.0069,
"step": 7304
},
{
"epoch": 53.15,
"learning_rate": 6.358635863586358e-05,
"loss": 0.0078,
"step": 7348
},
{
"epoch": 53.35,
"learning_rate": 6.330162428007507e-05,
"loss": 0.0077,
"step": 7392
},
{
"epoch": 53.55,
"learning_rate": 6.301688992428655e-05,
"loss": 0.0074,
"step": 7436
},
{
"epoch": 54.13,
"learning_rate": 6.273215556849803e-05,
"loss": 0.0071,
"step": 7480
},
{
"epoch": 54.32,
"learning_rate": 6.244742121270951e-05,
"loss": 0.0068,
"step": 7524
},
{
"epoch": 54.52,
"learning_rate": 6.2162686856921e-05,
"loss": 0.0081,
"step": 7568
},
{
"epoch": 55.1,
"learning_rate": 6.187795250113247e-05,
"loss": 0.0069,
"step": 7612
},
{
"epoch": 55.3,
"learning_rate": 6.159321814534394e-05,
"loss": 0.0064,
"step": 7656
},
{
"epoch": 55.49,
"learning_rate": 6.130848378955543e-05,
"loss": 0.0076,
"step": 7700
},
{
"epoch": 56.07,
"learning_rate": 6.102374943376691e-05,
"loss": 0.0067,
"step": 7744
},
{
"epoch": 56.27,
"learning_rate": 6.073901507797839e-05,
"loss": 0.0065,
"step": 7788
},
{
"epoch": 56.47,
"learning_rate": 6.045428072218987e-05,
"loss": 0.0067,
"step": 7832
},
{
"epoch": 57.04,
"learning_rate": 6.016954636640135e-05,
"loss": 0.0078,
"step": 7876
},
{
"epoch": 57.24,
"learning_rate": 5.9884812010612834e-05,
"loss": 0.0065,
"step": 7920
},
{
"epoch": 57.44,
"learning_rate": 5.96000776548243e-05,
"loss": 0.0074,
"step": 7964
},
{
"epoch": 58.02,
"learning_rate": 5.931534329903579e-05,
"loss": 0.0081,
"step": 8008
},
{
"epoch": 58.22,
"learning_rate": 5.903060894324727e-05,
"loss": 0.0071,
"step": 8052
},
{
"epoch": 58.41,
"learning_rate": 5.874587458745875e-05,
"loss": 0.0058,
"step": 8096
},
{
"epoch": 58.61,
"learning_rate": 5.846114023167023e-05,
"loss": 0.007,
"step": 8140
},
{
"epoch": 59.19,
"learning_rate": 5.817640587588171e-05,
"loss": 0.0054,
"step": 8184
},
{
"epoch": 59.39,
"learning_rate": 5.789167152009319e-05,
"loss": 0.0062,
"step": 8228
},
{
"epoch": 59.58,
"learning_rate": 5.7606937164304666e-05,
"loss": 0.0058,
"step": 8272
},
{
"epoch": 60.16,
"learning_rate": 5.732220280851615e-05,
"loss": 0.0056,
"step": 8316
},
{
"epoch": 60.36,
"learning_rate": 5.703746845272763e-05,
"loss": 0.0056,
"step": 8360
},
{
"epoch": 60.56,
"learning_rate": 5.675273409693911e-05,
"loss": 0.0061,
"step": 8404
},
{
"epoch": 61.13,
"learning_rate": 5.646799974115059e-05,
"loss": 0.0043,
"step": 8448
},
{
"epoch": 61.33,
"learning_rate": 5.618326538536207e-05,
"loss": 0.005,
"step": 8492
},
{
"epoch": 61.53,
"learning_rate": 5.589853102957355e-05,
"loss": 0.0055,
"step": 8536
},
{
"epoch": 62.11,
"learning_rate": 5.5613796673785025e-05,
"loss": 0.0051,
"step": 8580
},
{
"epoch": 62.3,
"learning_rate": 5.5329062317996505e-05,
"loss": 0.0049,
"step": 8624
},
{
"epoch": 62.5,
"learning_rate": 5.5044327962207986e-05,
"loss": 0.0062,
"step": 8668
},
{
"epoch": 63.08,
"learning_rate": 5.475959360641947e-05,
"loss": 0.0058,
"step": 8712
},
{
"epoch": 63.28,
"learning_rate": 5.447485925063095e-05,
"loss": 0.0069,
"step": 8756
},
{
"epoch": 63.48,
"learning_rate": 5.419012489484243e-05,
"loss": 0.0064,
"step": 8800
},
{
"epoch": 64.05,
"learning_rate": 5.3905390539053916e-05,
"loss": 0.006,
"step": 8844
},
{
"epoch": 64.25,
"learning_rate": 5.362065618326538e-05,
"loss": 0.0059,
"step": 8888
},
{
"epoch": 64.45,
"learning_rate": 5.3335921827476864e-05,
"loss": 0.005,
"step": 8932
},
{
"epoch": 65.03,
"learning_rate": 5.3051187471688344e-05,
"loss": 0.0066,
"step": 8976
},
{
"epoch": 65.22,
"learning_rate": 5.2766453115899825e-05,
"loss": 0.0054,
"step": 9020
},
{
"epoch": 65.42,
"learning_rate": 5.248171876011131e-05,
"loss": 0.0059,
"step": 9064
},
{
"epoch": 65.62,
"learning_rate": 5.219698440432279e-05,
"loss": 0.0059,
"step": 9108
},
{
"epoch": 66.2,
"learning_rate": 5.1912250048534274e-05,
"loss": 0.0059,
"step": 9152
},
{
"epoch": 66.39,
"learning_rate": 5.162751569274574e-05,
"loss": 0.0058,
"step": 9196
},
{
"epoch": 66.59,
"learning_rate": 5.134278133695722e-05,
"loss": 0.0059,
"step": 9240
},
{
"epoch": 67.17,
"learning_rate": 5.10580469811687e-05,
"loss": 0.0045,
"step": 9284
},
{
"epoch": 67.37,
"learning_rate": 5.077331262538019e-05,
"loss": 0.0054,
"step": 9328
},
{
"epoch": 67.57,
"learning_rate": 5.048857826959167e-05,
"loss": 0.0052,
"step": 9372
},
{
"epoch": 68.14,
"learning_rate": 5.020384391380315e-05,
"loss": 0.005,
"step": 9416
},
{
"epoch": 68.34,
"learning_rate": 4.9919109558014625e-05,
"loss": 0.0051,
"step": 9460
},
{
"epoch": 68.54,
"learning_rate": 4.9634375202226106e-05,
"loss": 0.005,
"step": 9504
},
{
"epoch": 69.12,
"learning_rate": 4.934964084643759e-05,
"loss": 0.0054,
"step": 9548
},
{
"epoch": 69.31,
"learning_rate": 4.906490649064907e-05,
"loss": 0.0055,
"step": 9592
},
{
"epoch": 69.51,
"learning_rate": 4.878017213486055e-05,
"loss": 0.0053,
"step": 9636
},
{
"epoch": 70.09,
"learning_rate": 4.849543777907203e-05,
"loss": 0.0051,
"step": 9680
},
{
"epoch": 70.29,
"learning_rate": 4.821070342328351e-05,
"loss": 0.0047,
"step": 9724
},
{
"epoch": 70.48,
"learning_rate": 4.7925969067494984e-05,
"loss": 0.0053,
"step": 9768
},
{
"epoch": 71.06,
"learning_rate": 4.7641234711706464e-05,
"loss": 0.0047,
"step": 9812
},
{
"epoch": 71.26,
"learning_rate": 4.735650035591795e-05,
"loss": 0.0049,
"step": 9856
},
{
"epoch": 71.46,
"learning_rate": 4.7071766000129426e-05,
"loss": 0.0049,
"step": 9900
},
{
"epoch": 72.04,
"learning_rate": 4.6787031644340907e-05,
"loss": 0.005,
"step": 9944
},
{
"epoch": 72.23,
"learning_rate": 4.650229728855239e-05,
"loss": 0.0038,
"step": 9988
},
{
"epoch": 72.43,
"learning_rate": 4.621756293276387e-05,
"loss": 0.0045,
"step": 10032
},
{
"epoch": 73.01,
"learning_rate": 4.593282857697534e-05,
"loss": 0.004,
"step": 10076
},
{
"epoch": 73.21,
"learning_rate": 4.564809422118683e-05,
"loss": 0.004,
"step": 10120
},
{
"epoch": 73.4,
"learning_rate": 4.536335986539831e-05,
"loss": 0.0042,
"step": 10164
},
{
"epoch": 73.6,
"learning_rate": 4.5078625509609784e-05,
"loss": 0.0048,
"step": 10208
},
{
"epoch": 74.18,
"learning_rate": 4.4793891153821265e-05,
"loss": 0.0038,
"step": 10252
},
{
"epoch": 74.38,
"learning_rate": 4.4509156798032745e-05,
"loss": 0.0034,
"step": 10296
},
{
"epoch": 74.57,
"learning_rate": 4.4224422442244226e-05,
"loss": 0.0044,
"step": 10340
},
{
"epoch": 75.15,
"learning_rate": 4.393968808645571e-05,
"loss": 0.0034,
"step": 10384
},
{
"epoch": 75.35,
"learning_rate": 4.365495373066719e-05,
"loss": 0.0036,
"step": 10428
},
{
"epoch": 75.55,
"learning_rate": 4.337021937487867e-05,
"loss": 0.0047,
"step": 10472
},
{
"epoch": 76.13,
"learning_rate": 4.308548501909014e-05,
"loss": 0.0034,
"step": 10516
},
{
"epoch": 76.32,
"learning_rate": 4.280075066330162e-05,
"loss": 0.0034,
"step": 10560
},
{
"epoch": 76.52,
"learning_rate": 4.2516016307513104e-05,
"loss": 0.0039,
"step": 10604
}
],
"logging_steps": 44,
"max_steps": 17171,
"num_train_epochs": 77,
"save_steps": 500,
"total_flos": 9.293451052135219e+17,
"trial_name": null,
"trial_params": null
}
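
The JSON above is the Trainer state stored with this checkpoint: it logs the training loss and learning rate every 44 optimizer steps ("logging_steps") up to global step 10626 of a planned 17171. A minimal sketch, not part of the checkpoint itself, of how the log could be read and the loss curve plotted; it assumes the file has been saved locally as trainer_state.json and that matplotlib is installed:

# Sketch only: load the trainer state and plot training loss vs. global step.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Every log_history entry in this file carries step, epoch, loss, and learning_rate.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("checkpoint-10626 training loss")
plt.savefig("loss_curve.png")  # output filename is illustrative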