{
"best_metric": 11.913970947265625,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.18198362147406733,
"eval_steps": 25,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009099181073703367,
"grad_norm": 0.04135997220873833,
"learning_rate": 2.9999999999999997e-05,
"loss": 11.9246,
"step": 1
},
{
"epoch": 0.0009099181073703367,
"eval_loss": 11.937775611877441,
"eval_runtime": 0.3953,
"eval_samples_per_second": 126.499,
"eval_steps_per_second": 126.499,
"step": 1
},
{
"epoch": 0.0018198362147406734,
"grad_norm": 0.04857102036476135,
"learning_rate": 5.9999999999999995e-05,
"loss": 11.9222,
"step": 2
},
{
"epoch": 0.00272975432211101,
"grad_norm": 0.04188262298703194,
"learning_rate": 8.999999999999999e-05,
"loss": 11.9244,
"step": 3
},
{
"epoch": 0.003639672429481347,
"grad_norm": 0.0355207696557045,
"learning_rate": 0.00011999999999999999,
"loss": 11.9192,
"step": 4
},
{
"epoch": 0.004549590536851683,
"grad_norm": 0.03408201411366463,
"learning_rate": 0.00015,
"loss": 11.9284,
"step": 5
},
{
"epoch": 0.00545950864422202,
"grad_norm": 0.04698517918586731,
"learning_rate": 0.00017999999999999998,
"loss": 11.9289,
"step": 6
},
{
"epoch": 0.006369426751592357,
"grad_norm": 0.028339426964521408,
"learning_rate": 0.00020999999999999998,
"loss": 11.9301,
"step": 7
},
{
"epoch": 0.007279344858962694,
"grad_norm": 0.033868804574012756,
"learning_rate": 0.00023999999999999998,
"loss": 11.9284,
"step": 8
},
{
"epoch": 0.00818926296633303,
"grad_norm": 0.025452876463532448,
"learning_rate": 0.00027,
"loss": 11.9306,
"step": 9
},
{
"epoch": 0.009099181073703366,
"grad_norm": 0.030591776594519615,
"learning_rate": 0.0003,
"loss": 11.9325,
"step": 10
},
{
"epoch": 0.010009099181073703,
"grad_norm": 0.028602290898561478,
"learning_rate": 0.0002999794957488703,
"loss": 11.9304,
"step": 11
},
{
"epoch": 0.01091901728844404,
"grad_norm": 0.0217262115329504,
"learning_rate": 0.0002999179886011389,
"loss": 11.9296,
"step": 12
},
{
"epoch": 0.011828935395814377,
"grad_norm": 0.0400615856051445,
"learning_rate": 0.0002998154953722457,
"loss": 11.93,
"step": 13
},
{
"epoch": 0.012738853503184714,
"grad_norm": 0.025656167417764664,
"learning_rate": 0.00029967204408281613,
"loss": 11.9278,
"step": 14
},
{
"epoch": 0.01364877161055505,
"grad_norm": 0.03284378722310066,
"learning_rate": 0.00029948767395100045,
"loss": 11.9305,
"step": 15
},
{
"epoch": 0.014558689717925387,
"grad_norm": 0.03374679386615753,
"learning_rate": 0.0002992624353817517,
"loss": 11.9269,
"step": 16
},
{
"epoch": 0.015468607825295723,
"grad_norm": 0.03800738975405693,
"learning_rate": 0.0002989963899530457,
"loss": 11.9266,
"step": 17
},
{
"epoch": 0.01637852593266606,
"grad_norm": 0.03509436547756195,
"learning_rate": 0.00029868961039904624,
"loss": 11.931,
"step": 18
},
{
"epoch": 0.017288444040036398,
"grad_norm": 0.038303714245557785,
"learning_rate": 0.00029834218059022024,
"loss": 11.9259,
"step": 19
},
{
"epoch": 0.018198362147406732,
"grad_norm": 0.031731538474559784,
"learning_rate": 0.00029795419551040833,
"loss": 11.9318,
"step": 20
},
{
"epoch": 0.01910828025477707,
"grad_norm": 0.03439212962985039,
"learning_rate": 0.00029752576123085736,
"loss": 11.9271,
"step": 21
},
{
"epoch": 0.020018198362147407,
"grad_norm": 0.04004055634140968,
"learning_rate": 0.0002970569948812214,
"loss": 11.9296,
"step": 22
},
{
"epoch": 0.020928116469517744,
"grad_norm": 0.03706061467528343,
"learning_rate": 0.0002965480246175399,
"loss": 11.932,
"step": 23
},
{
"epoch": 0.02183803457688808,
"grad_norm": 0.02771821990609169,
"learning_rate": 0.0002959989895872009,
"loss": 11.9353,
"step": 24
},
{
"epoch": 0.022747952684258416,
"grad_norm": 0.029228534549474716,
"learning_rate": 0.0002954100398908995,
"loss": 11.9244,
"step": 25
},
{
"epoch": 0.022747952684258416,
"eval_loss": 11.93565559387207,
"eval_runtime": 0.4023,
"eval_samples_per_second": 124.295,
"eval_steps_per_second": 124.295,
"step": 25
},
{
"epoch": 0.023657870791628753,
"grad_norm": 0.034826189279556274,
"learning_rate": 0.0002947813365416023,
"loss": 11.9279,
"step": 26
},
{
"epoch": 0.02456778889899909,
"grad_norm": 0.03567393496632576,
"learning_rate": 0.0002941130514205272,
"loss": 11.9287,
"step": 27
},
{
"epoch": 0.025477707006369428,
"grad_norm": 0.038747385144233704,
"learning_rate": 0.0002934053672301536,
"loss": 11.9257,
"step": 28
},
{
"epoch": 0.026387625113739762,
"grad_norm": 0.046440187841653824,
"learning_rate": 0.00029265847744427303,
"loss": 11.9298,
"step": 29
},
{
"epoch": 0.0272975432211101,
"grad_norm": 0.035133399069309235,
"learning_rate": 0.00029187258625509513,
"loss": 11.9304,
"step": 30
},
{
"epoch": 0.028207461328480437,
"grad_norm": 0.032283294945955276,
"learning_rate": 0.00029104790851742417,
"loss": 11.9251,
"step": 31
},
{
"epoch": 0.029117379435850774,
"grad_norm": 0.03427290543913841,
"learning_rate": 0.0002901846696899191,
"loss": 11.9269,
"step": 32
},
{
"epoch": 0.03002729754322111,
"grad_norm": 0.04527918994426727,
"learning_rate": 0.00028928310577345606,
"loss": 11.9327,
"step": 33
},
{
"epoch": 0.030937215650591446,
"grad_norm": 0.05488235875964165,
"learning_rate": 0.0002883434632466077,
"loss": 11.929,
"step": 34
},
{
"epoch": 0.03184713375796178,
"grad_norm": 0.05063425749540329,
"learning_rate": 0.00028736599899825856,
"loss": 11.9281,
"step": 35
},
{
"epoch": 0.03275705186533212,
"grad_norm": 0.04788482189178467,
"learning_rate": 0.00028635098025737434,
"loss": 11.9271,
"step": 36
},
{
"epoch": 0.03366696997270246,
"grad_norm": 0.04777894541621208,
"learning_rate": 0.00028529868451994384,
"loss": 11.929,
"step": 37
},
{
"epoch": 0.034576888080072796,
"grad_norm": 0.05248137190937996,
"learning_rate": 0.0002842093994731145,
"loss": 11.9288,
"step": 38
},
{
"epoch": 0.03548680618744313,
"grad_norm": 0.053672585636377335,
"learning_rate": 0.00028308342291654174,
"loss": 11.9281,
"step": 39
},
{
"epoch": 0.036396724294813464,
"grad_norm": 0.03825056925415993,
"learning_rate": 0.00028192106268097334,
"loss": 11.9321,
"step": 40
},
{
"epoch": 0.0373066424021838,
"grad_norm": 0.05029003694653511,
"learning_rate": 0.00028072263654409154,
"loss": 11.9258,
"step": 41
},
{
"epoch": 0.03821656050955414,
"grad_norm": 0.035441625863313675,
"learning_rate": 0.0002794884721436361,
"loss": 11.9275,
"step": 42
},
{
"epoch": 0.039126478616924476,
"grad_norm": 0.05929926037788391,
"learning_rate": 0.00027821890688783083,
"loss": 11.9292,
"step": 43
},
{
"epoch": 0.040036396724294813,
"grad_norm": 0.06052996218204498,
"learning_rate": 0.0002769142878631403,
"loss": 11.9249,
"step": 44
},
{
"epoch": 0.04094631483166515,
"grad_norm": 0.0721125602722168,
"learning_rate": 0.00027557497173937923,
"loss": 11.9298,
"step": 45
},
{
"epoch": 0.04185623293903549,
"grad_norm": 0.057127729058265686,
"learning_rate": 0.000274201324672203,
"loss": 11.934,
"step": 46
},
{
"epoch": 0.042766151046405826,
"grad_norm": 0.06003405153751373,
"learning_rate": 0.00027279372220300385,
"loss": 11.9309,
"step": 47
},
{
"epoch": 0.04367606915377616,
"grad_norm": 0.05513525381684303,
"learning_rate": 0.0002713525491562421,
"loss": 11.9282,
"step": 48
},
{
"epoch": 0.044585987261146494,
"grad_norm": 0.05979841947555542,
"learning_rate": 0.00026987819953423867,
"loss": 11.9289,
"step": 49
},
{
"epoch": 0.04549590536851683,
"grad_norm": 0.07970727235078812,
"learning_rate": 0.00026837107640945905,
"loss": 11.929,
"step": 50
},
{
"epoch": 0.04549590536851683,
"eval_loss": 11.92931842803955,
"eval_runtime": 0.3938,
"eval_samples_per_second": 126.97,
"eval_steps_per_second": 126.97,
"step": 50
},
{
"epoch": 0.04640582347588717,
"grad_norm": 0.15377391874790192,
"learning_rate": 0.0002668315918143169,
"loss": 11.9129,
"step": 51
},
{
"epoch": 0.047315741583257506,
"grad_norm": 0.16208061575889587,
"learning_rate": 0.00026526016662852886,
"loss": 11.9072,
"step": 52
},
{
"epoch": 0.048225659690627844,
"grad_norm": 0.1508318930864334,
"learning_rate": 0.00026365723046405023,
"loss": 11.9114,
"step": 53
},
{
"epoch": 0.04913557779799818,
"grad_norm": 0.1259605437517166,
"learning_rate": 0.0002620232215476231,
"loss": 11.9135,
"step": 54
},
{
"epoch": 0.05004549590536852,
"grad_norm": 0.13642816245555878,
"learning_rate": 0.0002603585866009697,
"loss": 11.9171,
"step": 55
},
{
"epoch": 0.050955414012738856,
"grad_norm": 0.1361204981803894,
"learning_rate": 0.00025866378071866334,
"loss": 11.9173,
"step": 56
},
{
"epoch": 0.051865332120109194,
"grad_norm": 0.11698855459690094,
"learning_rate": 0.00025693926724370956,
"loss": 11.9176,
"step": 57
},
{
"epoch": 0.052775250227479524,
"grad_norm": 0.08528847247362137,
"learning_rate": 0.00025518551764087326,
"loss": 11.9185,
"step": 58
},
{
"epoch": 0.05368516833484986,
"grad_norm": 0.05039224773645401,
"learning_rate": 0.00025340301136778483,
"loss": 11.9163,
"step": 59
},
{
"epoch": 0.0545950864422202,
"grad_norm": 0.07579994201660156,
"learning_rate": 0.00025159223574386114,
"loss": 11.9175,
"step": 60
},
{
"epoch": 0.055505004549590536,
"grad_norm": 0.05966591835021973,
"learning_rate": 0.0002497536858170772,
"loss": 11.9148,
"step": 61
},
{
"epoch": 0.056414922656960874,
"grad_norm": 0.0829024538397789,
"learning_rate": 0.00024788786422862526,
"loss": 11.917,
"step": 62
},
{
"epoch": 0.05732484076433121,
"grad_norm": 0.07866007834672928,
"learning_rate": 0.00024599528107549745,
"loss": 11.9179,
"step": 63
},
{
"epoch": 0.05823475887170155,
"grad_norm": 0.06062201410531998,
"learning_rate": 0.00024407645377103054,
"loss": 11.919,
"step": 64
},
{
"epoch": 0.059144676979071886,
"grad_norm": 0.059842102229595184,
"learning_rate": 0.00024213190690345018,
"loss": 11.9171,
"step": 65
},
{
"epoch": 0.06005459508644222,
"grad_norm": 0.1073002815246582,
"learning_rate": 0.00024016217209245374,
"loss": 11.9169,
"step": 66
},
{
"epoch": 0.060964513193812554,
"grad_norm": 0.08621833473443985,
"learning_rate": 0.00023816778784387094,
"loss": 11.9157,
"step": 67
},
{
"epoch": 0.06187443130118289,
"grad_norm": 0.06708350777626038,
"learning_rate": 0.0002361492994024415,
"loss": 11.9198,
"step": 68
},
{
"epoch": 0.06278434940855324,
"grad_norm": 0.06917531043291092,
"learning_rate": 0.0002341072586027509,
"loss": 11.9187,
"step": 69
},
{
"epoch": 0.06369426751592357,
"grad_norm": 0.07202816754579544,
"learning_rate": 0.00023204222371836405,
"loss": 11.916,
"step": 70
},
{
"epoch": 0.0646041856232939,
"grad_norm": 0.07306147366762161,
"learning_rate": 0.00022995475930919905,
"loss": 11.9201,
"step": 71
},
{
"epoch": 0.06551410373066424,
"grad_norm": 0.0853971466422081,
"learning_rate": 0.00022784543606718227,
"loss": 11.9233,
"step": 72
},
{
"epoch": 0.06642402183803457,
"grad_norm": 0.07358640432357788,
"learning_rate": 0.00022571483066022657,
"loss": 11.9158,
"step": 73
},
{
"epoch": 0.06733393994540492,
"grad_norm": 0.0628897175192833,
"learning_rate": 0.0002235635255745762,
"loss": 11.9193,
"step": 74
},
{
"epoch": 0.06824385805277525,
"grad_norm": 0.07899107038974762,
"learning_rate": 0.00022139210895556104,
"loss": 11.9145,
"step": 75
},
{
"epoch": 0.06824385805277525,
"eval_loss": 11.921226501464844,
"eval_runtime": 0.3951,
"eval_samples_per_second": 126.559,
"eval_steps_per_second": 126.559,
"step": 75
},
{
"epoch": 0.06915377616014559,
"grad_norm": 0.06351622939109802,
"learning_rate": 0.00021920117444680317,
"loss": 11.9147,
"step": 76
},
{
"epoch": 0.07006369426751592,
"grad_norm": 0.10964340716600418,
"learning_rate": 0.00021699132102792097,
"loss": 11.9162,
"step": 77
},
{
"epoch": 0.07097361237488627,
"grad_norm": 0.0985313206911087,
"learning_rate": 0.0002147631528507739,
"loss": 11.9125,
"step": 78
},
{
"epoch": 0.0718835304822566,
"grad_norm": 0.08922101557254791,
"learning_rate": 0.00021251727907429355,
"loss": 11.914,
"step": 79
},
{
"epoch": 0.07279344858962693,
"grad_norm": 0.08533662557601929,
"learning_rate": 0.0002102543136979454,
"loss": 11.9112,
"step": 80
},
{
"epoch": 0.07370336669699727,
"grad_norm": 0.07660248875617981,
"learning_rate": 0.0002079748753938678,
"loss": 11.9204,
"step": 81
},
{
"epoch": 0.0746132848043676,
"grad_norm": 0.092041015625,
"learning_rate": 0.0002056795873377331,
"loss": 11.9157,
"step": 82
},
{
"epoch": 0.07552320291173795,
"grad_norm": 0.06479498744010925,
"learning_rate": 0.00020336907703837748,
"loss": 11.92,
"step": 83
},
{
"epoch": 0.07643312101910828,
"grad_norm": 0.07256922870874405,
"learning_rate": 0.00020104397616624645,
"loss": 11.9126,
"step": 84
},
{
"epoch": 0.07734303912647862,
"grad_norm": 0.07706281542778015,
"learning_rate": 0.00019870492038070252,
"loss": 11.9111,
"step": 85
},
{
"epoch": 0.07825295723384895,
"grad_norm": 0.0908479318022728,
"learning_rate": 0.0001963525491562421,
"loss": 11.9176,
"step": 86
},
{
"epoch": 0.0791628753412193,
"grad_norm": 0.0594383142888546,
"learning_rate": 0.0001939875056076697,
"loss": 11.9157,
"step": 87
},
{
"epoch": 0.08007279344858963,
"grad_norm": 0.06577011942863464,
"learning_rate": 0.00019161043631427666,
"loss": 11.9193,
"step": 88
},
{
"epoch": 0.08098271155595996,
"grad_norm": 0.07253321260213852,
"learning_rate": 0.00018922199114307294,
"loss": 11.9117,
"step": 89
},
{
"epoch": 0.0818926296633303,
"grad_norm": 0.08957824856042862,
"learning_rate": 0.00018682282307111987,
"loss": 11.9163,
"step": 90
},
{
"epoch": 0.08280254777070063,
"grad_norm": 0.0847165584564209,
"learning_rate": 0.00018441358800701273,
"loss": 11.9122,
"step": 91
},
{
"epoch": 0.08371246587807098,
"grad_norm": 0.0713360458612442,
"learning_rate": 0.00018199494461156203,
"loss": 11.9178,
"step": 92
},
{
"epoch": 0.08462238398544131,
"grad_norm": 0.08201546221971512,
"learning_rate": 0.000179567554117722,
"loss": 11.9134,
"step": 93
},
{
"epoch": 0.08553230209281165,
"grad_norm": 0.07122859358787537,
"learning_rate": 0.00017713208014981648,
"loss": 11.9171,
"step": 94
},
{
"epoch": 0.08644222020018198,
"grad_norm": 0.08683283627033234,
"learning_rate": 0.00017468918854211007,
"loss": 11.9146,
"step": 95
},
{
"epoch": 0.08735213830755233,
"grad_norm": 0.09123071283102036,
"learning_rate": 0.00017223954715677627,
"loss": 11.9156,
"step": 96
},
{
"epoch": 0.08826205641492266,
"grad_norm": 0.0987405776977539,
"learning_rate": 0.00016978382570131034,
"loss": 11.9176,
"step": 97
},
{
"epoch": 0.08917197452229299,
"grad_norm": 0.06329481303691864,
"learning_rate": 0.00016732269554543794,
"loss": 11.9224,
"step": 98
},
{
"epoch": 0.09008189262966333,
"grad_norm": 0.11751674115657806,
"learning_rate": 0.00016485682953756942,
"loss": 11.9276,
"step": 99
},
{
"epoch": 0.09099181073703366,
"grad_norm": 0.11672987043857574,
"learning_rate": 0.00016238690182084986,
"loss": 11.9208,
"step": 100
},
{
"epoch": 0.09099181073703366,
"eval_loss": 11.91674518585205,
"eval_runtime": 0.393,
"eval_samples_per_second": 127.218,
"eval_steps_per_second": 127.218,
"step": 100
},
{
"epoch": 0.09190172884440401,
"grad_norm": 0.1158631294965744,
"learning_rate": 0.0001599135876488549,
"loss": 11.9078,
"step": 101
},
{
"epoch": 0.09281164695177434,
"grad_norm": 0.14805841445922852,
"learning_rate": 0.00015743756320098332,
"loss": 11.9002,
"step": 102
},
{
"epoch": 0.09372156505914468,
"grad_norm": 0.09196311235427856,
"learning_rate": 0.0001549595053975962,
"loss": 11.9067,
"step": 103
},
{
"epoch": 0.09463148316651501,
"grad_norm": 0.10549457371234894,
"learning_rate": 0.00015248009171495378,
"loss": 11.9041,
"step": 104
},
{
"epoch": 0.09554140127388536,
"grad_norm": 0.07767681032419205,
"learning_rate": 0.00015,
"loss": 11.9133,
"step": 105
},
{
"epoch": 0.09645131938125569,
"grad_norm": 0.08533289283514023,
"learning_rate": 0.00014751990828504622,
"loss": 11.9087,
"step": 106
},
{
"epoch": 0.09736123748862602,
"grad_norm": 0.06854332238435745,
"learning_rate": 0.00014504049460240375,
"loss": 11.9119,
"step": 107
},
{
"epoch": 0.09827115559599636,
"grad_norm": 0.04668566957116127,
"learning_rate": 0.00014256243679901663,
"loss": 11.9142,
"step": 108
},
{
"epoch": 0.09918107370336669,
"grad_norm": 0.0475914292037487,
"learning_rate": 0.00014008641235114508,
"loss": 11.9127,
"step": 109
},
{
"epoch": 0.10009099181073704,
"grad_norm": 0.059128470718860626,
"learning_rate": 0.00013761309817915014,
"loss": 11.9116,
"step": 110
},
{
"epoch": 0.10100090991810737,
"grad_norm": 0.05030561983585358,
"learning_rate": 0.00013514317046243058,
"loss": 11.9064,
"step": 111
},
{
"epoch": 0.10191082802547771,
"grad_norm": 0.06432997435331345,
"learning_rate": 0.00013267730445456208,
"loss": 11.914,
"step": 112
},
{
"epoch": 0.10282074613284804,
"grad_norm": 0.051897771656513214,
"learning_rate": 0.00013021617429868963,
"loss": 11.9129,
"step": 113
},
{
"epoch": 0.10373066424021839,
"grad_norm": 0.03223414719104767,
"learning_rate": 0.00012776045284322368,
"loss": 11.9117,
"step": 114
},
{
"epoch": 0.10464058234758872,
"grad_norm": 0.05324099212884903,
"learning_rate": 0.00012531081145788987,
"loss": 11.9124,
"step": 115
},
{
"epoch": 0.10555050045495905,
"grad_norm": 0.06255560368299484,
"learning_rate": 0.00012286791985018355,
"loss": 11.9131,
"step": 116
},
{
"epoch": 0.10646041856232939,
"grad_norm": 0.062217820435762405,
"learning_rate": 0.00012043244588227796,
"loss": 11.9168,
"step": 117
},
{
"epoch": 0.10737033666969972,
"grad_norm": 0.04969204589724541,
"learning_rate": 0.00011800505538843798,
"loss": 11.9101,
"step": 118
},
{
"epoch": 0.10828025477707007,
"grad_norm": 0.07386729121208191,
"learning_rate": 0.00011558641199298727,
"loss": 11.917,
"step": 119
},
{
"epoch": 0.1091901728844404,
"grad_norm": 0.06895623356103897,
"learning_rate": 0.00011317717692888012,
"loss": 11.9105,
"step": 120
},
{
"epoch": 0.11010009099181074,
"grad_norm": 0.05684174597263336,
"learning_rate": 0.00011077800885692702,
"loss": 11.9136,
"step": 121
},
{
"epoch": 0.11101000909918107,
"grad_norm": 0.044148996472358704,
"learning_rate": 0.00010838956368572334,
"loss": 11.917,
"step": 122
},
{
"epoch": 0.11191992720655142,
"grad_norm": 0.0792914479970932,
"learning_rate": 0.0001060124943923303,
"loss": 11.9131,
"step": 123
},
{
"epoch": 0.11282984531392175,
"grad_norm": 0.051811669021844864,
"learning_rate": 0.0001036474508437579,
"loss": 11.9137,
"step": 124
},
{
"epoch": 0.11373976342129208,
"grad_norm": 0.05288928002119064,
"learning_rate": 0.00010129507961929748,
"loss": 11.9091,
"step": 125
},
{
"epoch": 0.11373976342129208,
"eval_loss": 11.915268898010254,
"eval_runtime": 0.3932,
"eval_samples_per_second": 127.166,
"eval_steps_per_second": 127.166,
"step": 125
},
{
"epoch": 0.11464968152866242,
"grad_norm": 0.06318605691194534,
"learning_rate": 9.895602383375353e-05,
"loss": 11.9102,
"step": 126
},
{
"epoch": 0.11555959963603275,
"grad_norm": 0.059707947075366974,
"learning_rate": 9.663092296162251e-05,
"loss": 11.9127,
"step": 127
},
{
"epoch": 0.1164695177434031,
"grad_norm": 0.03758562356233597,
"learning_rate": 9.432041266226686e-05,
"loss": 11.9113,
"step": 128
},
{
"epoch": 0.11737943585077343,
"grad_norm": 0.0917787030339241,
"learning_rate": 9.202512460613219e-05,
"loss": 11.9143,
"step": 129
},
{
"epoch": 0.11828935395814377,
"grad_norm": 0.05985008552670479,
"learning_rate": 8.97456863020546e-05,
"loss": 11.9191,
"step": 130
},
{
"epoch": 0.1191992720655141,
"grad_norm": 0.06727699935436249,
"learning_rate": 8.748272092570646e-05,
"loss": 11.9117,
"step": 131
},
{
"epoch": 0.12010919017288443,
"grad_norm": 0.07841650396585464,
"learning_rate": 8.523684714922608e-05,
"loss": 11.914,
"step": 132
},
{
"epoch": 0.12101910828025478,
"grad_norm": 0.052555058151483536,
"learning_rate": 8.300867897207903e-05,
"loss": 11.916,
"step": 133
},
{
"epoch": 0.12192902638762511,
"grad_norm": 0.04492655768990517,
"learning_rate": 8.079882555319684e-05,
"loss": 11.9157,
"step": 134
},
{
"epoch": 0.12283894449499545,
"grad_norm": 0.07871715724468231,
"learning_rate": 7.860789104443896e-05,
"loss": 11.913,
"step": 135
},
{
"epoch": 0.12374886260236578,
"grad_norm": 0.06570082902908325,
"learning_rate": 7.643647442542382e-05,
"loss": 11.9145,
"step": 136
},
{
"epoch": 0.12465878070973613,
"grad_norm": 0.06373663991689682,
"learning_rate": 7.428516933977347e-05,
"loss": 11.9204,
"step": 137
},
{
"epoch": 0.12556869881710647,
"grad_norm": 0.08079321682453156,
"learning_rate": 7.215456393281776e-05,
"loss": 11.9097,
"step": 138
},
{
"epoch": 0.1264786169244768,
"grad_norm": 0.07556037604808807,
"learning_rate": 7.004524069080096e-05,
"loss": 11.9106,
"step": 139
},
{
"epoch": 0.12738853503184713,
"grad_norm": 0.05594971776008606,
"learning_rate": 6.795777628163599e-05,
"loss": 11.912,
"step": 140
},
{
"epoch": 0.12829845313921748,
"grad_norm": 0.07762129604816437,
"learning_rate": 6.58927413972491e-05,
"loss": 11.9111,
"step": 141
},
{
"epoch": 0.1292083712465878,
"grad_norm": 0.05675530061125755,
"learning_rate": 6.385070059755846e-05,
"loss": 11.9126,
"step": 142
},
{
"epoch": 0.13011828935395814,
"grad_norm": 0.0633772760629654,
"learning_rate": 6.183221215612904e-05,
"loss": 11.9094,
"step": 143
},
{
"epoch": 0.13102820746132848,
"grad_norm": 0.06897582858800888,
"learning_rate": 5.983782790754623e-05,
"loss": 11.9121,
"step": 144
},
{
"epoch": 0.13193812556869883,
"grad_norm": 0.07465220987796783,
"learning_rate": 5.786809309654982e-05,
"loss": 11.9154,
"step": 145
},
{
"epoch": 0.13284804367606914,
"grad_norm": 0.06727467477321625,
"learning_rate": 5.592354622896944e-05,
"loss": 11.916,
"step": 146
},
{
"epoch": 0.1337579617834395,
"grad_norm": 0.08664090186357498,
"learning_rate": 5.40047189245025e-05,
"loss": 11.9114,
"step": 147
},
{
"epoch": 0.13466787989080983,
"grad_norm": 0.07395700365304947,
"learning_rate": 5.211213577137469e-05,
"loss": 11.9161,
"step": 148
},
{
"epoch": 0.13557779799818018,
"grad_norm": 0.0758979544043541,
"learning_rate": 5.024631418292274e-05,
"loss": 11.9158,
"step": 149
},
{
"epoch": 0.1364877161055505,
"grad_norm": 0.15526293218135834,
"learning_rate": 4.840776425613886e-05,
"loss": 11.9239,
"step": 150
},
{
"epoch": 0.1364877161055505,
"eval_loss": 11.914360046386719,
"eval_runtime": 0.3945,
"eval_samples_per_second": 126.748,
"eval_steps_per_second": 126.748,
"step": 150
},
{
"epoch": 0.13739763421292084,
"grad_norm": 0.12348955869674683,
"learning_rate": 4.659698863221513e-05,
"loss": 11.9017,
"step": 151
},
{
"epoch": 0.13830755232029118,
"grad_norm": 0.11510408669710159,
"learning_rate": 4.481448235912671e-05,
"loss": 11.9001,
"step": 152
},
{
"epoch": 0.1392174704276615,
"grad_norm": 0.08950791507959366,
"learning_rate": 4.306073275629044e-05,
"loss": 11.9053,
"step": 153
},
{
"epoch": 0.14012738853503184,
"grad_norm": 0.08536588400602341,
"learning_rate": 4.133621928133665e-05,
"loss": 11.9046,
"step": 154
},
{
"epoch": 0.1410373066424022,
"grad_norm": 0.09876493364572525,
"learning_rate": 3.964141339903026e-05,
"loss": 11.905,
"step": 155
},
{
"epoch": 0.14194722474977253,
"grad_norm": 0.11245915293693542,
"learning_rate": 3.797677845237696e-05,
"loss": 11.9043,
"step": 156
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.052722182124853134,
"learning_rate": 3.634276953594982e-05,
"loss": 11.9073,
"step": 157
},
{
"epoch": 0.1437670609645132,
"grad_norm": 0.07163731008768082,
"learning_rate": 3.473983337147118e-05,
"loss": 11.9085,
"step": 158
},
{
"epoch": 0.14467697907188354,
"grad_norm": 0.07469494640827179,
"learning_rate": 3.316840818568315e-05,
"loss": 11.9059,
"step": 159
},
{
"epoch": 0.14558689717925385,
"grad_norm": 0.06093104928731918,
"learning_rate": 3.162892359054098e-05,
"loss": 11.9057,
"step": 160
},
{
"epoch": 0.1464968152866242,
"grad_norm": 0.04979480803012848,
"learning_rate": 3.0121800465761293e-05,
"loss": 11.9111,
"step": 161
},
{
"epoch": 0.14740673339399454,
"grad_norm": 0.04737919196486473,
"learning_rate": 2.8647450843757897e-05,
"loss": 11.9108,
"step": 162
},
{
"epoch": 0.1483166515013649,
"grad_norm": 0.07306618988513947,
"learning_rate": 2.7206277796996144e-05,
"loss": 11.9118,
"step": 163
},
{
"epoch": 0.1492265696087352,
"grad_norm": 0.07405707985162735,
"learning_rate": 2.5798675327796993e-05,
"loss": 11.908,
"step": 164
},
{
"epoch": 0.15013648771610555,
"grad_norm": 0.06463942676782608,
"learning_rate": 2.4425028260620715e-05,
"loss": 11.9078,
"step": 165
},
{
"epoch": 0.1510464058234759,
"grad_norm": 0.05370442196726799,
"learning_rate": 2.3085712136859668e-05,
"loss": 11.9129,
"step": 166
},
{
"epoch": 0.15195632393084624,
"grad_norm": 0.07165276259183884,
"learning_rate": 2.178109311216913e-05,
"loss": 11.913,
"step": 167
},
{
"epoch": 0.15286624203821655,
"grad_norm": 0.049962639808654785,
"learning_rate": 2.0511527856363912e-05,
"loss": 11.9099,
"step": 168
},
{
"epoch": 0.1537761601455869,
"grad_norm": 0.04808368161320686,
"learning_rate": 1.927736345590839e-05,
"loss": 11.9139,
"step": 169
},
{
"epoch": 0.15468607825295724,
"grad_norm": 0.05554522946476936,
"learning_rate": 1.8078937319026654e-05,
"loss": 11.9162,
"step": 170
},
{
"epoch": 0.15559599636032756,
"grad_norm": 0.07787572592496872,
"learning_rate": 1.6916577083458228e-05,
"loss": 11.9158,
"step": 171
},
{
"epoch": 0.1565059144676979,
"grad_norm": 0.055409956723451614,
"learning_rate": 1.579060052688548e-05,
"loss": 11.9144,
"step": 172
},
{
"epoch": 0.15741583257506825,
"grad_norm": 0.09151246398687363,
"learning_rate": 1.4701315480056164e-05,
"loss": 11.9083,
"step": 173
},
{
"epoch": 0.1583257506824386,
"grad_norm": 0.07995638996362686,
"learning_rate": 1.3649019742625623e-05,
"loss": 11.9108,
"step": 174
},
{
"epoch": 0.1592356687898089,
"grad_norm": 0.04699774831533432,
"learning_rate": 1.2634001001741373e-05,
"loss": 11.9139,
"step": 175
},
{
"epoch": 0.1592356687898089,
"eval_loss": 11.914013862609863,
"eval_runtime": 0.3934,
"eval_samples_per_second": 127.105,
"eval_steps_per_second": 127.105,
"step": 175
},
{
"epoch": 0.16014558689717925,
"grad_norm": 0.0662156343460083,
"learning_rate": 1.1656536753392287e-05,
"loss": 11.916,
"step": 176
},
{
"epoch": 0.1610555050045496,
"grad_norm": 0.05447092279791832,
"learning_rate": 1.0716894226543953e-05,
"loss": 11.9107,
"step": 177
},
{
"epoch": 0.16196542311191992,
"grad_norm": 0.07941412925720215,
"learning_rate": 9.815330310080887e-06,
"loss": 11.9055,
"step": 178
},
{
"epoch": 0.16287534121929026,
"grad_norm": 0.05880994349718094,
"learning_rate": 8.952091482575824e-06,
"loss": 11.9072,
"step": 179
},
{
"epoch": 0.1637852593266606,
"grad_norm": 0.056635115295648575,
"learning_rate": 8.127413744904804e-06,
"loss": 11.9153,
"step": 180
},
{
"epoch": 0.16469517743403095,
"grad_norm": 0.0853646844625473,
"learning_rate": 7.34152255572697e-06,
"loss": 11.9109,
"step": 181
},
{
"epoch": 0.16560509554140126,
"grad_norm": 0.09290549904108047,
"learning_rate": 6.594632769846353e-06,
"loss": 11.9111,
"step": 182
},
{
"epoch": 0.1665150136487716,
"grad_norm": 0.05124865844845772,
"learning_rate": 5.886948579472778e-06,
"loss": 11.9119,
"step": 183
},
{
"epoch": 0.16742493175614195,
"grad_norm": 0.11257482320070267,
"learning_rate": 5.218663458397715e-06,
"loss": 11.9144,
"step": 184
},
{
"epoch": 0.16833484986351227,
"grad_norm": 0.061337102204561234,
"learning_rate": 4.589960109100444e-06,
"loss": 11.906,
"step": 185
},
{
"epoch": 0.16924476797088261,
"grad_norm": 0.055634934455156326,
"learning_rate": 4.001010412799138e-06,
"loss": 11.9124,
"step": 186
},
{
"epoch": 0.17015468607825296,
"grad_norm": 0.08984959125518799,
"learning_rate": 3.451975382460109e-06,
"loss": 11.9058,
"step": 187
},
{
"epoch": 0.1710646041856233,
"grad_norm": 0.09038890153169632,
"learning_rate": 2.9430051187785962e-06,
"loss": 11.916,
"step": 188
},
{
"epoch": 0.17197452229299362,
"grad_norm": 0.09575130045413971,
"learning_rate": 2.4742387691426445e-06,
"loss": 11.9111,
"step": 189
},
{
"epoch": 0.17288444040036396,
"grad_norm": 0.08616434037685394,
"learning_rate": 2.0458044895916513e-06,
"loss": 11.9045,
"step": 190
},
{
"epoch": 0.1737943585077343,
"grad_norm": 0.08434485644102097,
"learning_rate": 1.6578194097797258e-06,
"loss": 11.917,
"step": 191
},
{
"epoch": 0.17470427661510465,
"grad_norm": 0.06836060434579849,
"learning_rate": 1.3103896009537207e-06,
"loss": 11.9066,
"step": 192
},
{
"epoch": 0.17561419472247497,
"grad_norm": 0.05706486850976944,
"learning_rate": 1.0036100469542786e-06,
"loss": 11.9108,
"step": 193
},
{
"epoch": 0.17652411282984531,
"grad_norm": 0.08672205358743668,
"learning_rate": 7.375646182482875e-07,
"loss": 11.9167,
"step": 194
},
{
"epoch": 0.17743403093721566,
"grad_norm": 0.08131056278944016,
"learning_rate": 5.123260489995229e-07,
"loss": 11.9167,
"step": 195
},
{
"epoch": 0.17834394904458598,
"grad_norm": 0.07311462610960007,
"learning_rate": 3.2795591718381975e-07,
"loss": 11.9137,
"step": 196
},
{
"epoch": 0.17925386715195632,
"grad_norm": 0.07971935719251633,
"learning_rate": 1.8450462775428942e-07,
"loss": 11.913,
"step": 197
},
{
"epoch": 0.18016378525932666,
"grad_norm": 0.0999036505818367,
"learning_rate": 8.201139886109264e-08,
"loss": 11.9194,
"step": 198
},
{
"epoch": 0.181073703366697,
"grad_norm": 0.09455912560224533,
"learning_rate": 2.0504251129649374e-08,
"loss": 11.9173,
"step": 199
},
{
"epoch": 0.18198362147406733,
"grad_norm": 0.12160496413707733,
"learning_rate": 0.0,
"loss": 11.9234,
"step": 200
},
{
"epoch": 0.18198362147406733,
"eval_loss": 11.913970947265625,
"eval_runtime": 0.3964,
"eval_samples_per_second": 126.139,
"eval_steps_per_second": 126.139,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 318524620800.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}