Training in progress, step 150, checkpoint (commit dc629b1, verified)
{
"best_metric": 1.065322521753842e-06,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 0.22641509433962265,
"eval_steps": 25,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015094339622641509,
"grad_norm": 9.727989196777344,
"learning_rate": 2.9999999999999997e-05,
"loss": 2.9383,
"step": 1
},
{
"epoch": 0.0015094339622641509,
"eval_loss": 5.682714939117432,
"eval_runtime": 4.6749,
"eval_samples_per_second": 10.695,
"eval_steps_per_second": 1.497,
"step": 1
},
{
"epoch": 0.0030188679245283017,
"grad_norm": 9.845698356628418,
"learning_rate": 5.9999999999999995e-05,
"loss": 3.161,
"step": 2
},
{
"epoch": 0.004528301886792453,
"grad_norm": 9.684377670288086,
"learning_rate": 8.999999999999999e-05,
"loss": 2.9913,
"step": 3
},
{
"epoch": 0.0060377358490566035,
"grad_norm": 11.722749710083008,
"learning_rate": 0.00011999999999999999,
"loss": 2.3506,
"step": 4
},
{
"epoch": 0.007547169811320755,
"grad_norm": 10.132170677185059,
"learning_rate": 0.00015,
"loss": 0.6881,
"step": 5
},
{
"epoch": 0.009056603773584906,
"grad_norm": 4.307816028594971,
"learning_rate": 0.00017999999999999998,
"loss": 0.1223,
"step": 6
},
{
"epoch": 0.010566037735849057,
"grad_norm": 0.7658965587615967,
"learning_rate": 0.00020999999999999998,
"loss": 0.0231,
"step": 7
},
{
"epoch": 0.012075471698113207,
"grad_norm": 0.04265095666050911,
"learning_rate": 0.00023999999999999998,
"loss": 0.0011,
"step": 8
},
{
"epoch": 0.013584905660377358,
"grad_norm": 0.017767086625099182,
"learning_rate": 0.00027,
"loss": 0.0003,
"step": 9
},
{
"epoch": 0.01509433962264151,
"grad_norm": 0.011602415703237057,
"learning_rate": 0.0003,
"loss": 0.0002,
"step": 10
},
{
"epoch": 0.01660377358490566,
"grad_norm": 0.03181695565581322,
"learning_rate": 0.0002999794957488703,
"loss": 0.0004,
"step": 11
},
{
"epoch": 0.018113207547169812,
"grad_norm": 0.05330698564648628,
"learning_rate": 0.0002999179886011389,
"loss": 0.0005,
"step": 12
},
{
"epoch": 0.019622641509433963,
"grad_norm": 0.025343570858240128,
"learning_rate": 0.0002998154953722457,
"loss": 0.0002,
"step": 13
},
{
"epoch": 0.021132075471698115,
"grad_norm": 0.0021175649017095566,
"learning_rate": 0.00029967204408281613,
"loss": 0.0,
"step": 14
},
{
"epoch": 0.022641509433962263,
"grad_norm": 0.0011911799665540457,
"learning_rate": 0.00029948767395100045,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.024150943396226414,
"grad_norm": 0.0003560419427230954,
"learning_rate": 0.0002992624353817517,
"loss": 0.0,
"step": 16
},
{
"epoch": 0.025660377358490565,
"grad_norm": 0.00067779456730932,
"learning_rate": 0.0002989963899530457,
"loss": 0.0,
"step": 17
},
{
"epoch": 0.027169811320754716,
"grad_norm": 0.0009647771366871893,
"learning_rate": 0.00029868961039904624,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.028679245283018868,
"grad_norm": 0.0003761858679354191,
"learning_rate": 0.00029834218059022024,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.03018867924528302,
"grad_norm": 0.0005870265304110944,
"learning_rate": 0.00029795419551040833,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.03169811320754717,
"grad_norm": 0.000383986858651042,
"learning_rate": 0.00029752576123085736,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.03320754716981132,
"grad_norm": 0.005932660773396492,
"learning_rate": 0.0002970569948812214,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.03471698113207547,
"grad_norm": 36.32376480102539,
"learning_rate": 0.0002965480246175399,
"loss": 0.2835,
"step": 23
},
{
"epoch": 0.036226415094339624,
"grad_norm": 2.1993796825408936,
"learning_rate": 0.0002959989895872009,
"loss": 0.0043,
"step": 24
},
{
"epoch": 0.03773584905660377,
"grad_norm": 0.00012767089356202632,
"learning_rate": 0.0002954100398908995,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.03773584905660377,
"eval_loss": 7.517421181546524e-05,
"eval_runtime": 4.2094,
"eval_samples_per_second": 11.878,
"eval_steps_per_second": 1.663,
"step": 25
},
{
"epoch": 0.03924528301886793,
"grad_norm": 0.0003870428481604904,
"learning_rate": 0.0002947813365416023,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.040754716981132075,
"grad_norm": 0.005472894757986069,
"learning_rate": 0.0002941130514205272,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.04226415094339623,
"grad_norm": 0.07725609093904495,
"learning_rate": 0.0002934053672301536,
"loss": 0.0004,
"step": 28
},
{
"epoch": 0.04377358490566038,
"grad_norm": 0.02922365628182888,
"learning_rate": 0.00029265847744427303,
"loss": 0.0001,
"step": 29
},
{
"epoch": 0.045283018867924525,
"grad_norm": 0.022537946701049805,
"learning_rate": 0.00029187258625509513,
"loss": 0.0001,
"step": 30
},
{
"epoch": 0.04679245283018868,
"grad_norm": 0.003779177786782384,
"learning_rate": 0.00029104790851742417,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.04830188679245283,
"grad_norm": 0.0010583605617284775,
"learning_rate": 0.0002901846696899191,
"loss": 0.0,
"step": 32
},
{
"epoch": 0.04981132075471698,
"grad_norm": 0.00034484267234802246,
"learning_rate": 0.00028928310577345606,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.05132075471698113,
"grad_norm": 0.00039084404124878347,
"learning_rate": 0.0002883434632466077,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.052830188679245285,
"grad_norm": 0.0004337659338489175,
"learning_rate": 0.00028736599899825856,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.05433962264150943,
"grad_norm": 0.0008137256954796612,
"learning_rate": 0.00028635098025737434,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.05584905660377359,
"grad_norm": 0.0009190964046865702,
"learning_rate": 0.00028529868451994384,
"loss": 0.0,
"step": 37
},
{
"epoch": 0.057358490566037736,
"grad_norm": 0.0008511942578479648,
"learning_rate": 0.0002842093994731145,
"loss": 0.0,
"step": 38
},
{
"epoch": 0.05886792452830188,
"grad_norm": 0.0011096836533397436,
"learning_rate": 0.00028308342291654174,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.06037735849056604,
"grad_norm": 0.0015806319424882531,
"learning_rate": 0.00028192106268097334,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.061886792452830186,
"grad_norm": 0.0018580264877527952,
"learning_rate": 0.00028072263654409154,
"loss": 0.0,
"step": 41
},
{
"epoch": 0.06339622641509433,
"grad_norm": 0.0010565021075308323,
"learning_rate": 0.0002794884721436361,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.0649056603773585,
"grad_norm": 0.0007690914790146053,
"learning_rate": 0.00027821890688783083,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.06641509433962264,
"grad_norm": 0.004054183140397072,
"learning_rate": 0.0002769142878631403,
"loss": 0.0,
"step": 44
},
{
"epoch": 0.06792452830188679,
"grad_norm": 0.000730887521058321,
"learning_rate": 0.00027557497173937923,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.06943396226415094,
"grad_norm": 0.00044435824383981526,
"learning_rate": 0.000274201324672203,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.0709433962264151,
"grad_norm": 0.00042757357005029917,
"learning_rate": 0.00027279372220300385,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.07245283018867925,
"grad_norm": 0.0003725688438862562,
"learning_rate": 0.0002713525491562421,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.0739622641509434,
"grad_norm": 0.001020943047478795,
"learning_rate": 0.00026987819953423867,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.07547169811320754,
"grad_norm": 0.0002570598153397441,
"learning_rate": 0.00026837107640945905,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.07547169811320754,
"eval_loss": 2.3195319954538718e-05,
"eval_runtime": 4.2144,
"eval_samples_per_second": 11.864,
"eval_steps_per_second": 1.661,
"step": 50
},
{
"epoch": 0.07698113207547169,
"grad_norm": 0.15251204371452332,
"learning_rate": 0.0002668315918143169,
"loss": 0.0003,
"step": 51
},
{
"epoch": 0.07849056603773585,
"grad_norm": 0.005662671290338039,
"learning_rate": 0.00026526016662852886,
"loss": 0.0,
"step": 52
},
{
"epoch": 0.08,
"grad_norm": 0.0022388154175132513,
"learning_rate": 0.00026365723046405023,
"loss": 0.0,
"step": 53
},
{
"epoch": 0.08150943396226415,
"grad_norm": 0.0037454920820891857,
"learning_rate": 0.0002620232215476231,
"loss": 0.0,
"step": 54
},
{
"epoch": 0.0830188679245283,
"grad_norm": 0.015162762254476547,
"learning_rate": 0.0002603585866009697,
"loss": 0.0,
"step": 55
},
{
"epoch": 0.08452830188679246,
"grad_norm": 0.002864385489374399,
"learning_rate": 0.00025866378071866334,
"loss": 0.0,
"step": 56
},
{
"epoch": 0.0860377358490566,
"grad_norm": 0.0016423336928710341,
"learning_rate": 0.00025693926724370956,
"loss": 0.0,
"step": 57
},
{
"epoch": 0.08754716981132075,
"grad_norm": 0.0003825683379545808,
"learning_rate": 0.00025518551764087326,
"loss": 0.0,
"step": 58
},
{
"epoch": 0.0890566037735849,
"grad_norm": 0.00015799446555320174,
"learning_rate": 0.00025340301136778483,
"loss": 0.0,
"step": 59
},
{
"epoch": 0.09056603773584905,
"grad_norm": 0.00015713188622612506,
"learning_rate": 0.00025159223574386114,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.09207547169811321,
"grad_norm": 0.00025742893922142684,
"learning_rate": 0.0002497536858170772,
"loss": 0.0,
"step": 61
},
{
"epoch": 0.09358490566037736,
"grad_norm": 0.00015674770111218095,
"learning_rate": 0.00024788786422862526,
"loss": 0.0,
"step": 62
},
{
"epoch": 0.09509433962264151,
"grad_norm": 0.00010490669228602201,
"learning_rate": 0.00024599528107549745,
"loss": 0.0,
"step": 63
},
{
"epoch": 0.09660377358490566,
"grad_norm": 0.00021542170725297183,
"learning_rate": 0.00024407645377103054,
"loss": 0.0,
"step": 64
},
{
"epoch": 0.09811320754716982,
"grad_norm": 0.00013019933248870075,
"learning_rate": 0.00024213190690345018,
"loss": 0.0,
"step": 65
},
{
"epoch": 0.09962264150943397,
"grad_norm": 0.00012139895261498168,
"learning_rate": 0.00024016217209245374,
"loss": 0.0,
"step": 66
},
{
"epoch": 0.10113207547169811,
"grad_norm": 7.941039075376466e-05,
"learning_rate": 0.00023816778784387094,
"loss": 0.0,
"step": 67
},
{
"epoch": 0.10264150943396226,
"grad_norm": 0.0001448198891011998,
"learning_rate": 0.0002361492994024415,
"loss": 0.0,
"step": 68
},
{
"epoch": 0.10415094339622641,
"grad_norm": 0.00020370357378851622,
"learning_rate": 0.0002341072586027509,
"loss": 0.0,
"step": 69
},
{
"epoch": 0.10566037735849057,
"grad_norm": 0.00010941782966256142,
"learning_rate": 0.00023204222371836405,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.10716981132075472,
"grad_norm": 0.0001951769954757765,
"learning_rate": 0.00022995475930919905,
"loss": 0.0,
"step": 71
},
{
"epoch": 0.10867924528301887,
"grad_norm": 0.0001411143020959571,
"learning_rate": 0.00022784543606718227,
"loss": 0.0,
"step": 72
},
{
"epoch": 0.11018867924528301,
"grad_norm": 0.00015462335431948304,
"learning_rate": 0.00022571483066022657,
"loss": 0.0,
"step": 73
},
{
"epoch": 0.11169811320754718,
"grad_norm": 0.0002607516653370112,
"learning_rate": 0.0002235635255745762,
"loss": 0.0,
"step": 74
},
{
"epoch": 0.11320754716981132,
"grad_norm": 7.921539508970454e-05,
"learning_rate": 0.00022139210895556104,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.11320754716981132,
"eval_loss": 1.744836140460393e-06,
"eval_runtime": 4.2094,
"eval_samples_per_second": 11.878,
"eval_steps_per_second": 1.663,
"step": 75
},
{
"epoch": 0.11471698113207547,
"grad_norm": 9.14011980057694e-05,
"learning_rate": 0.00021920117444680317,
"loss": 0.0,
"step": 76
},
{
"epoch": 0.11622641509433962,
"grad_norm": 0.00013622430560644716,
"learning_rate": 0.00021699132102792097,
"loss": 0.0,
"step": 77
},
{
"epoch": 0.11773584905660377,
"grad_norm": 0.00016065326053649187,
"learning_rate": 0.0002147631528507739,
"loss": 0.0,
"step": 78
},
{
"epoch": 0.11924528301886793,
"grad_norm": 0.00017030604067258537,
"learning_rate": 0.00021251727907429355,
"loss": 0.0,
"step": 79
},
{
"epoch": 0.12075471698113208,
"grad_norm": 0.0002959435514640063,
"learning_rate": 0.0002102543136979454,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.12226415094339622,
"grad_norm": 7.727096817689016e-05,
"learning_rate": 0.0002079748753938678,
"loss": 0.0,
"step": 81
},
{
"epoch": 0.12377358490566037,
"grad_norm": 0.0001273597008548677,
"learning_rate": 0.0002056795873377331,
"loss": 0.0,
"step": 82
},
{
"epoch": 0.12528301886792453,
"grad_norm": 6.839417619630694e-05,
"learning_rate": 0.00020336907703837748,
"loss": 0.0,
"step": 83
},
{
"epoch": 0.12679245283018867,
"grad_norm": 6.494703120552003e-05,
"learning_rate": 0.00020104397616624645,
"loss": 0.0,
"step": 84
},
{
"epoch": 0.12830188679245283,
"grad_norm": 6.701619713567197e-05,
"learning_rate": 0.00019870492038070252,
"loss": 0.0,
"step": 85
},
{
"epoch": 0.129811320754717,
"grad_norm": 0.00010753441893029958,
"learning_rate": 0.0001963525491562421,
"loss": 0.0,
"step": 86
},
{
"epoch": 0.13132075471698113,
"grad_norm": 0.00015338734374381602,
"learning_rate": 0.0001939875056076697,
"loss": 0.0,
"step": 87
},
{
"epoch": 0.1328301886792453,
"grad_norm": 6.136780575616285e-05,
"learning_rate": 0.00019161043631427666,
"loss": 0.0,
"step": 88
},
{
"epoch": 0.13433962264150942,
"grad_norm": 9.956698340829462e-05,
"learning_rate": 0.00018922199114307294,
"loss": 0.0,
"step": 89
},
{
"epoch": 0.13584905660377358,
"grad_norm": 0.0001380999747198075,
"learning_rate": 0.00018682282307111987,
"loss": 0.0,
"step": 90
},
{
"epoch": 0.13735849056603774,
"grad_norm": 6.714855408063158e-05,
"learning_rate": 0.00018441358800701273,
"loss": 0.0,
"step": 91
},
{
"epoch": 0.13886792452830188,
"grad_norm": 0.00010223900608252734,
"learning_rate": 0.00018199494461156203,
"loss": 0.0,
"step": 92
},
{
"epoch": 0.14037735849056604,
"grad_norm": 0.00013666001905221492,
"learning_rate": 0.000179567554117722,
"loss": 0.0,
"step": 93
},
{
"epoch": 0.1418867924528302,
"grad_norm": 0.0001388599193887785,
"learning_rate": 0.00017713208014981648,
"loss": 0.0,
"step": 94
},
{
"epoch": 0.14339622641509434,
"grad_norm": 9.564343781676143e-05,
"learning_rate": 0.00017468918854211007,
"loss": 0.0,
"step": 95
},
{
"epoch": 0.1449056603773585,
"grad_norm": 0.00011167208140250295,
"learning_rate": 0.00017223954715677627,
"loss": 0.0,
"step": 96
},
{
"epoch": 0.14641509433962263,
"grad_norm": 0.0002111889043590054,
"learning_rate": 0.00016978382570131034,
"loss": 0.0,
"step": 97
},
{
"epoch": 0.1479245283018868,
"grad_norm": 0.00010389567614765838,
"learning_rate": 0.00016732269554543794,
"loss": 0.0,
"step": 98
},
{
"epoch": 0.14943396226415095,
"grad_norm": 0.00016280317504424602,
"learning_rate": 0.00016485682953756942,
"loss": 0.0,
"step": 99
},
{
"epoch": 0.1509433962264151,
"grad_norm": 0.00014044858107808977,
"learning_rate": 0.00016238690182084986,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.1509433962264151,
"eval_loss": 1.082283802134043e-06,
"eval_runtime": 4.2074,
"eval_samples_per_second": 11.884,
"eval_steps_per_second": 1.664,
"step": 100
},
{
"epoch": 0.15245283018867925,
"grad_norm": 2.1298315914464183e-05,
"learning_rate": 0.0001599135876488549,
"loss": 0.0,
"step": 101
},
{
"epoch": 0.15396226415094338,
"grad_norm": 0.00017682627367321402,
"learning_rate": 0.00015743756320098332,
"loss": 0.0,
"step": 102
},
{
"epoch": 0.15547169811320755,
"grad_norm": 4.0571725548943505e-05,
"learning_rate": 0.0001549595053975962,
"loss": 0.0,
"step": 103
},
{
"epoch": 0.1569811320754717,
"grad_norm": 3.1571042200084776e-05,
"learning_rate": 0.00015248009171495378,
"loss": 0.0,
"step": 104
},
{
"epoch": 0.15849056603773584,
"grad_norm": 1.788660119927954e-05,
"learning_rate": 0.00015,
"loss": 0.0,
"step": 105
},
{
"epoch": 0.16,
"grad_norm": 1.5078448086569551e-05,
"learning_rate": 0.00014751990828504622,
"loss": 0.0,
"step": 106
},
{
"epoch": 0.16150943396226414,
"grad_norm": 2.4111786842695437e-05,
"learning_rate": 0.00014504049460240375,
"loss": 0.0,
"step": 107
},
{
"epoch": 0.1630188679245283,
"grad_norm": 2.8707843739539385e-05,
"learning_rate": 0.00014256243679901663,
"loss": 0.0,
"step": 108
},
{
"epoch": 0.16452830188679246,
"grad_norm": 4.438353062141687e-05,
"learning_rate": 0.00014008641235114508,
"loss": 0.0,
"step": 109
},
{
"epoch": 0.1660377358490566,
"grad_norm": 3.8902497180970386e-05,
"learning_rate": 0.00013761309817915014,
"loss": 0.0,
"step": 110
},
{
"epoch": 0.16754716981132076,
"grad_norm": 6.369838956743479e-05,
"learning_rate": 0.00013514317046243058,
"loss": 0.0,
"step": 111
},
{
"epoch": 0.16905660377358492,
"grad_norm": 6.360220140777528e-05,
"learning_rate": 0.00013267730445456208,
"loss": 0.0,
"step": 112
},
{
"epoch": 0.17056603773584905,
"grad_norm": 4.417836316861212e-05,
"learning_rate": 0.00013021617429868963,
"loss": 0.0,
"step": 113
},
{
"epoch": 0.1720754716981132,
"grad_norm": 8.401078230235726e-05,
"learning_rate": 0.00012776045284322368,
"loss": 0.0,
"step": 114
},
{
"epoch": 0.17358490566037735,
"grad_norm": 4.5576893171528354e-05,
"learning_rate": 0.00012531081145788987,
"loss": 0.0,
"step": 115
},
{
"epoch": 0.1750943396226415,
"grad_norm": 3.908126745955087e-05,
"learning_rate": 0.00012286791985018355,
"loss": 0.0,
"step": 116
},
{
"epoch": 0.17660377358490567,
"grad_norm": 4.5852139010094106e-05,
"learning_rate": 0.00012043244588227796,
"loss": 0.0,
"step": 117
},
{
"epoch": 0.1781132075471698,
"grad_norm": 5.557672920986079e-05,
"learning_rate": 0.00011800505538843798,
"loss": 0.0,
"step": 118
},
{
"epoch": 0.17962264150943397,
"grad_norm": 0.0001369229139527306,
"learning_rate": 0.00011558641199298727,
"loss": 0.0,
"step": 119
},
{
"epoch": 0.1811320754716981,
"grad_norm": 6.510502134915441e-05,
"learning_rate": 0.00011317717692888012,
"loss": 0.0,
"step": 120
},
{
"epoch": 0.18264150943396226,
"grad_norm": 0.00012901521404273808,
"learning_rate": 0.00011077800885692702,
"loss": 0.0,
"step": 121
},
{
"epoch": 0.18415094339622642,
"grad_norm": 7.49451428418979e-05,
"learning_rate": 0.00010838956368572334,
"loss": 0.0,
"step": 122
},
{
"epoch": 0.18566037735849056,
"grad_norm": 8.595697727287188e-05,
"learning_rate": 0.0001060124943923303,
"loss": 0.0,
"step": 123
},
{
"epoch": 0.18716981132075472,
"grad_norm": 6.461943121394143e-05,
"learning_rate": 0.0001036474508437579,
"loss": 0.0,
"step": 124
},
{
"epoch": 0.18867924528301888,
"grad_norm": 5.69835101487115e-05,
"learning_rate": 0.00010129507961929748,
"loss": 0.0,
"step": 125
},
{
"epoch": 0.18867924528301888,
"eval_loss": 8.622415066383837e-07,
"eval_runtime": 4.2048,
"eval_samples_per_second": 11.891,
"eval_steps_per_second": 1.665,
"step": 125
},
{
"epoch": 0.19018867924528302,
"grad_norm": 3.0237964892876334e-05,
"learning_rate": 9.895602383375353e-05,
"loss": 0.0,
"step": 126
},
{
"epoch": 0.19169811320754718,
"grad_norm": 2.042949381575454e-05,
"learning_rate": 9.663092296162251e-05,
"loss": 0.0,
"step": 127
},
{
"epoch": 0.1932075471698113,
"grad_norm": 3.0739392968825996e-05,
"learning_rate": 9.432041266226686e-05,
"loss": 0.0,
"step": 128
},
{
"epoch": 0.19471698113207547,
"grad_norm": 8.033741323743016e-05,
"learning_rate": 9.202512460613219e-05,
"loss": 0.0,
"step": 129
},
{
"epoch": 0.19622641509433963,
"grad_norm": 6.30953727522865e-05,
"learning_rate": 8.97456863020546e-05,
"loss": 0.0,
"step": 130
},
{
"epoch": 0.19773584905660377,
"grad_norm": 1.7301101252087392e-05,
"learning_rate": 8.748272092570646e-05,
"loss": 0.0,
"step": 131
},
{
"epoch": 0.19924528301886793,
"grad_norm": 3.599835326895118e-05,
"learning_rate": 8.523684714922608e-05,
"loss": 0.0,
"step": 132
},
{
"epoch": 0.20075471698113206,
"grad_norm": 4.0078033634927124e-05,
"learning_rate": 8.300867897207903e-05,
"loss": 0.0,
"step": 133
},
{
"epoch": 0.20226415094339623,
"grad_norm": 4.6186356485122815e-05,
"learning_rate": 8.079882555319684e-05,
"loss": 0.0,
"step": 134
},
{
"epoch": 0.2037735849056604,
"grad_norm": 4.505734978010878e-05,
"learning_rate": 7.860789104443896e-05,
"loss": 0.0,
"step": 135
},
{
"epoch": 0.20528301886792452,
"grad_norm": 4.943280146108009e-05,
"learning_rate": 7.643647442542382e-05,
"loss": 0.0,
"step": 136
},
{
"epoch": 0.20679245283018868,
"grad_norm": 4.755162444780581e-05,
"learning_rate": 7.428516933977347e-05,
"loss": 0.0,
"step": 137
},
{
"epoch": 0.20830188679245282,
"grad_norm": 4.70041049993597e-05,
"learning_rate": 7.215456393281776e-05,
"loss": 0.0,
"step": 138
},
{
"epoch": 0.20981132075471698,
"grad_norm": 3.964366624131799e-05,
"learning_rate": 7.004524069080096e-05,
"loss": 0.0,
"step": 139
},
{
"epoch": 0.21132075471698114,
"grad_norm": 0.42303475737571716,
"learning_rate": 6.795777628163599e-05,
"loss": 0.0002,
"step": 140
},
{
"epoch": 0.21283018867924527,
"grad_norm": 0.00010890993871726096,
"learning_rate": 6.58927413972491e-05,
"loss": 0.0,
"step": 141
},
{
"epoch": 0.21433962264150944,
"grad_norm": 5.2230472647352144e-05,
"learning_rate": 6.385070059755846e-05,
"loss": 0.0,
"step": 142
},
{
"epoch": 0.2158490566037736,
"grad_norm": 0.9842696785926819,
"learning_rate": 6.183221215612904e-05,
"loss": 0.0004,
"step": 143
},
{
"epoch": 0.21735849056603773,
"grad_norm": 3.418095002416521e-05,
"learning_rate": 5.983782790754623e-05,
"loss": 0.0,
"step": 144
},
{
"epoch": 0.2188679245283019,
"grad_norm": 8.156678086379543e-05,
"learning_rate": 5.786809309654982e-05,
"loss": 0.0,
"step": 145
},
{
"epoch": 0.22037735849056603,
"grad_norm": 0.0001543686812510714,
"learning_rate": 5.592354622896944e-05,
"loss": 0.0,
"step": 146
},
{
"epoch": 0.2218867924528302,
"grad_norm": 0.00012703303946182132,
"learning_rate": 5.40047189245025e-05,
"loss": 0.0,
"step": 147
},
{
"epoch": 0.22339622641509435,
"grad_norm": 0.00015206955140456557,
"learning_rate": 5.211213577137469e-05,
"loss": 0.0,
"step": 148
},
{
"epoch": 0.22490566037735849,
"grad_norm": 0.0001602250849828124,
"learning_rate": 5.024631418292274e-05,
"loss": 0.0,
"step": 149
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.0002219385205535218,
"learning_rate": 4.840776425613886e-05,
"loss": 0.0,
"step": 150
},
{
"epoch": 0.22641509433962265,
"eval_loss": 1.065322521753842e-06,
"eval_runtime": 4.213,
"eval_samples_per_second": 11.868,
"eval_steps_per_second": 1.662,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.364765716348928e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
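
Below is a minimal sketch (not part of the checkpoint itself) of how one might load this `trainer_state.json` and separate the per-step training entries from the evaluation entries in `log_history`. Entries containing `"loss"` are the per-step training logs (`logging_steps` is 1), while entries containing `"eval_loss"` are the evaluations run every `eval_steps` = 25 steps; `best_metric` and `best_model_checkpoint` record the lowest eval loss seen so far. The file path below is an assumption, pointing at wherever the checkpoint was downloaded.

```python
import json
from pathlib import Path

# Hypothetical local path to the downloaded checkpoint directory.
state_path = Path("checkpoint-150/trainer_state.json")
state = json.loads(state_path.read_text())

# Training-loss entries (one per optimizer step, since logging_steps = 1).
train_log = [(e["step"], e["loss"])
             for e in state["log_history"] if "loss" in e]

# Evaluation entries (written every eval_steps = 25 steps).
eval_log = [(e["step"], e["eval_loss"])
            for e in state["log_history"] if "eval_loss" in e]

print(f"best_metric (lowest eval_loss): {state['best_metric']:.3e} "
      f"at {state['best_model_checkpoint']}")
for step, loss in eval_log:
    print(f"step {step:>3}: eval_loss = {loss:.3e}")
```

Note that with `EarlyStoppingCallback` configured here (`early_stopping_patience` = 1, threshold 0.0), training would stop the first time an evaluation fails to improve on `best_metric`; the counter is still 0 at step 150, so the run is set to continue toward `max_steps` = 200.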