{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.4466546112115732,
"eval_steps": 1000,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007233273056057866,
"grad_norm": 0.41587013006210327,
"learning_rate": 2e-05,
"loss": 1.4019,
"step": 1
},
{
"epoch": 0.014466546112115732,
"grad_norm": 0.44406870007514954,
"learning_rate": 4e-05,
"loss": 1.3296,
"step": 2
},
{
"epoch": 0.0216998191681736,
"grad_norm": 0.36022505164146423,
"learning_rate": 6e-05,
"loss": 1.2996,
"step": 3
},
{
"epoch": 0.028933092224231464,
"grad_norm": 0.4058440923690796,
"learning_rate": 8e-05,
"loss": 1.3372,
"step": 4
},
{
"epoch": 0.03616636528028933,
"grad_norm": 0.22433464229106903,
"learning_rate": 0.0001,
"loss": 1.2748,
"step": 5
},
{
"epoch": 0.0433996383363472,
"grad_norm": 0.1584281474351883,
"learning_rate": 9.963099630996311e-05,
"loss": 1.1955,
"step": 6
},
{
"epoch": 0.05063291139240506,
"grad_norm": 0.1598576456308365,
"learning_rate": 9.92619926199262e-05,
"loss": 1.132,
"step": 7
},
{
"epoch": 0.05786618444846293,
"grad_norm": 1.4637757539749146,
"learning_rate": 9.889298892988931e-05,
"loss": 1.1369,
"step": 8
},
{
"epoch": 0.0650994575045208,
"grad_norm": 0.18311572074890137,
"learning_rate": 9.85239852398524e-05,
"loss": 1.0405,
"step": 9
},
{
"epoch": 0.07233273056057866,
"grad_norm": 1.6665862798690796,
"learning_rate": 9.81549815498155e-05,
"loss": 0.9399,
"step": 10
},
{
"epoch": 0.07956600361663653,
"grad_norm": 0.3348374366760254,
"learning_rate": 9.77859778597786e-05,
"loss": 0.8621,
"step": 11
},
{
"epoch": 0.0867992766726944,
"grad_norm": 0.3106568157672882,
"learning_rate": 9.74169741697417e-05,
"loss": 0.7543,
"step": 12
},
{
"epoch": 0.09403254972875226,
"grad_norm": 0.4362149238586426,
"learning_rate": 9.70479704797048e-05,
"loss": 0.7134,
"step": 13
},
{
"epoch": 0.10126582278481013,
"grad_norm": 0.26037415862083435,
"learning_rate": 9.66789667896679e-05,
"loss": 0.6487,
"step": 14
},
{
"epoch": 0.10849909584086799,
"grad_norm": 0.2504118084907532,
"learning_rate": 9.6309963099631e-05,
"loss": 0.56,
"step": 15
},
{
"epoch": 0.11573236889692586,
"grad_norm": 0.18326584994792938,
"learning_rate": 9.59409594095941e-05,
"loss": 0.5336,
"step": 16
},
{
"epoch": 0.12296564195298372,
"grad_norm": 0.13953259587287903,
"learning_rate": 9.55719557195572e-05,
"loss": 0.5503,
"step": 17
},
{
"epoch": 0.1301989150090416,
"grad_norm": 0.1669919639825821,
"learning_rate": 9.52029520295203e-05,
"loss": 0.4679,
"step": 18
},
{
"epoch": 0.13743218806509946,
"grad_norm": 0.1242203637957573,
"learning_rate": 9.48339483394834e-05,
"loss": 0.5007,
"step": 19
},
{
"epoch": 0.14466546112115733,
"grad_norm": 0.1188986673951149,
"learning_rate": 9.44649446494465e-05,
"loss": 0.4713,
"step": 20
},
{
"epoch": 0.1518987341772152,
"grad_norm": 0.10761572420597076,
"learning_rate": 9.40959409594096e-05,
"loss": 0.5056,
"step": 21
},
{
"epoch": 0.15913200723327306,
"grad_norm": 0.0960497260093689,
"learning_rate": 9.37269372693727e-05,
"loss": 0.4628,
"step": 22
},
{
"epoch": 0.16636528028933092,
"grad_norm": 0.09519140422344208,
"learning_rate": 9.33579335793358e-05,
"loss": 0.5124,
"step": 23
},
{
"epoch": 0.1735985533453888,
"grad_norm": 0.09196989238262177,
"learning_rate": 9.298892988929889e-05,
"loss": 0.4828,
"step": 24
},
{
"epoch": 0.18083182640144665,
"grad_norm": 0.0806601420044899,
"learning_rate": 9.2619926199262e-05,
"loss": 0.4565,
"step": 25
},
{
"epoch": 0.18806509945750452,
"grad_norm": 0.09966598451137543,
"learning_rate": 9.22509225092251e-05,
"loss": 0.4614,
"step": 26
},
{
"epoch": 0.19529837251356238,
"grad_norm": 0.0852133184671402,
"learning_rate": 9.18819188191882e-05,
"loss": 0.5014,
"step": 27
},
{
"epoch": 0.20253164556962025,
"grad_norm": 0.1128767654299736,
"learning_rate": 9.15129151291513e-05,
"loss": 0.4671,
"step": 28
},
{
"epoch": 0.20976491862567812,
"grad_norm": 0.08861073851585388,
"learning_rate": 9.11439114391144e-05,
"loss": 0.4635,
"step": 29
},
{
"epoch": 0.21699819168173598,
"grad_norm": 0.08107278496026993,
"learning_rate": 9.077490774907749e-05,
"loss": 0.4796,
"step": 30
},
{
"epoch": 0.22423146473779385,
"grad_norm": 0.08996085822582245,
"learning_rate": 9.040590405904059e-05,
"loss": 0.4691,
"step": 31
},
{
"epoch": 0.2314647377938517,
"grad_norm": 0.11098525673151016,
"learning_rate": 9.00369003690037e-05,
"loss": 0.4796,
"step": 32
},
{
"epoch": 0.23869801084990958,
"grad_norm": 0.09458429366350174,
"learning_rate": 8.966789667896679e-05,
"loss": 0.4645,
"step": 33
},
{
"epoch": 0.24593128390596744,
"grad_norm": 0.09619155526161194,
"learning_rate": 8.92988929889299e-05,
"loss": 0.4653,
"step": 34
},
{
"epoch": 0.25316455696202533,
"grad_norm": 0.0921136811375618,
"learning_rate": 8.892988929889299e-05,
"loss": 0.4627,
"step": 35
},
{
"epoch": 0.2603978300180832,
"grad_norm": 0.0855991542339325,
"learning_rate": 8.85608856088561e-05,
"loss": 0.4703,
"step": 36
},
{
"epoch": 0.26763110307414106,
"grad_norm": 0.08778424561023712,
"learning_rate": 8.819188191881919e-05,
"loss": 0.4555,
"step": 37
},
{
"epoch": 0.27486437613019893,
"grad_norm": 0.08391483128070831,
"learning_rate": 8.782287822878229e-05,
"loss": 0.4438,
"step": 38
},
{
"epoch": 0.2820976491862568,
"grad_norm": 0.084780752658844,
"learning_rate": 8.74538745387454e-05,
"loss": 0.4674,
"step": 39
},
{
"epoch": 0.28933092224231466,
"grad_norm": 0.10035159438848495,
"learning_rate": 8.708487084870849e-05,
"loss": 0.3861,
"step": 40
},
{
"epoch": 0.2965641952983725,
"grad_norm": 0.08539566397666931,
"learning_rate": 8.67158671586716e-05,
"loss": 0.4729,
"step": 41
},
{
"epoch": 0.3037974683544304,
"grad_norm": 0.0902961790561676,
"learning_rate": 8.634686346863469e-05,
"loss": 0.4292,
"step": 42
},
{
"epoch": 0.31103074141048825,
"grad_norm": 0.08653435111045837,
"learning_rate": 8.59778597785978e-05,
"loss": 0.4643,
"step": 43
},
{
"epoch": 0.3182640144665461,
"grad_norm": 0.079147107899189,
"learning_rate": 8.560885608856088e-05,
"loss": 0.4705,
"step": 44
},
{
"epoch": 0.325497287522604,
"grad_norm": 0.10212749987840652,
"learning_rate": 8.523985239852399e-05,
"loss": 0.4731,
"step": 45
},
{
"epoch": 0.33273056057866185,
"grad_norm": 0.10761305689811707,
"learning_rate": 8.48708487084871e-05,
"loss": 0.4227,
"step": 46
},
{
"epoch": 0.3399638336347197,
"grad_norm": 0.08965682238340378,
"learning_rate": 8.450184501845019e-05,
"loss": 0.4233,
"step": 47
},
{
"epoch": 0.3471971066907776,
"grad_norm": 0.08309852331876755,
"learning_rate": 8.41328413284133e-05,
"loss": 0.4551,
"step": 48
},
{
"epoch": 0.35443037974683544,
"grad_norm": 0.07594166696071625,
"learning_rate": 8.376383763837639e-05,
"loss": 0.4217,
"step": 49
},
{
"epoch": 0.3616636528028933,
"grad_norm": 0.08812757581472397,
"learning_rate": 8.339483394833948e-05,
"loss": 0.4255,
"step": 50
},
{
"epoch": 0.3688969258589512,
"grad_norm": 0.08730728924274445,
"learning_rate": 8.302583025830258e-05,
"loss": 0.4128,
"step": 51
},
{
"epoch": 0.37613019891500904,
"grad_norm": 0.09240879118442535,
"learning_rate": 8.265682656826569e-05,
"loss": 0.4653,
"step": 52
},
{
"epoch": 0.3833634719710669,
"grad_norm": 0.08884631842374802,
"learning_rate": 8.228782287822878e-05,
"loss": 0.4742,
"step": 53
},
{
"epoch": 0.39059674502712477,
"grad_norm": 0.0874699056148529,
"learning_rate": 8.191881918819189e-05,
"loss": 0.3974,
"step": 54
},
{
"epoch": 0.39783001808318263,
"grad_norm": 0.08770371228456497,
"learning_rate": 8.154981549815498e-05,
"loss": 0.4451,
"step": 55
},
{
"epoch": 0.4050632911392405,
"grad_norm": 0.08077302575111389,
"learning_rate": 8.118081180811809e-05,
"loss": 0.4346,
"step": 56
},
{
"epoch": 0.41229656419529837,
"grad_norm": 0.09575826674699783,
"learning_rate": 8.081180811808118e-05,
"loss": 0.4494,
"step": 57
},
{
"epoch": 0.41952983725135623,
"grad_norm": 0.08907500654459,
"learning_rate": 8.044280442804428e-05,
"loss": 0.4918,
"step": 58
},
{
"epoch": 0.4267631103074141,
"grad_norm": 0.0987161323428154,
"learning_rate": 8.007380073800739e-05,
"loss": 0.4145,
"step": 59
},
{
"epoch": 0.43399638336347196,
"grad_norm": 0.07847721874713898,
"learning_rate": 7.970479704797048e-05,
"loss": 0.4415,
"step": 60
},
{
"epoch": 0.4412296564195298,
"grad_norm": 0.08011084794998169,
"learning_rate": 7.933579335793359e-05,
"loss": 0.398,
"step": 61
},
{
"epoch": 0.4484629294755877,
"grad_norm": 0.0902714878320694,
"learning_rate": 7.896678966789668e-05,
"loss": 0.4167,
"step": 62
},
{
"epoch": 0.45569620253164556,
"grad_norm": 0.0794120728969574,
"learning_rate": 7.859778597785979e-05,
"loss": 0.388,
"step": 63
},
{
"epoch": 0.4629294755877034,
"grad_norm": 0.07921893149614334,
"learning_rate": 7.822878228782288e-05,
"loss": 0.4366,
"step": 64
},
{
"epoch": 0.4701627486437613,
"grad_norm": 0.09170780330896378,
"learning_rate": 7.785977859778598e-05,
"loss": 0.4349,
"step": 65
},
{
"epoch": 0.47739602169981915,
"grad_norm": 0.084025539457798,
"learning_rate": 7.749077490774908e-05,
"loss": 0.4183,
"step": 66
},
{
"epoch": 0.484629294755877,
"grad_norm": 0.08458651602268219,
"learning_rate": 7.712177121771218e-05,
"loss": 0.405,
"step": 67
},
{
"epoch": 0.4918625678119349,
"grad_norm": 0.09312830120325089,
"learning_rate": 7.675276752767529e-05,
"loss": 0.4499,
"step": 68
},
{
"epoch": 0.49909584086799275,
"grad_norm": 0.09968719631433487,
"learning_rate": 7.638376383763838e-05,
"loss": 0.4457,
"step": 69
},
{
"epoch": 0.5063291139240507,
"grad_norm": 0.0874272808432579,
"learning_rate": 7.601476014760149e-05,
"loss": 0.418,
"step": 70
},
{
"epoch": 0.5135623869801085,
"grad_norm": 0.23602771759033203,
"learning_rate": 7.564575645756458e-05,
"loss": 0.4256,
"step": 71
},
{
"epoch": 0.5207956600361664,
"grad_norm": 0.08977050334215164,
"learning_rate": 7.527675276752768e-05,
"loss": 0.4294,
"step": 72
},
{
"epoch": 0.5280289330922242,
"grad_norm": 0.10940728336572647,
"learning_rate": 7.490774907749077e-05,
"loss": 0.4049,
"step": 73
},
{
"epoch": 0.5352622061482821,
"grad_norm": 0.08830838650465012,
"learning_rate": 7.453874538745388e-05,
"loss": 0.4485,
"step": 74
},
{
"epoch": 0.5424954792043399,
"grad_norm": 0.1179407611489296,
"learning_rate": 7.416974169741697e-05,
"loss": 0.4356,
"step": 75
},
{
"epoch": 0.5497287522603979,
"grad_norm": 0.08437330275774002,
"learning_rate": 7.380073800738008e-05,
"loss": 0.4223,
"step": 76
},
{
"epoch": 0.5569620253164557,
"grad_norm": 0.08194423466920853,
"learning_rate": 7.343173431734319e-05,
"loss": 0.4233,
"step": 77
},
{
"epoch": 0.5641952983725136,
"grad_norm": 0.10404617339372635,
"learning_rate": 7.306273062730628e-05,
"loss": 0.4049,
"step": 78
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.07664304226636887,
"learning_rate": 7.269372693726938e-05,
"loss": 0.4025,
"step": 79
},
{
"epoch": 0.5786618444846293,
"grad_norm": 0.07666060328483582,
"learning_rate": 7.232472324723247e-05,
"loss": 0.3881,
"step": 80
},
{
"epoch": 0.5858951175406871,
"grad_norm": 0.08287478983402252,
"learning_rate": 7.195571955719558e-05,
"loss": 0.413,
"step": 81
},
{
"epoch": 0.593128390596745,
"grad_norm": 0.13137191534042358,
"learning_rate": 7.158671586715867e-05,
"loss": 0.3793,
"step": 82
},
{
"epoch": 0.6003616636528029,
"grad_norm": 0.07782291620969772,
"learning_rate": 7.121771217712178e-05,
"loss": 0.4577,
"step": 83
},
{
"epoch": 0.6075949367088608,
"grad_norm": 0.07620645314455032,
"learning_rate": 7.084870848708487e-05,
"loss": 0.4075,
"step": 84
},
{
"epoch": 0.6148282097649186,
"grad_norm": 0.07860767841339111,
"learning_rate": 7.047970479704797e-05,
"loss": 0.3953,
"step": 85
},
{
"epoch": 0.6220614828209765,
"grad_norm": 0.08548825234174728,
"learning_rate": 7.011070110701108e-05,
"loss": 0.3983,
"step": 86
},
{
"epoch": 0.6292947558770343,
"grad_norm": 0.08447708934545517,
"learning_rate": 6.974169741697417e-05,
"loss": 0.3816,
"step": 87
},
{
"epoch": 0.6365280289330922,
"grad_norm": 0.09994114190340042,
"learning_rate": 6.937269372693728e-05,
"loss": 0.453,
"step": 88
},
{
"epoch": 0.64376130198915,
"grad_norm": 0.0818142294883728,
"learning_rate": 6.900369003690037e-05,
"loss": 0.4038,
"step": 89
},
{
"epoch": 0.650994575045208,
"grad_norm": 0.1171378344297409,
"learning_rate": 6.863468634686348e-05,
"loss": 0.4143,
"step": 90
},
{
"epoch": 0.6582278481012658,
"grad_norm": 0.08540075272321701,
"learning_rate": 6.826568265682657e-05,
"loss": 0.4273,
"step": 91
},
{
"epoch": 0.6654611211573237,
"grad_norm": 0.08481594175100327,
"learning_rate": 6.789667896678967e-05,
"loss": 0.4258,
"step": 92
},
{
"epoch": 0.6726943942133815,
"grad_norm": 0.11805010586977005,
"learning_rate": 6.752767527675276e-05,
"loss": 0.4122,
"step": 93
},
{
"epoch": 0.6799276672694394,
"grad_norm": 0.09864703565835953,
"learning_rate": 6.715867158671587e-05,
"loss": 0.4385,
"step": 94
},
{
"epoch": 0.6871609403254972,
"grad_norm": 0.08949287980794907,
"learning_rate": 6.678966789667896e-05,
"loss": 0.4023,
"step": 95
},
{
"epoch": 0.6943942133815552,
"grad_norm": 0.1031746193766594,
"learning_rate": 6.642066420664207e-05,
"loss": 0.4477,
"step": 96
},
{
"epoch": 0.701627486437613,
"grad_norm": 0.09214156121015549,
"learning_rate": 6.605166051660518e-05,
"loss": 0.4245,
"step": 97
},
{
"epoch": 0.7088607594936709,
"grad_norm": 0.08504794538021088,
"learning_rate": 6.568265682656827e-05,
"loss": 0.4031,
"step": 98
},
{
"epoch": 0.7160940325497287,
"grad_norm": 0.08531343191862106,
"learning_rate": 6.531365313653137e-05,
"loss": 0.4216,
"step": 99
},
{
"epoch": 0.7233273056057866,
"grad_norm": 0.08720685541629791,
"learning_rate": 6.494464944649446e-05,
"loss": 0.4259,
"step": 100
},
{
"epoch": 0.7305605786618445,
"grad_norm": 0.09215860813856125,
"learning_rate": 6.457564575645757e-05,
"loss": 0.4344,
"step": 101
},
{
"epoch": 0.7377938517179023,
"grad_norm": 0.15789636969566345,
"learning_rate": 6.420664206642066e-05,
"loss": 0.397,
"step": 102
},
{
"epoch": 0.7450271247739603,
"grad_norm": 0.0937284305691719,
"learning_rate": 6.383763837638377e-05,
"loss": 0.3967,
"step": 103
},
{
"epoch": 0.7522603978300181,
"grad_norm": 0.08737578988075256,
"learning_rate": 6.346863468634686e-05,
"loss": 0.4398,
"step": 104
},
{
"epoch": 0.759493670886076,
"grad_norm": 0.09929600358009338,
"learning_rate": 6.309963099630997e-05,
"loss": 0.3853,
"step": 105
},
{
"epoch": 0.7667269439421338,
"grad_norm": 0.08004901558160782,
"learning_rate": 6.273062730627307e-05,
"loss": 0.4303,
"step": 106
},
{
"epoch": 0.7739602169981917,
"grad_norm": 0.12077657133340836,
"learning_rate": 6.236162361623616e-05,
"loss": 0.4028,
"step": 107
},
{
"epoch": 0.7811934900542495,
"grad_norm": 0.09763742983341217,
"learning_rate": 6.199261992619927e-05,
"loss": 0.3978,
"step": 108
},
{
"epoch": 0.7884267631103075,
"grad_norm": 0.09035875648260117,
"learning_rate": 6.162361623616236e-05,
"loss": 0.4255,
"step": 109
},
{
"epoch": 0.7956600361663653,
"grad_norm": 0.08758627623319626,
"learning_rate": 6.125461254612547e-05,
"loss": 0.4174,
"step": 110
},
{
"epoch": 0.8028933092224232,
"grad_norm": 0.08751779049634933,
"learning_rate": 6.0885608856088563e-05,
"loss": 0.3956,
"step": 111
},
{
"epoch": 0.810126582278481,
"grad_norm": 0.08873230963945389,
"learning_rate": 6.0516605166051664e-05,
"loss": 0.3919,
"step": 112
},
{
"epoch": 0.8173598553345389,
"grad_norm": 0.093721404671669,
"learning_rate": 6.014760147601476e-05,
"loss": 0.3987,
"step": 113
},
{
"epoch": 0.8245931283905967,
"grad_norm": 0.09308823943138123,
"learning_rate": 5.9778597785977866e-05,
"loss": 0.4344,
"step": 114
},
{
"epoch": 0.8318264014466547,
"grad_norm": 0.09041070193052292,
"learning_rate": 5.940959409594096e-05,
"loss": 0.4278,
"step": 115
},
{
"epoch": 0.8390596745027125,
"grad_norm": 0.08302716165781021,
"learning_rate": 5.904059040590406e-05,
"loss": 0.3806,
"step": 116
},
{
"epoch": 0.8462929475587704,
"grad_norm": 0.09246783703565598,
"learning_rate": 5.867158671586717e-05,
"loss": 0.4459,
"step": 117
},
{
"epoch": 0.8535262206148282,
"grad_norm": 0.09770411998033524,
"learning_rate": 5.830258302583026e-05,
"loss": 0.4026,
"step": 118
},
{
"epoch": 0.8607594936708861,
"grad_norm": 0.08586518466472626,
"learning_rate": 5.7933579335793364e-05,
"loss": 0.3768,
"step": 119
},
{
"epoch": 0.8679927667269439,
"grad_norm": 0.09560060501098633,
"learning_rate": 5.756457564575646e-05,
"loss": 0.4223,
"step": 120
},
{
"epoch": 0.8752260397830018,
"grad_norm": 0.11039328575134277,
"learning_rate": 5.7195571955719566e-05,
"loss": 0.4136,
"step": 121
},
{
"epoch": 0.8824593128390597,
"grad_norm": 0.09169413149356842,
"learning_rate": 5.682656826568265e-05,
"loss": 0.4136,
"step": 122
},
{
"epoch": 0.8896925858951176,
"grad_norm": 0.08437476307153702,
"learning_rate": 5.645756457564576e-05,
"loss": 0.4148,
"step": 123
},
{
"epoch": 0.8969258589511754,
"grad_norm": 0.09134487807750702,
"learning_rate": 5.6088560885608855e-05,
"loss": 0.3816,
"step": 124
},
{
"epoch": 0.9041591320072333,
"grad_norm": 0.10290499776601791,
"learning_rate": 5.5719557195571956e-05,
"loss": 0.4165,
"step": 125
},
{
"epoch": 0.9113924050632911,
"grad_norm": 0.09082211554050446,
"learning_rate": 5.535055350553506e-05,
"loss": 0.4035,
"step": 126
},
{
"epoch": 0.918625678119349,
"grad_norm": 0.08819489926099777,
"learning_rate": 5.498154981549816e-05,
"loss": 0.3979,
"step": 127
},
{
"epoch": 0.9258589511754068,
"grad_norm": 0.13042791187763214,
"learning_rate": 5.461254612546126e-05,
"loss": 0.3724,
"step": 128
},
{
"epoch": 0.9330922242314648,
"grad_norm": 0.08860334753990173,
"learning_rate": 5.424354243542435e-05,
"loss": 0.4137,
"step": 129
},
{
"epoch": 0.9403254972875226,
"grad_norm": 0.09522847831249237,
"learning_rate": 5.387453874538746e-05,
"loss": 0.434,
"step": 130
},
{
"epoch": 0.9475587703435805,
"grad_norm": 0.11293577402830124,
"learning_rate": 5.3505535055350554e-05,
"loss": 0.3895,
"step": 131
},
{
"epoch": 0.9547920433996383,
"grad_norm": 0.08971936255693436,
"learning_rate": 5.3136531365313655e-05,
"loss": 0.4036,
"step": 132
},
{
"epoch": 0.9620253164556962,
"grad_norm": 0.0903216153383255,
"learning_rate": 5.276752767527675e-05,
"loss": 0.409,
"step": 133
},
{
"epoch": 0.969258589511754,
"grad_norm": 0.09261274337768555,
"learning_rate": 5.239852398523986e-05,
"loss": 0.4091,
"step": 134
},
{
"epoch": 0.976491862567812,
"grad_norm": 0.08835142105817795,
"learning_rate": 5.202952029520295e-05,
"loss": 0.4108,
"step": 135
},
{
"epoch": 0.9837251356238698,
"grad_norm": 0.09010673314332962,
"learning_rate": 5.166051660516605e-05,
"loss": 0.3736,
"step": 136
},
{
"epoch": 0.9909584086799277,
"grad_norm": 0.09533007442951202,
"learning_rate": 5.129151291512916e-05,
"loss": 0.3847,
"step": 137
},
{
"epoch": 0.9981916817359855,
"grad_norm": 0.10801621526479721,
"learning_rate": 5.0922509225092254e-05,
"loss": 0.4156,
"step": 138
},
{
"epoch": 1.0054249547920433,
"grad_norm": 0.11016502231359482,
"learning_rate": 5.0553505535055354e-05,
"loss": 0.3826,
"step": 139
},
{
"epoch": 1.0126582278481013,
"grad_norm": 0.0894998237490654,
"learning_rate": 5.018450184501845e-05,
"loss": 0.3728,
"step": 140
},
{
"epoch": 1.0198915009041591,
"grad_norm": 0.09203777462244034,
"learning_rate": 4.9815498154981556e-05,
"loss": 0.3744,
"step": 141
},
{
"epoch": 1.027124773960217,
"grad_norm": 0.09373292326927185,
"learning_rate": 4.944649446494466e-05,
"loss": 0.3901,
"step": 142
},
{
"epoch": 1.0343580470162748,
"grad_norm": 0.09995180368423462,
"learning_rate": 4.907749077490775e-05,
"loss": 0.4163,
"step": 143
},
{
"epoch": 1.0415913200723328,
"grad_norm": 0.09669777750968933,
"learning_rate": 4.870848708487085e-05,
"loss": 0.3637,
"step": 144
},
{
"epoch": 1.0488245931283906,
"grad_norm": 0.09966889768838882,
"learning_rate": 4.833948339483395e-05,
"loss": 0.3968,
"step": 145
},
{
"epoch": 1.0560578661844484,
"grad_norm": 0.09891881793737411,
"learning_rate": 4.797047970479705e-05,
"loss": 0.3493,
"step": 146
},
{
"epoch": 1.0632911392405062,
"grad_norm": 0.10730701684951782,
"learning_rate": 4.760147601476015e-05,
"loss": 0.3589,
"step": 147
},
{
"epoch": 1.0705244122965643,
"grad_norm": 0.2513408660888672,
"learning_rate": 4.723247232472325e-05,
"loss": 0.3956,
"step": 148
},
{
"epoch": 1.077757685352622,
"grad_norm": 0.11056191474199295,
"learning_rate": 4.686346863468635e-05,
"loss": 0.3704,
"step": 149
},
{
"epoch": 1.0849909584086799,
"grad_norm": 0.1271658092737198,
"learning_rate": 4.6494464944649444e-05,
"loss": 0.3505,
"step": 150
},
{
"epoch": 1.092224231464738,
"grad_norm": 0.10155227780342102,
"learning_rate": 4.612546125461255e-05,
"loss": 0.3737,
"step": 151
},
{
"epoch": 1.0994575045207957,
"grad_norm": 0.10666938871145248,
"learning_rate": 4.575645756457565e-05,
"loss": 0.3743,
"step": 152
},
{
"epoch": 1.1066907775768535,
"grad_norm": 0.1048259511590004,
"learning_rate": 4.5387453874538747e-05,
"loss": 0.3598,
"step": 153
},
{
"epoch": 1.1139240506329113,
"grad_norm": 0.10699986666440964,
"learning_rate": 4.501845018450185e-05,
"loss": 0.4048,
"step": 154
},
{
"epoch": 1.1211573236889691,
"grad_norm": 0.09939727187156677,
"learning_rate": 4.464944649446495e-05,
"loss": 0.3325,
"step": 155
},
{
"epoch": 1.1283905967450272,
"grad_norm": 0.1125202625989914,
"learning_rate": 4.428044280442805e-05,
"loss": 0.3404,
"step": 156
},
{
"epoch": 1.135623869801085,
"grad_norm": 0.09763387590646744,
"learning_rate": 4.391143911439114e-05,
"loss": 0.407,
"step": 157
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.10112911462783813,
"learning_rate": 4.3542435424354244e-05,
"loss": 0.4015,
"step": 158
},
{
"epoch": 1.1500904159132008,
"grad_norm": 0.10274617373943329,
"learning_rate": 4.3173431734317345e-05,
"loss": 0.3506,
"step": 159
},
{
"epoch": 1.1573236889692586,
"grad_norm": 0.1066206619143486,
"learning_rate": 4.280442804428044e-05,
"loss": 0.3732,
"step": 160
},
{
"epoch": 1.1645569620253164,
"grad_norm": 0.10560917854309082,
"learning_rate": 4.243542435424355e-05,
"loss": 0.3672,
"step": 161
},
{
"epoch": 1.1717902350813743,
"grad_norm": 0.11759281903505325,
"learning_rate": 4.206642066420665e-05,
"loss": 0.3762,
"step": 162
},
{
"epoch": 1.179023508137432,
"grad_norm": 0.10840923339128494,
"learning_rate": 4.169741697416974e-05,
"loss": 0.3682,
"step": 163
},
{
"epoch": 1.18625678119349,
"grad_norm": 0.11086386442184448,
"learning_rate": 4.132841328413284e-05,
"loss": 0.3614,
"step": 164
},
{
"epoch": 1.193490054249548,
"grad_norm": 0.1039872020483017,
"learning_rate": 4.0959409594095944e-05,
"loss": 0.3694,
"step": 165
},
{
"epoch": 1.2007233273056057,
"grad_norm": 0.104191854596138,
"learning_rate": 4.0590405904059045e-05,
"loss": 0.3472,
"step": 166
},
{
"epoch": 1.2079566003616637,
"grad_norm": 0.10681155323982239,
"learning_rate": 4.022140221402214e-05,
"loss": 0.366,
"step": 167
},
{
"epoch": 1.2151898734177216,
"grad_norm": 0.10887061804533005,
"learning_rate": 3.985239852398524e-05,
"loss": 0.3604,
"step": 168
},
{
"epoch": 1.2224231464737794,
"grad_norm": 0.11261174827814102,
"learning_rate": 3.948339483394834e-05,
"loss": 0.3258,
"step": 169
},
{
"epoch": 1.2296564195298372,
"grad_norm": 0.11184070259332657,
"learning_rate": 3.911439114391144e-05,
"loss": 0.3403,
"step": 170
},
{
"epoch": 1.2368896925858952,
"grad_norm": 0.10564827173948288,
"learning_rate": 3.874538745387454e-05,
"loss": 0.3541,
"step": 171
},
{
"epoch": 1.244122965641953,
"grad_norm": 0.11773871630430222,
"learning_rate": 3.837638376383764e-05,
"loss": 0.3853,
"step": 172
},
{
"epoch": 1.2513562386980108,
"grad_norm": 0.10713667422533035,
"learning_rate": 3.8007380073800744e-05,
"loss": 0.3185,
"step": 173
},
{
"epoch": 1.2585895117540686,
"grad_norm": 0.10778232663869858,
"learning_rate": 3.763837638376384e-05,
"loss": 0.3211,
"step": 174
},
{
"epoch": 1.2658227848101267,
"grad_norm": 0.10577027499675751,
"learning_rate": 3.726937269372694e-05,
"loss": 0.3393,
"step": 175
},
{
"epoch": 1.2730560578661845,
"grad_norm": 0.11202805489301682,
"learning_rate": 3.690036900369004e-05,
"loss": 0.3582,
"step": 176
},
{
"epoch": 1.2802893309222423,
"grad_norm": 0.10652373731136322,
"learning_rate": 3.653136531365314e-05,
"loss": 0.3456,
"step": 177
},
{
"epoch": 1.2875226039783003,
"grad_norm": 0.14714586734771729,
"learning_rate": 3.6162361623616235e-05,
"loss": 0.3417,
"step": 178
},
{
"epoch": 1.2947558770343581,
"grad_norm": 0.10715153813362122,
"learning_rate": 3.5793357933579336e-05,
"loss": 0.3412,
"step": 179
},
{
"epoch": 1.301989150090416,
"grad_norm": 0.11732357740402222,
"learning_rate": 3.542435424354244e-05,
"loss": 0.3792,
"step": 180
},
{
"epoch": 1.3092224231464737,
"grad_norm": 0.17690294981002808,
"learning_rate": 3.505535055350554e-05,
"loss": 0.3686,
"step": 181
},
{
"epoch": 1.3164556962025316,
"grad_norm": 0.10941877961158752,
"learning_rate": 3.468634686346864e-05,
"loss": 0.3582,
"step": 182
},
{
"epoch": 1.3236889692585896,
"grad_norm": 0.14567327499389648,
"learning_rate": 3.431734317343174e-05,
"loss": 0.336,
"step": 183
},
{
"epoch": 1.3309222423146474,
"grad_norm": 0.12351348996162415,
"learning_rate": 3.3948339483394833e-05,
"loss": 0.3533,
"step": 184
},
{
"epoch": 1.3381555153707052,
"grad_norm": 0.11480335891246796,
"learning_rate": 3.3579335793357934e-05,
"loss": 0.3482,
"step": 185
},
{
"epoch": 1.3453887884267632,
"grad_norm": 0.12205879390239716,
"learning_rate": 3.3210332103321035e-05,
"loss": 0.3385,
"step": 186
},
{
"epoch": 1.352622061482821,
"grad_norm": 0.11025048792362213,
"learning_rate": 3.2841328413284136e-05,
"loss": 0.3578,
"step": 187
},
{
"epoch": 1.3598553345388789,
"grad_norm": 0.11126164346933365,
"learning_rate": 3.247232472324723e-05,
"loss": 0.3307,
"step": 188
},
{
"epoch": 1.3670886075949367,
"grad_norm": 0.1165134385228157,
"learning_rate": 3.210332103321033e-05,
"loss": 0.3789,
"step": 189
},
{
"epoch": 1.3743218806509945,
"grad_norm": 0.11351227760314941,
"learning_rate": 3.173431734317343e-05,
"loss": 0.3996,
"step": 190
},
{
"epoch": 1.3815551537070525,
"grad_norm": 0.13154295086860657,
"learning_rate": 3.136531365313653e-05,
"loss": 0.3965,
"step": 191
},
{
"epoch": 1.3887884267631103,
"grad_norm": 0.11638002842664719,
"learning_rate": 3.0996309963099634e-05,
"loss": 0.3801,
"step": 192
},
{
"epoch": 1.3960216998191681,
"grad_norm": 0.11662168800830841,
"learning_rate": 3.0627306273062735e-05,
"loss": 0.3358,
"step": 193
},
{
"epoch": 1.4032549728752262,
"grad_norm": 0.11403173208236694,
"learning_rate": 3.0258302583025832e-05,
"loss": 0.3158,
"step": 194
},
{
"epoch": 1.410488245931284,
"grad_norm": 0.11493474245071411,
"learning_rate": 2.9889298892988933e-05,
"loss": 0.3533,
"step": 195
},
{
"epoch": 1.4177215189873418,
"grad_norm": 0.11141743510961533,
"learning_rate": 2.952029520295203e-05,
"loss": 0.353,
"step": 196
},
{
"epoch": 1.4249547920433996,
"grad_norm": 0.12248177826404572,
"learning_rate": 2.915129151291513e-05,
"loss": 0.352,
"step": 197
},
{
"epoch": 1.4321880650994574,
"grad_norm": 0.1919812560081482,
"learning_rate": 2.878228782287823e-05,
"loss": 0.3796,
"step": 198
},
{
"epoch": 1.4394213381555154,
"grad_norm": 0.11889157444238663,
"learning_rate": 2.8413284132841326e-05,
"loss": 0.3685,
"step": 199
},
{
"epoch": 1.4466546112115732,
"grad_norm": 0.11996401846408844,
"learning_rate": 2.8044280442804427e-05,
"loss": 0.3215,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 276,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.61836422398301e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}