{
"best_metric": 0.35439521074295044,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.012984905047881837,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.492452523940918e-05,
"grad_norm": 4.540472507476807,
"learning_rate": 1e-05,
"loss": 2.1793,
"step": 1
},
{
"epoch": 6.492452523940918e-05,
"eval_loss": 0.558050274848938,
"eval_runtime": 2718.0417,
"eval_samples_per_second": 9.544,
"eval_steps_per_second": 2.386,
"step": 1
},
{
"epoch": 0.00012984905047881836,
"grad_norm": 6.050493240356445,
"learning_rate": 2e-05,
"loss": 2.7294,
"step": 2
},
{
"epoch": 0.00019477357571822757,
"grad_norm": 4.454987525939941,
"learning_rate": 3e-05,
"loss": 2.1744,
"step": 3
},
{
"epoch": 0.0002596981009576367,
"grad_norm": 3.7849512100219727,
"learning_rate": 4e-05,
"loss": 2.436,
"step": 4
},
{
"epoch": 0.00032462262619704593,
"grad_norm": 4.17337703704834,
"learning_rate": 5e-05,
"loss": 1.824,
"step": 5
},
{
"epoch": 0.00038954715143645513,
"grad_norm": 3.64642071723938,
"learning_rate": 6e-05,
"loss": 2.1666,
"step": 6
},
{
"epoch": 0.0004544716766758643,
"grad_norm": 3.3155298233032227,
"learning_rate": 7e-05,
"loss": 2.1527,
"step": 7
},
{
"epoch": 0.0005193962019152734,
"grad_norm": 3.41972279548645,
"learning_rate": 8e-05,
"loss": 2.037,
"step": 8
},
{
"epoch": 0.0005843207271546826,
"grad_norm": 3.2290754318237305,
"learning_rate": 9e-05,
"loss": 1.7392,
"step": 9
},
{
"epoch": 0.0006492452523940919,
"grad_norm": 2.8902621269226074,
"learning_rate": 0.0001,
"loss": 1.9498,
"step": 10
},
{
"epoch": 0.0007141697776335011,
"grad_norm": 3.157255172729492,
"learning_rate": 9.999316524962345e-05,
"loss": 1.6533,
"step": 11
},
{
"epoch": 0.0007790943028729103,
"grad_norm": 2.9064061641693115,
"learning_rate": 9.997266286704631e-05,
"loss": 1.8565,
"step": 12
},
{
"epoch": 0.0008440188281123195,
"grad_norm": 2.6529300212860107,
"learning_rate": 9.993849845741524e-05,
"loss": 1.5706,
"step": 13
},
{
"epoch": 0.0009089433533517286,
"grad_norm": 3.223670482635498,
"learning_rate": 9.989068136093873e-05,
"loss": 1.6391,
"step": 14
},
{
"epoch": 0.0009738678785911378,
"grad_norm": 2.7879912853240967,
"learning_rate": 9.98292246503335e-05,
"loss": 1.4823,
"step": 15
},
{
"epoch": 0.0010387924038305469,
"grad_norm": 2.6725378036499023,
"learning_rate": 9.975414512725057e-05,
"loss": 1.4706,
"step": 16
},
{
"epoch": 0.0011037169290699562,
"grad_norm": 2.8224666118621826,
"learning_rate": 9.966546331768191e-05,
"loss": 1.8116,
"step": 17
},
{
"epoch": 0.0011686414543093653,
"grad_norm": 3.0412087440490723,
"learning_rate": 9.956320346634876e-05,
"loss": 1.5247,
"step": 18
},
{
"epoch": 0.0012335659795487746,
"grad_norm": 2.8261916637420654,
"learning_rate": 9.944739353007344e-05,
"loss": 1.4624,
"step": 19
},
{
"epoch": 0.0012984905047881837,
"grad_norm": 2.813544988632202,
"learning_rate": 9.931806517013612e-05,
"loss": 1.5645,
"step": 20
},
{
"epoch": 0.001363415030027593,
"grad_norm": 2.84627103805542,
"learning_rate": 9.917525374361912e-05,
"loss": 1.7431,
"step": 21
},
{
"epoch": 0.0014283395552670021,
"grad_norm": 2.620612144470215,
"learning_rate": 9.901899829374047e-05,
"loss": 1.5353,
"step": 22
},
{
"epoch": 0.0014932640805064112,
"grad_norm": 2.7557530403137207,
"learning_rate": 9.884934153917997e-05,
"loss": 1.5994,
"step": 23
},
{
"epoch": 0.0015581886057458205,
"grad_norm": 2.5682120323181152,
"learning_rate": 9.86663298624003e-05,
"loss": 1.4973,
"step": 24
},
{
"epoch": 0.0016231131309852296,
"grad_norm": 2.783432960510254,
"learning_rate": 9.847001329696653e-05,
"loss": 1.652,
"step": 25
},
{
"epoch": 0.001688037656224639,
"grad_norm": 2.9067819118499756,
"learning_rate": 9.826044551386744e-05,
"loss": 1.4789,
"step": 26
},
{
"epoch": 0.001752962181464048,
"grad_norm": 2.970757484436035,
"learning_rate": 9.803768380684242e-05,
"loss": 1.5241,
"step": 27
},
{
"epoch": 0.0018178867067034571,
"grad_norm": 2.8803842067718506,
"learning_rate": 9.780178907671789e-05,
"loss": 1.6238,
"step": 28
},
{
"epoch": 0.0018828112319428665,
"grad_norm": 2.8296127319335938,
"learning_rate": 9.755282581475769e-05,
"loss": 1.5477,
"step": 29
},
{
"epoch": 0.0019477357571822756,
"grad_norm": 2.8743557929992676,
"learning_rate": 9.729086208503174e-05,
"loss": 1.5855,
"step": 30
},
{
"epoch": 0.002012660282421685,
"grad_norm": 3.8598484992980957,
"learning_rate": 9.701596950580806e-05,
"loss": 1.4768,
"step": 31
},
{
"epoch": 0.0020775848076610938,
"grad_norm": 4.713948726654053,
"learning_rate": 9.672822322997305e-05,
"loss": 1.458,
"step": 32
},
{
"epoch": 0.002142509332900503,
"grad_norm": 2.804553270339966,
"learning_rate": 9.642770192448536e-05,
"loss": 1.3238,
"step": 33
},
{
"epoch": 0.0022074338581399124,
"grad_norm": 3.3731460571289062,
"learning_rate": 9.611448774886924e-05,
"loss": 1.7495,
"step": 34
},
{
"epoch": 0.0022723583833793217,
"grad_norm": 3.1557204723358154,
"learning_rate": 9.578866633275288e-05,
"loss": 1.4697,
"step": 35
},
{
"epoch": 0.0023372829086187306,
"grad_norm": 3.188981056213379,
"learning_rate": 9.545032675245813e-05,
"loss": 1.4911,
"step": 36
},
{
"epoch": 0.00240220743385814,
"grad_norm": 3.202531337738037,
"learning_rate": 9.509956150664796e-05,
"loss": 1.3946,
"step": 37
},
{
"epoch": 0.0024671319590975492,
"grad_norm": 3.1800291538238525,
"learning_rate": 9.473646649103818e-05,
"loss": 1.5396,
"step": 38
},
{
"epoch": 0.002532056484336958,
"grad_norm": 3.344865322113037,
"learning_rate": 9.43611409721806e-05,
"loss": 1.5526,
"step": 39
},
{
"epoch": 0.0025969810095763674,
"grad_norm": 3.3695080280303955,
"learning_rate": 9.397368756032445e-05,
"loss": 1.577,
"step": 40
},
{
"epoch": 0.0026619055348157767,
"grad_norm": 3.3032450675964355,
"learning_rate": 9.357421218136386e-05,
"loss": 1.4475,
"step": 41
},
{
"epoch": 0.002726830060055186,
"grad_norm": 3.5895912647247314,
"learning_rate": 9.316282404787871e-05,
"loss": 1.5896,
"step": 42
},
{
"epoch": 0.002791754585294595,
"grad_norm": 3.4149351119995117,
"learning_rate": 9.273963562927695e-05,
"loss": 1.3504,
"step": 43
},
{
"epoch": 0.0028566791105340042,
"grad_norm": 3.534778118133545,
"learning_rate": 9.230476262104677e-05,
"loss": 1.3705,
"step": 44
},
{
"epoch": 0.0029216036357734136,
"grad_norm": 3.6278538703918457,
"learning_rate": 9.185832391312644e-05,
"loss": 1.6635,
"step": 45
},
{
"epoch": 0.0029865281610128224,
"grad_norm": 3.4827487468719482,
"learning_rate": 9.140044155740101e-05,
"loss": 1.4033,
"step": 46
},
{
"epoch": 0.0030514526862522318,
"grad_norm": 3.8299813270568848,
"learning_rate": 9.093124073433463e-05,
"loss": 1.5587,
"step": 47
},
{
"epoch": 0.003116377211491641,
"grad_norm": 4.618851184844971,
"learning_rate": 9.045084971874738e-05,
"loss": 1.4306,
"step": 48
},
{
"epoch": 0.00318130173673105,
"grad_norm": 4.444945812225342,
"learning_rate": 8.995939984474624e-05,
"loss": 1.5862,
"step": 49
},
{
"epoch": 0.0032462262619704593,
"grad_norm": 6.026591777801514,
"learning_rate": 8.945702546981969e-05,
"loss": 1.8037,
"step": 50
},
{
"epoch": 0.0032462262619704593,
"eval_loss": 0.42139238119125366,
"eval_runtime": 2729.0429,
"eval_samples_per_second": 9.506,
"eval_steps_per_second": 2.377,
"step": 50
},
{
"epoch": 0.0033111507872098686,
"grad_norm": 4.079866409301758,
"learning_rate": 8.894386393810563e-05,
"loss": 2.0916,
"step": 51
},
{
"epoch": 0.003376075312449278,
"grad_norm": 3.098527669906616,
"learning_rate": 8.842005554284296e-05,
"loss": 2.2287,
"step": 52
},
{
"epoch": 0.003440999837688687,
"grad_norm": 2.1626501083374023,
"learning_rate": 8.788574348801675e-05,
"loss": 1.7575,
"step": 53
},
{
"epoch": 0.003505924362928096,
"grad_norm": 1.966941475868225,
"learning_rate": 8.73410738492077e-05,
"loss": 1.8485,
"step": 54
},
{
"epoch": 0.0035708488881675054,
"grad_norm": 3.7294154167175293,
"learning_rate": 8.678619553365659e-05,
"loss": 1.728,
"step": 55
},
{
"epoch": 0.0036357734134069143,
"grad_norm": 2.359339952468872,
"learning_rate": 8.622126023955446e-05,
"loss": 1.7742,
"step": 56
},
{
"epoch": 0.0037006979386463236,
"grad_norm": 2.1914119720458984,
"learning_rate": 8.564642241456986e-05,
"loss": 1.5247,
"step": 57
},
{
"epoch": 0.003765622463885733,
"grad_norm": 1.9947062730789185,
"learning_rate": 8.506183921362443e-05,
"loss": 1.5345,
"step": 58
},
{
"epoch": 0.003830546989125142,
"grad_norm": 2.044597864151001,
"learning_rate": 8.44676704559283e-05,
"loss": 1.6942,
"step": 59
},
{
"epoch": 0.003895471514364551,
"grad_norm": 2.267122507095337,
"learning_rate": 8.386407858128706e-05,
"loss": 1.7808,
"step": 60
},
{
"epoch": 0.0039603960396039604,
"grad_norm": 2.0266075134277344,
"learning_rate": 8.32512286056924e-05,
"loss": 1.6411,
"step": 61
},
{
"epoch": 0.00402532056484337,
"grad_norm": 2.1180357933044434,
"learning_rate": 8.262928807620843e-05,
"loss": 1.7271,
"step": 62
},
{
"epoch": 0.004090245090082779,
"grad_norm": 2.1186745166778564,
"learning_rate": 8.199842702516583e-05,
"loss": 1.4394,
"step": 63
},
{
"epoch": 0.0041551696153221875,
"grad_norm": 2.276301145553589,
"learning_rate": 8.135881792367686e-05,
"loss": 1.5767,
"step": 64
},
{
"epoch": 0.004220094140561597,
"grad_norm": 2.281355857849121,
"learning_rate": 8.07106356344834e-05,
"loss": 1.8338,
"step": 65
},
{
"epoch": 0.004285018665801006,
"grad_norm": 2.0677578449249268,
"learning_rate": 8.005405736415126e-05,
"loss": 1.2292,
"step": 66
},
{
"epoch": 0.0043499431910404155,
"grad_norm": 2.4492828845977783,
"learning_rate": 7.938926261462366e-05,
"loss": 1.5271,
"step": 67
},
{
"epoch": 0.004414867716279825,
"grad_norm": 2.246931314468384,
"learning_rate": 7.871643313414718e-05,
"loss": 1.4539,
"step": 68
},
{
"epoch": 0.004479792241519234,
"grad_norm": 2.3499512672424316,
"learning_rate": 7.803575286758364e-05,
"loss": 1.6166,
"step": 69
},
{
"epoch": 0.004544716766758643,
"grad_norm": 2.2578580379486084,
"learning_rate": 7.734740790612136e-05,
"loss": 1.3928,
"step": 70
},
{
"epoch": 0.004609641291998052,
"grad_norm": 2.352757692337036,
"learning_rate": 7.66515864363997e-05,
"loss": 1.5195,
"step": 71
},
{
"epoch": 0.004674565817237461,
"grad_norm": 2.227308988571167,
"learning_rate": 7.594847868906076e-05,
"loss": 1.4991,
"step": 72
},
{
"epoch": 0.0047394903424768705,
"grad_norm": 2.5206069946289062,
"learning_rate": 7.52382768867422e-05,
"loss": 1.6782,
"step": 73
},
{
"epoch": 0.00480441486771628,
"grad_norm": 2.4039251804351807,
"learning_rate": 7.452117519152542e-05,
"loss": 1.357,
"step": 74
},
{
"epoch": 0.004869339392955689,
"grad_norm": 2.5262157917022705,
"learning_rate": 7.379736965185368e-05,
"loss": 1.4331,
"step": 75
},
{
"epoch": 0.0049342639181950984,
"grad_norm": 2.4065189361572266,
"learning_rate": 7.30670581489344e-05,
"loss": 1.5405,
"step": 76
},
{
"epoch": 0.004999188443434508,
"grad_norm": 2.6150152683258057,
"learning_rate": 7.233044034264034e-05,
"loss": 1.5466,
"step": 77
},
{
"epoch": 0.005064112968673916,
"grad_norm": 2.3654088973999023,
"learning_rate": 7.158771761692464e-05,
"loss": 1.3768,
"step": 78
},
{
"epoch": 0.0051290374939133255,
"grad_norm": 2.36637544631958,
"learning_rate": 7.083909302476453e-05,
"loss": 1.5675,
"step": 79
},
{
"epoch": 0.005193962019152735,
"grad_norm": 2.498779535293579,
"learning_rate": 7.008477123264848e-05,
"loss": 1.5529,
"step": 80
},
{
"epoch": 0.005258886544392144,
"grad_norm": 2.4871792793273926,
"learning_rate": 6.932495846462261e-05,
"loss": 1.5426,
"step": 81
},
{
"epoch": 0.0053238110696315535,
"grad_norm": 2.462024211883545,
"learning_rate": 6.855986244591104e-05,
"loss": 1.2875,
"step": 82
},
{
"epoch": 0.005388735594870963,
"grad_norm": 2.650010108947754,
"learning_rate": 6.778969234612584e-05,
"loss": 1.4617,
"step": 83
},
{
"epoch": 0.005453660120110372,
"grad_norm": 2.7433016300201416,
"learning_rate": 6.701465872208216e-05,
"loss": 1.4802,
"step": 84
},
{
"epoch": 0.0055185846453497805,
"grad_norm": 2.771566867828369,
"learning_rate": 6.623497346023418e-05,
"loss": 1.5809,
"step": 85
},
{
"epoch": 0.00558350917058919,
"grad_norm": 2.490809440612793,
"learning_rate": 6.545084971874738e-05,
"loss": 1.2319,
"step": 86
},
{
"epoch": 0.005648433695828599,
"grad_norm": 2.7578866481781006,
"learning_rate": 6.466250186922325e-05,
"loss": 1.4479,
"step": 87
},
{
"epoch": 0.0057133582210680085,
"grad_norm": 2.8412978649139404,
"learning_rate": 6.387014543809223e-05,
"loss": 1.4386,
"step": 88
},
{
"epoch": 0.005778282746307418,
"grad_norm": 2.8536527156829834,
"learning_rate": 6.307399704769099e-05,
"loss": 1.3228,
"step": 89
},
{
"epoch": 0.005843207271546827,
"grad_norm": 2.8993990421295166,
"learning_rate": 6.227427435703997e-05,
"loss": 1.453,
"step": 90
},
{
"epoch": 0.005908131796786236,
"grad_norm": 3.1611733436584473,
"learning_rate": 6.147119600233758e-05,
"loss": 1.5174,
"step": 91
},
{
"epoch": 0.005973056322025645,
"grad_norm": 2.999743700027466,
"learning_rate": 6.066498153718735e-05,
"loss": 1.3884,
"step": 92
},
{
"epoch": 0.006037980847265054,
"grad_norm": 3.1500251293182373,
"learning_rate": 5.985585137257401e-05,
"loss": 1.3779,
"step": 93
},
{
"epoch": 0.0061029053725044635,
"grad_norm": 3.3458006381988525,
"learning_rate": 5.90440267166055e-05,
"loss": 1.5905,
"step": 94
},
{
"epoch": 0.006167829897743873,
"grad_norm": 3.151742935180664,
"learning_rate": 5.8229729514036705e-05,
"loss": 1.3328,
"step": 95
},
{
"epoch": 0.006232754422983282,
"grad_norm": 3.4480724334716797,
"learning_rate": 5.74131823855921e-05,
"loss": 1.4021,
"step": 96
},
{
"epoch": 0.0062976789482226915,
"grad_norm": 3.1579878330230713,
"learning_rate": 5.6594608567103456e-05,
"loss": 1.2967,
"step": 97
},
{
"epoch": 0.0063626034734621,
"grad_norm": 3.8392672538757324,
"learning_rate": 5.577423184847932e-05,
"loss": 1.42,
"step": 98
},
{
"epoch": 0.006427527998701509,
"grad_norm": 3.4535746574401855,
"learning_rate": 5.495227651252315e-05,
"loss": 1.1299,
"step": 99
},
{
"epoch": 0.0064924525239409185,
"grad_norm": 5.015627861022949,
"learning_rate": 5.4128967273616625e-05,
"loss": 1.5899,
"step": 100
},
{
"epoch": 0.0064924525239409185,
"eval_loss": 0.39287006855010986,
"eval_runtime": 2725.3783,
"eval_samples_per_second": 9.518,
"eval_steps_per_second": 2.38,
"step": 100
},
{
"epoch": 0.006557377049180328,
"grad_norm": 3.354386329650879,
"learning_rate": 5.330452921628497e-05,
"loss": 2.0273,
"step": 101
},
{
"epoch": 0.006622301574419737,
"grad_norm": 2.5337774753570557,
"learning_rate": 5.247918773366112e-05,
"loss": 1.5614,
"step": 102
},
{
"epoch": 0.0066872260996591465,
"grad_norm": 2.057063341140747,
"learning_rate": 5.165316846586541e-05,
"loss": 1.8339,
"step": 103
},
{
"epoch": 0.006752150624898556,
"grad_norm": 1.8221439123153687,
"learning_rate": 5.0826697238317935e-05,
"loss": 1.55,
"step": 104
},
{
"epoch": 0.006817075150137964,
"grad_norm": 1.7888840436935425,
"learning_rate": 5e-05,
"loss": 1.8628,
"step": 105
},
{
"epoch": 0.006881999675377374,
"grad_norm": 1.8267186880111694,
"learning_rate": 4.917330276168208e-05,
"loss": 1.5607,
"step": 106
},
{
"epoch": 0.006946924200616783,
"grad_norm": 1.9880849123001099,
"learning_rate": 4.834683153413459e-05,
"loss": 1.6143,
"step": 107
},
{
"epoch": 0.007011848725856192,
"grad_norm": 1.8925626277923584,
"learning_rate": 4.7520812266338885e-05,
"loss": 1.5868,
"step": 108
},
{
"epoch": 0.0070767732510956015,
"grad_norm": 2.010892152786255,
"learning_rate": 4.669547078371504e-05,
"loss": 1.6561,
"step": 109
},
{
"epoch": 0.007141697776335011,
"grad_norm": 1.9386076927185059,
"learning_rate": 4.5871032726383386e-05,
"loss": 1.2943,
"step": 110
},
{
"epoch": 0.00720662230157442,
"grad_norm": 1.9109869003295898,
"learning_rate": 4.504772348747687e-05,
"loss": 1.4862,
"step": 111
},
{
"epoch": 0.007271546826813829,
"grad_norm": 2.005671501159668,
"learning_rate": 4.4225768151520694e-05,
"loss": 1.534,
"step": 112
},
{
"epoch": 0.007336471352053238,
"grad_norm": 2.043527603149414,
"learning_rate": 4.3405391432896555e-05,
"loss": 1.7753,
"step": 113
},
{
"epoch": 0.007401395877292647,
"grad_norm": 2.0597848892211914,
"learning_rate": 4.2586817614407895e-05,
"loss": 1.6781,
"step": 114
},
{
"epoch": 0.0074663204025320565,
"grad_norm": 2.056081771850586,
"learning_rate": 4.17702704859633e-05,
"loss": 1.6857,
"step": 115
},
{
"epoch": 0.007531244927771466,
"grad_norm": 1.9409863948822021,
"learning_rate": 4.095597328339452e-05,
"loss": 1.3481,
"step": 116
},
{
"epoch": 0.007596169453010875,
"grad_norm": 2.038977861404419,
"learning_rate": 4.0144148627425993e-05,
"loss": 1.3336,
"step": 117
},
{
"epoch": 0.007661093978250284,
"grad_norm": 1.9617688655853271,
"learning_rate": 3.933501846281267e-05,
"loss": 1.3789,
"step": 118
},
{
"epoch": 0.007726018503489693,
"grad_norm": 2.061816692352295,
"learning_rate": 3.852880399766243e-05,
"loss": 1.4228,
"step": 119
},
{
"epoch": 0.007790943028729102,
"grad_norm": 2.1589889526367188,
"learning_rate": 3.772572564296005e-05,
"loss": 1.3424,
"step": 120
},
{
"epoch": 0.007855867553968512,
"grad_norm": 2.100494384765625,
"learning_rate": 3.6926002952309016e-05,
"loss": 1.4676,
"step": 121
},
{
"epoch": 0.007920792079207921,
"grad_norm": 2.2070977687835693,
"learning_rate": 3.612985456190778e-05,
"loss": 1.6314,
"step": 122
},
{
"epoch": 0.00798571660444733,
"grad_norm": 2.2676262855529785,
"learning_rate": 3.533749813077677e-05,
"loss": 1.45,
"step": 123
},
{
"epoch": 0.00805064112968674,
"grad_norm": 2.1959469318389893,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.3145,
"step": 124
},
{
"epoch": 0.008115565654926148,
"grad_norm": 2.2538602352142334,
"learning_rate": 3.3765026539765834e-05,
"loss": 1.4348,
"step": 125
},
{
"epoch": 0.008180490180165558,
"grad_norm": 2.3079259395599365,
"learning_rate": 3.298534127791785e-05,
"loss": 1.364,
"step": 126
},
{
"epoch": 0.008245414705404967,
"grad_norm": 2.487854242324829,
"learning_rate": 3.221030765387417e-05,
"loss": 1.5644,
"step": 127
},
{
"epoch": 0.008310339230644375,
"grad_norm": 2.407414674758911,
"learning_rate": 3.144013755408895e-05,
"loss": 1.3611,
"step": 128
},
{
"epoch": 0.008375263755883785,
"grad_norm": 2.470609426498413,
"learning_rate": 3.0675041535377405e-05,
"loss": 1.4366,
"step": 129
},
{
"epoch": 0.008440188281123194,
"grad_norm": 2.462022304534912,
"learning_rate": 2.991522876735154e-05,
"loss": 1.2699,
"step": 130
},
{
"epoch": 0.008505112806362604,
"grad_norm": 2.4126827716827393,
"learning_rate": 2.916090697523549e-05,
"loss": 1.3328,
"step": 131
},
{
"epoch": 0.008570037331602012,
"grad_norm": 2.437617063522339,
"learning_rate": 2.8412282383075363e-05,
"loss": 1.3357,
"step": 132
},
{
"epoch": 0.008634961856841422,
"grad_norm": 2.4791176319122314,
"learning_rate": 2.766955965735968e-05,
"loss": 1.4554,
"step": 133
},
{
"epoch": 0.008699886382080831,
"grad_norm": 2.4162757396698,
"learning_rate": 2.693294185106562e-05,
"loss": 1.371,
"step": 134
},
{
"epoch": 0.00876481090732024,
"grad_norm": 2.373032569885254,
"learning_rate": 2.6202630348146324e-05,
"loss": 1.1934,
"step": 135
},
{
"epoch": 0.00882973543255965,
"grad_norm": 2.6824629306793213,
"learning_rate": 2.547882480847461e-05,
"loss": 1.5817,
"step": 136
},
{
"epoch": 0.008894659957799058,
"grad_norm": 2.456705093383789,
"learning_rate": 2.476172311325783e-05,
"loss": 1.1805,
"step": 137
},
{
"epoch": 0.008959584483038468,
"grad_norm": 2.6049578189849854,
"learning_rate": 2.405152131093926e-05,
"loss": 1.2282,
"step": 138
},
{
"epoch": 0.009024509008277877,
"grad_norm": 2.7366414070129395,
"learning_rate": 2.3348413563600325e-05,
"loss": 1.2222,
"step": 139
},
{
"epoch": 0.009089433533517287,
"grad_norm": 2.865466594696045,
"learning_rate": 2.2652592093878666e-05,
"loss": 1.3415,
"step": 140
},
{
"epoch": 0.009154358058756695,
"grad_norm": 3.0013866424560547,
"learning_rate": 2.196424713241637e-05,
"loss": 1.3793,
"step": 141
},
{
"epoch": 0.009219282583996104,
"grad_norm": 2.955798625946045,
"learning_rate": 2.128356686585282e-05,
"loss": 1.4376,
"step": 142
},
{
"epoch": 0.009284207109235514,
"grad_norm": 2.8766069412231445,
"learning_rate": 2.061073738537635e-05,
"loss": 1.3984,
"step": 143
},
{
"epoch": 0.009349131634474922,
"grad_norm": 2.926100254058838,
"learning_rate": 1.9945942635848748e-05,
"loss": 1.3543,
"step": 144
},
{
"epoch": 0.009414056159714333,
"grad_norm": 2.953508138656616,
"learning_rate": 1.928936436551661e-05,
"loss": 1.3205,
"step": 145
},
{
"epoch": 0.009478980684953741,
"grad_norm": 3.3534350395202637,
"learning_rate": 1.8641182076323148e-05,
"loss": 1.3131,
"step": 146
},
{
"epoch": 0.009543905210193151,
"grad_norm": 3.56013560295105,
"learning_rate": 1.800157297483417e-05,
"loss": 1.5387,
"step": 147
},
{
"epoch": 0.00960882973543256,
"grad_norm": 3.473095655441284,
"learning_rate": 1.7370711923791567e-05,
"loss": 1.313,
"step": 148
},
{
"epoch": 0.009673754260671968,
"grad_norm": 3.595202922821045,
"learning_rate": 1.6748771394307585e-05,
"loss": 1.2718,
"step": 149
},
{
"epoch": 0.009738678785911378,
"grad_norm": 4.7592339515686035,
"learning_rate": 1.6135921418712956e-05,
"loss": 1.5611,
"step": 150
},
{
"epoch": 0.009738678785911378,
"eval_loss": 0.3620188534259796,
"eval_runtime": 2727.0957,
"eval_samples_per_second": 9.512,
"eval_steps_per_second": 2.378,
"step": 150
},
{
"epoch": 0.009803603311150787,
"grad_norm": 1.852523922920227,
"learning_rate": 1.553232954407171e-05,
"loss": 1.42,
"step": 151
},
{
"epoch": 0.009868527836390197,
"grad_norm": 1.725640892982483,
"learning_rate": 1.4938160786375572e-05,
"loss": 1.3733,
"step": 152
},
{
"epoch": 0.009933452361629605,
"grad_norm": 1.6250057220458984,
"learning_rate": 1.435357758543015e-05,
"loss": 1.3382,
"step": 153
},
{
"epoch": 0.009998376886869016,
"grad_norm": 1.7621614933013916,
"learning_rate": 1.3778739760445552e-05,
"loss": 1.6195,
"step": 154
},
{
"epoch": 0.010063301412108424,
"grad_norm": 1.7789777517318726,
"learning_rate": 1.3213804466343421e-05,
"loss": 1.4922,
"step": 155
},
{
"epoch": 0.010128225937347832,
"grad_norm": 1.781243920326233,
"learning_rate": 1.2658926150792322e-05,
"loss": 1.6918,
"step": 156
},
{
"epoch": 0.010193150462587243,
"grad_norm": 1.7903107404708862,
"learning_rate": 1.2114256511983274e-05,
"loss": 1.6324,
"step": 157
},
{
"epoch": 0.010258074987826651,
"grad_norm": 1.7299500703811646,
"learning_rate": 1.157994445715706e-05,
"loss": 1.4214,
"step": 158
},
{
"epoch": 0.010322999513066061,
"grad_norm": 1.8235970735549927,
"learning_rate": 1.1056136061894384e-05,
"loss": 1.4777,
"step": 159
},
{
"epoch": 0.01038792403830547,
"grad_norm": 1.8660005331039429,
"learning_rate": 1.0542974530180327e-05,
"loss": 1.7239,
"step": 160
},
{
"epoch": 0.01045284856354488,
"grad_norm": 1.848463773727417,
"learning_rate": 1.0040600155253765e-05,
"loss": 1.5992,
"step": 161
},
{
"epoch": 0.010517773088784288,
"grad_norm": 1.7882180213928223,
"learning_rate": 9.549150281252633e-06,
"loss": 1.48,
"step": 162
},
{
"epoch": 0.010582697614023697,
"grad_norm": 1.8663733005523682,
"learning_rate": 9.068759265665384e-06,
"loss": 1.4401,
"step": 163
},
{
"epoch": 0.010647622139263107,
"grad_norm": 1.9330668449401855,
"learning_rate": 8.599558442598998e-06,
"loss": 1.5743,
"step": 164
},
{
"epoch": 0.010712546664502515,
"grad_norm": 1.853959083557129,
"learning_rate": 8.141676086873572e-06,
"loss": 1.2428,
"step": 165
},
{
"epoch": 0.010777471189741926,
"grad_norm": 1.909489393234253,
"learning_rate": 7.695237378953223e-06,
"loss": 1.2757,
"step": 166
},
{
"epoch": 0.010842395714981334,
"grad_norm": 2.0268399715423584,
"learning_rate": 7.260364370723044e-06,
"loss": 1.5055,
"step": 167
},
{
"epoch": 0.010907320240220744,
"grad_norm": 1.9924023151397705,
"learning_rate": 6.837175952121306e-06,
"loss": 1.3611,
"step": 168
},
{
"epoch": 0.010972244765460153,
"grad_norm": 2.1283366680145264,
"learning_rate": 6.425787818636131e-06,
"loss": 1.6103,
"step": 169
},
{
"epoch": 0.011037169290699561,
"grad_norm": 2.1066362857818604,
"learning_rate": 6.026312439675552e-06,
"loss": 1.47,
"step": 170
},
{
"epoch": 0.011102093815938971,
"grad_norm": 2.1233506202697754,
"learning_rate": 5.6388590278194096e-06,
"loss": 1.478,
"step": 171
},
{
"epoch": 0.01116701834117838,
"grad_norm": 2.2296395301818848,
"learning_rate": 5.263533508961827e-06,
"loss": 1.3507,
"step": 172
},
{
"epoch": 0.01123194286641779,
"grad_norm": 2.076498031616211,
"learning_rate": 4.900438493352055e-06,
"loss": 1.3114,
"step": 173
},
{
"epoch": 0.011296867391657198,
"grad_norm": 2.3783020973205566,
"learning_rate": 4.549673247541875e-06,
"loss": 1.5188,
"step": 174
},
{
"epoch": 0.011361791916896609,
"grad_norm": 2.3257882595062256,
"learning_rate": 4.2113336672471245e-06,
"loss": 1.5652,
"step": 175
},
{
"epoch": 0.011426716442136017,
"grad_norm": 2.2322728633880615,
"learning_rate": 3.885512251130763e-06,
"loss": 1.3695,
"step": 176
},
{
"epoch": 0.011491640967375425,
"grad_norm": 2.375216484069824,
"learning_rate": 3.5722980755146517e-06,
"loss": 1.4681,
"step": 177
},
{
"epoch": 0.011556565492614836,
"grad_norm": 2.3548824787139893,
"learning_rate": 3.271776770026963e-06,
"loss": 1.453,
"step": 178
},
{
"epoch": 0.011621490017854244,
"grad_norm": 2.362833261489868,
"learning_rate": 2.9840304941919415e-06,
"loss": 1.3901,
"step": 179
},
{
"epoch": 0.011686414543093654,
"grad_norm": 2.3238675594329834,
"learning_rate": 2.7091379149682685e-06,
"loss": 1.2833,
"step": 180
},
{
"epoch": 0.011751339068333063,
"grad_norm": 2.2882113456726074,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.3704,
"step": 181
},
{
"epoch": 0.011816263593572471,
"grad_norm": 2.5618388652801514,
"learning_rate": 2.1982109232821178e-06,
"loss": 1.4673,
"step": 182
},
{
"epoch": 0.011881188118811881,
"grad_norm": 2.5349512100219727,
"learning_rate": 1.962316193157593e-06,
"loss": 1.3702,
"step": 183
},
{
"epoch": 0.01194611264405129,
"grad_norm": 2.7572004795074463,
"learning_rate": 1.7395544861325718e-06,
"loss": 1.3737,
"step": 184
},
{
"epoch": 0.0120110371692907,
"grad_norm": 2.699723720550537,
"learning_rate": 1.5299867030334814e-06,
"loss": 1.4351,
"step": 185
},
{
"epoch": 0.012075961694530108,
"grad_norm": 2.928107738494873,
"learning_rate": 1.333670137599713e-06,
"loss": 1.446,
"step": 186
},
{
"epoch": 0.012140886219769519,
"grad_norm": 2.8252618312835693,
"learning_rate": 1.1506584608200367e-06,
"loss": 1.4771,
"step": 187
},
{
"epoch": 0.012205810745008927,
"grad_norm": 2.6466662883758545,
"learning_rate": 9.810017062595322e-07,
"loss": 1.2121,
"step": 188
},
{
"epoch": 0.012270735270248335,
"grad_norm": 2.8890042304992676,
"learning_rate": 8.247462563808817e-07,
"loss": 1.3702,
"step": 189
},
{
"epoch": 0.012335659795487746,
"grad_norm": 2.6212081909179688,
"learning_rate": 6.819348298638839e-07,
"loss": 1.2293,
"step": 190
},
{
"epoch": 0.012400584320727154,
"grad_norm": 3.002800464630127,
"learning_rate": 5.526064699265753e-07,
"loss": 1.4438,
"step": 191
},
{
"epoch": 0.012465508845966564,
"grad_norm": 2.90425968170166,
"learning_rate": 4.367965336512403e-07,
"loss": 1.4433,
"step": 192
},
{
"epoch": 0.012530433371205973,
"grad_norm": 3.0153472423553467,
"learning_rate": 3.3453668231809286e-07,
"loss": 1.1824,
"step": 193
},
{
"epoch": 0.012595357896445383,
"grad_norm": 2.9073667526245117,
"learning_rate": 2.458548727494292e-07,
"loss": 1.3127,
"step": 194
},
{
"epoch": 0.012660282421684791,
"grad_norm": 3.1661148071289062,
"learning_rate": 1.7077534966650766e-07,
"loss": 1.3205,
"step": 195
},
{
"epoch": 0.0127252069469242,
"grad_norm": 3.3867547512054443,
"learning_rate": 1.0931863906127327e-07,
"loss": 1.4784,
"step": 196
},
{
"epoch": 0.01279013147216361,
"grad_norm": 3.4859659671783447,
"learning_rate": 6.150154258476315e-08,
"loss": 1.2741,
"step": 197
},
{
"epoch": 0.012855055997403018,
"grad_norm": 3.688143014907837,
"learning_rate": 2.7337132953697554e-08,
"loss": 1.4224,
"step": 198
},
{
"epoch": 0.012919980522642429,
"grad_norm": 4.207131862640381,
"learning_rate": 6.834750376549792e-09,
"loss": 1.2046,
"step": 199
},
{
"epoch": 0.012984905047881837,
"grad_norm": 4.53595495223999,
"learning_rate": 0.0,
"loss": 1.4319,
"step": 200
},
{
"epoch": 0.012984905047881837,
"eval_loss": 0.35439521074295044,
"eval_runtime": 2723.7858,
"eval_samples_per_second": 9.524,
"eval_steps_per_second": 2.381,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.0516032541661594e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}