{
  "best_metric": 2.4680612087249756,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 1.4041246160596752,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014041246160596753,
      "grad_norm": 0.8327537775039673,
      "learning_rate": 2e-05,
      "loss": 1.9615,
      "step": 1
    },
    {
      "epoch": 0.014041246160596753,
      "eval_loss": 3.3978569507598877,
      "eval_runtime": 0.5068,
      "eval_samples_per_second": 98.667,
      "eval_steps_per_second": 25.654,
      "step": 1
    },
    {
      "epoch": 0.028082492321193506,
      "grad_norm": 1.332243800163269,
      "learning_rate": 4e-05,
      "loss": 1.824,
      "step": 2
    },
    {
      "epoch": 0.04212373848179026,
      "grad_norm": 1.4095451831817627,
      "learning_rate": 6e-05,
      "loss": 2.211,
      "step": 3
    },
    {
      "epoch": 0.05616498464238701,
      "grad_norm": 2.0809977054595947,
      "learning_rate": 8e-05,
      "loss": 2.3834,
      "step": 4
    },
    {
      "epoch": 0.07020623080298377,
      "grad_norm": 2.163429021835327,
      "learning_rate": 0.0001,
      "loss": 2.7688,
      "step": 5
    },
    {
      "epoch": 0.08424747696358052,
      "grad_norm": 2.884552001953125,
      "learning_rate": 9.997539658034168e-05,
      "loss": 2.8267,
      "step": 6
    },
    {
      "epoch": 0.09828872312417727,
      "grad_norm": 3.912673234939575,
      "learning_rate": 9.990161322484486e-05,
      "loss": 3.3231,
      "step": 7
    },
    {
      "epoch": 0.11232996928477403,
      "grad_norm": 5.349924564361572,
      "learning_rate": 9.977873061452552e-05,
      "loss": 3.5412,
      "step": 8
    },
    {
      "epoch": 0.12637121544537078,
      "grad_norm": 6.738022327423096,
      "learning_rate": 9.96068831197139e-05,
      "loss": 3.6021,
      "step": 9
    },
    {
      "epoch": 0.14041246160596754,
      "grad_norm": 8.926319122314453,
      "learning_rate": 9.938625865312251e-05,
      "loss": 3.5968,
      "step": 10
    },
    {
      "epoch": 0.15445370776656428,
      "grad_norm": 8.15119743347168,
      "learning_rate": 9.911709846436641e-05,
      "loss": 3.9218,
      "step": 11
    },
    {
      "epoch": 0.16849495392716105,
      "grad_norm": 12.402828216552734,
      "learning_rate": 9.879969687616027e-05,
      "loss": 4.5223,
      "step": 12
    },
    {
      "epoch": 0.18253620008775778,
      "grad_norm": 2.7531869411468506,
      "learning_rate": 9.84344009624807e-05,
      "loss": 2.1757,
      "step": 13
    },
    {
      "epoch": 0.19657744624835455,
      "grad_norm": 5.453584671020508,
      "learning_rate": 9.80216101690461e-05,
      "loss": 1.8657,
      "step": 14
    },
    {
      "epoch": 0.21061869240895129,
      "grad_norm": 6.201056003570557,
      "learning_rate": 9.756177587652856e-05,
      "loss": 2.0874,
      "step": 15
    },
    {
      "epoch": 0.22465993856954805,
      "grad_norm": 6.548382759094238,
      "learning_rate": 9.705540090697575e-05,
      "loss": 2.5228,
      "step": 16
    },
    {
      "epoch": 0.2387011847301448,
      "grad_norm": 4.07675313949585,
      "learning_rate": 9.650303897398232e-05,
      "loss": 2.5761,
      "step": 17
    },
    {
      "epoch": 0.25274243089074155,
      "grad_norm": 2.8793468475341797,
      "learning_rate": 9.590529407721231e-05,
      "loss": 2.6513,
      "step": 18
    },
    {
      "epoch": 0.2667836770513383,
      "grad_norm": 5.506067276000977,
      "learning_rate": 9.526281984193436e-05,
      "loss": 3.0722,
      "step": 19
    },
    {
      "epoch": 0.2808249232119351,
      "grad_norm": 10.677892684936523,
      "learning_rate": 9.4576318804292e-05,
      "loss": 3.2006,
      "step": 20
    },
    {
      "epoch": 0.2948661693725318,
      "grad_norm": 9.370729446411133,
      "learning_rate": 9.384654164309083e-05,
      "loss": 3.1187,
      "step": 21
    },
    {
      "epoch": 0.30890741553312856,
      "grad_norm": 7.775137901306152,
      "learning_rate": 9.30742863589421e-05,
      "loss": 3.2763,
      "step": 22
    },
    {
      "epoch": 0.3229486616937253,
      "grad_norm": 6.3419623374938965,
      "learning_rate": 9.226039740166091e-05,
      "loss": 3.7332,
      "step": 23
    },
    {
      "epoch": 0.3369899078543221,
      "grad_norm": 8.918274879455566,
      "learning_rate": 9.140576474687264e-05,
      "loss": 3.8572,
      "step": 24
    },
    {
      "epoch": 0.3510311540149188,
      "grad_norm": 15.176338195800781,
      "learning_rate": 9.051132292283771e-05,
      "loss": 5.0712,
      "step": 25
    },
    {
      "epoch": 0.3510311540149188,
      "eval_loss": 2.6227145195007324,
      "eval_runtime": 0.5047,
      "eval_samples_per_second": 99.066,
      "eval_steps_per_second": 25.757,
      "step": 25
    },
    {
      "epoch": 0.36507240017551557,
      "grad_norm": 0.5822151303291321,
      "learning_rate": 8.957804998855866e-05,
      "loss": 2.2137,
      "step": 26
    },
    {
      "epoch": 0.37911364633611233,
      "grad_norm": 0.612755298614502,
      "learning_rate": 8.860696646428693e-05,
      "loss": 1.8046,
      "step": 27
    },
    {
      "epoch": 0.3931548924967091,
      "grad_norm": 0.6512185335159302,
      "learning_rate": 8.759913421559902e-05,
      "loss": 2.0374,
      "step": 28
    },
    {
      "epoch": 0.40719613865730586,
      "grad_norm": 0.9296779036521912,
      "learning_rate": 8.655565529226198e-05,
      "loss": 2.2375,
      "step": 29
    },
    {
      "epoch": 0.42123738481790257,
      "grad_norm": 1.1704964637756348,
      "learning_rate": 8.547767072315835e-05,
      "loss": 2.4913,
      "step": 30
    },
    {
      "epoch": 0.43527863097849934,
      "grad_norm": 1.2606786489486694,
      "learning_rate": 8.436635926858759e-05,
      "loss": 2.6343,
      "step": 31
    },
    {
      "epoch": 0.4493198771390961,
      "grad_norm": 1.8587498664855957,
      "learning_rate": 8.322293613130917e-05,
      "loss": 3.0057,
      "step": 32
    },
    {
      "epoch": 0.46336112329969287,
      "grad_norm": 1.7915164232254028,
      "learning_rate": 8.204865162773613e-05,
      "loss": 3.0309,
      "step": 33
    },
    {
      "epoch": 0.4774023694602896,
      "grad_norm": 2.1659657955169678,
      "learning_rate": 8.084478982073247e-05,
      "loss": 3.1982,
      "step": 34
    },
    {
      "epoch": 0.49144361562088634,
      "grad_norm": 2.6619133949279785,
      "learning_rate": 7.961266711550922e-05,
      "loss": 3.3039,
      "step": 35
    },
    {
      "epoch": 0.5054848617814831,
      "grad_norm": 3.697298288345337,
      "learning_rate": 7.835363082015468e-05,
      "loss": 3.6746,
      "step": 36
    },
    {
      "epoch": 0.5195261079420799,
      "grad_norm": 5.550060749053955,
      "learning_rate": 7.706905767237288e-05,
      "loss": 3.8725,
      "step": 37
    },
    {
      "epoch": 0.5335673541026766,
      "grad_norm": 0.6753820180892944,
      "learning_rate": 7.576035233404096e-05,
      "loss": 2.2385,
      "step": 38
    },
    {
      "epoch": 0.5476086002632734,
      "grad_norm": 1.0300302505493164,
      "learning_rate": 7.442894585523218e-05,
      "loss": 1.7155,
      "step": 39
    },
    {
      "epoch": 0.5616498464238702,
      "grad_norm": 1.5170297622680664,
      "learning_rate": 7.307629410938363e-05,
      "loss": 1.858,
      "step": 40
    },
    {
      "epoch": 0.5756910925844668,
      "grad_norm": 2.700623035430908,
      "learning_rate": 7.170387620131993e-05,
      "loss": 2.2176,
      "step": 41
    },
    {
      "epoch": 0.5897323387450636,
      "grad_norm": 3.4165198802948,
      "learning_rate": 7.031319284987394e-05,
      "loss": 2.4351,
      "step": 42
    },
    {
      "epoch": 0.6037735849056604,
      "grad_norm": 2.0354185104370117,
      "learning_rate": 6.890576474687263e-05,
      "loss": 2.4769,
      "step": 43
    },
    {
      "epoch": 0.6178148310662571,
      "grad_norm": 1.5906285047531128,
      "learning_rate": 6.7483130894283e-05,
      "loss": 2.8552,
      "step": 44
    },
    {
      "epoch": 0.6318560772268539,
      "grad_norm": 1.8608163595199585,
      "learning_rate": 6.604684692133597e-05,
      "loss": 2.8639,
      "step": 45
    },
    {
      "epoch": 0.6458973233874507,
      "grad_norm": 2.8141560554504395,
      "learning_rate": 6.459848338346861e-05,
      "loss": 2.8932,
      "step": 46
    },
    {
      "epoch": 0.6599385695480474,
      "grad_norm": 4.370851993560791,
      "learning_rate": 6.313962404494496e-05,
      "loss": 3.1846,
      "step": 47
    },
    {
      "epoch": 0.6739798157086442,
      "grad_norm": 4.608976364135742,
      "learning_rate": 6.167186414703289e-05,
      "loss": 3.3674,
      "step": 48
    },
    {
      "epoch": 0.688021061869241,
      "grad_norm": 6.83935022354126,
      "learning_rate": 6.019680866363139e-05,
      "loss": 3.7393,
      "step": 49
    },
    {
      "epoch": 0.7020623080298376,
      "grad_norm": 11.380165100097656,
      "learning_rate": 5.8716070546254966e-05,
      "loss": 4.2031,
      "step": 50
    },
    {
      "epoch": 0.7020623080298376,
      "eval_loss": 2.51250958442688,
      "eval_runtime": 0.5058,
      "eval_samples_per_second": 98.857,
      "eval_steps_per_second": 25.703,
      "step": 50
    },
    {
      "epoch": 0.7161035541904344,
      "grad_norm": 0.2579071819782257,
      "learning_rate": 5.7231268960295e-05,
      "loss": 1.8802,
      "step": 51
    },
    {
      "epoch": 0.7301448003510311,
      "grad_norm": 0.4030728042125702,
      "learning_rate": 5.574402751448614e-05,
      "loss": 1.6649,
      "step": 52
    },
    {
      "epoch": 0.7441860465116279,
      "grad_norm": 0.5643269419670105,
      "learning_rate": 5.425597248551387e-05,
      "loss": 2.049,
      "step": 53
    },
    {
      "epoch": 0.7582272926722247,
      "grad_norm": 0.7029362916946411,
      "learning_rate": 5.2768731039705e-05,
      "loss": 2.2015,
      "step": 54
    },
    {
      "epoch": 0.7722685388328214,
      "grad_norm": 0.9349853992462158,
      "learning_rate": 5.128392945374505e-05,
      "loss": 2.4515,
      "step": 55
    },
    {
      "epoch": 0.7863097849934182,
      "grad_norm": 0.9511843323707581,
      "learning_rate": 4.980319133636863e-05,
      "loss": 2.6827,
      "step": 56
    },
    {
      "epoch": 0.800351031154015,
      "grad_norm": 1.3044384717941284,
      "learning_rate": 4.83281358529671e-05,
      "loss": 2.8935,
      "step": 57
    },
    {
      "epoch": 0.8143922773146117,
      "grad_norm": 1.4280407428741455,
      "learning_rate": 4.686037595505507e-05,
      "loss": 2.9421,
      "step": 58
    },
    {
      "epoch": 0.8284335234752084,
      "grad_norm": 1.7793262004852295,
      "learning_rate": 4.54015166165314e-05,
      "loss": 3.0256,
      "step": 59
    },
    {
      "epoch": 0.8424747696358051,
      "grad_norm": 2.2841506004333496,
      "learning_rate": 4.395315307866405e-05,
      "loss": 3.1879,
      "step": 60
    },
    {
      "epoch": 0.8565160157964019,
      "grad_norm": 3.2203478813171387,
      "learning_rate": 4.2516869105717004e-05,
      "loss": 3.5137,
      "step": 61
    },
    {
      "epoch": 0.8705572619569987,
      "grad_norm": 4.7022576332092285,
      "learning_rate": 4.109423525312738e-05,
      "loss": 3.5993,
      "step": 62
    },
    {
      "epoch": 0.8845985081175954,
      "grad_norm": 0.3040692210197449,
      "learning_rate": 3.968680715012606e-05,
      "loss": 2.1119,
      "step": 63
    },
    {
      "epoch": 0.8986397542781922,
      "grad_norm": 0.37016022205352783,
      "learning_rate": 3.829612379868006e-05,
      "loss": 1.8168,
      "step": 64
    },
    {
      "epoch": 0.912681000438789,
      "grad_norm": 0.7952561378479004,
      "learning_rate": 3.692370589061639e-05,
      "loss": 2.1704,
      "step": 65
    },
    {
      "epoch": 0.9267222465993857,
      "grad_norm": 1.0236883163452148,
      "learning_rate": 3.557105414476782e-05,
      "loss": 2.3811,
      "step": 66
    },
    {
      "epoch": 0.9407634927599825,
      "grad_norm": 1.1880155801773071,
      "learning_rate": 3.423964766595906e-05,
      "loss": 2.6713,
      "step": 67
    },
    {
      "epoch": 0.9548047389205792,
      "grad_norm": 1.3189986944198608,
      "learning_rate": 3.293094232762715e-05,
      "loss": 2.7698,
      "step": 68
    },
    {
      "epoch": 0.9688459850811759,
      "grad_norm": 1.691144347190857,
      "learning_rate": 3.164636917984534e-05,
      "loss": 3.0012,
      "step": 69
    },
    {
      "epoch": 0.9828872312417727,
      "grad_norm": 2.0627942085266113,
      "learning_rate": 3.0387332884490805e-05,
      "loss": 3.2204,
      "step": 70
    },
    {
      "epoch": 0.9969284774023695,
      "grad_norm": 4.911252498626709,
      "learning_rate": 2.9155210179267546e-05,
      "loss": 3.705,
      "step": 71
    },
    {
      "epoch": 1.0109697235629662,
      "grad_norm": 11.608001708984375,
      "learning_rate": 2.7951348372263875e-05,
      "loss": 4.9898,
      "step": 72
    },
    {
      "epoch": 1.025010969723563,
      "grad_norm": 0.3285130262374878,
      "learning_rate": 2.677706386869083e-05,
      "loss": 1.4574,
      "step": 73
    },
    {
      "epoch": 1.0390522158841597,
      "grad_norm": 0.49857988953590393,
      "learning_rate": 2.5633640731412412e-05,
      "loss": 1.9682,
      "step": 74
    },
    {
      "epoch": 1.0530934620447565,
      "grad_norm": 0.6986849308013916,
      "learning_rate": 2.4522329276841663e-05,
      "loss": 1.9892,
      "step": 75
    },
    {
      "epoch": 1.0530934620447565,
      "eval_loss": 2.4749317169189453,
      "eval_runtime": 0.5047,
      "eval_samples_per_second": 99.064,
      "eval_steps_per_second": 25.757,
      "step": 75
    },
    {
      "epoch": 1.0671347082053533,
      "grad_norm": 0.7719098925590515,
      "learning_rate": 2.3444344707738015e-05,
      "loss": 2.3545,
      "step": 76
    },
    {
      "epoch": 1.08117595436595,
      "grad_norm": 0.8498649001121521,
      "learning_rate": 2.2400865784401e-05,
      "loss": 2.5526,
      "step": 77
    },
    {
      "epoch": 1.0952172005265468,
      "grad_norm": 1.025146484375,
      "learning_rate": 2.1393033535713093e-05,
      "loss": 2.88,
      "step": 78
    },
    {
      "epoch": 1.1092584466871436,
      "grad_norm": 1.3467072248458862,
      "learning_rate": 2.0421950011441354e-05,
      "loss": 2.7158,
      "step": 79
    },
    {
      "epoch": 1.1232996928477403,
      "grad_norm": 1.4670014381408691,
      "learning_rate": 1.9488677077162295e-05,
      "loss": 2.8973,
      "step": 80
    },
    {
      "epoch": 1.1373409390083369,
      "grad_norm": 1.7755794525146484,
      "learning_rate": 1.8594235253127375e-05,
      "loss": 2.9892,
      "step": 81
    },
    {
      "epoch": 1.1513821851689336,
      "grad_norm": 2.5536394119262695,
      "learning_rate": 1.77396025983391e-05,
      "loss": 3.3171,
      "step": 82
    },
    {
      "epoch": 1.1654234313295304,
      "grad_norm": 3.319047451019287,
      "learning_rate": 1.6925713641057904e-05,
      "loss": 3.4315,
      "step": 83
    },
    {
      "epoch": 1.1794646774901272,
      "grad_norm": 1.3052607774734497,
      "learning_rate": 1.6153458356909176e-05,
      "loss": 2.2188,
      "step": 84
    },
    {
      "epoch": 1.193505923650724,
      "grad_norm": 0.33176228404045105,
      "learning_rate": 1.5423681195707997e-05,
      "loss": 2.1613,
      "step": 85
    },
    {
      "epoch": 1.2075471698113207,
      "grad_norm": 0.4036065340042114,
      "learning_rate": 1.4737180158065644e-05,
      "loss": 1.8675,
      "step": 86
    },
    {
      "epoch": 1.2215884159719175,
      "grad_norm": 0.5903648734092712,
      "learning_rate": 1.4094705922787687e-05,
      "loss": 1.971,
      "step": 87
    },
    {
      "epoch": 1.2356296621325142,
      "grad_norm": 0.816626250743866,
      "learning_rate": 1.3496961026017687e-05,
      "loss": 2.235,
      "step": 88
    },
    {
      "epoch": 1.249670908293111,
      "grad_norm": 0.8417714834213257,
      "learning_rate": 1.2944599093024267e-05,
      "loss": 2.3109,
      "step": 89
    },
    {
      "epoch": 1.2637121544537078,
      "grad_norm": 0.9843570590019226,
      "learning_rate": 1.2438224123471442e-05,
      "loss": 2.4662,
      "step": 90
    },
    {
      "epoch": 1.2777534006143045,
      "grad_norm": 1.1565916538238525,
      "learning_rate": 1.1978389830953907e-05,
      "loss": 2.7704,
      "step": 91
    },
    {
      "epoch": 1.2917946467749013,
      "grad_norm": 1.2723060846328735,
      "learning_rate": 1.1565599037519316e-05,
      "loss": 2.7589,
      "step": 92
    },
    {
      "epoch": 1.305835892935498,
      "grad_norm": 1.5614339113235474,
      "learning_rate": 1.1200303123839742e-05,
      "loss": 2.9048,
      "step": 93
    },
    {
      "epoch": 1.3198771390960948,
      "grad_norm": 1.889614462852478,
      "learning_rate": 1.088290153563358e-05,
      "loss": 3.2427,
      "step": 94
    },
    {
      "epoch": 1.3339183852566916,
      "grad_norm": 2.9528310298919678,
      "learning_rate": 1.0613741346877497e-05,
      "loss": 3.3262,
      "step": 95
    },
    {
      "epoch": 1.3479596314172884,
      "grad_norm": 6.621575832366943,
      "learning_rate": 1.0393116880286118e-05,
      "loss": 4.0112,
      "step": 96
    },
    {
      "epoch": 1.3620008775778851,
      "grad_norm": 2.7021443843841553,
      "learning_rate": 1.0221269385474488e-05,
      "loss": 2.6302,
      "step": 97
    },
    {
      "epoch": 1.3760421237384817,
      "grad_norm": 0.3131318986415863,
      "learning_rate": 1.0098386775155147e-05,
      "loss": 1.4645,
      "step": 98
    },
    {
      "epoch": 1.3900833698990787,
      "grad_norm": 0.44030845165252686,
      "learning_rate": 1.0024603419658329e-05,
      "loss": 1.8176,
      "step": 99
    },
    {
      "epoch": 1.4041246160596752,
      "grad_norm": 0.6400426626205444,
      "learning_rate": 1e-05,
      "loss": 2.0536,
      "step": 100
    },
    {
      "epoch": 1.4041246160596752,
      "eval_loss": 2.4680612087249756,
      "eval_runtime": 0.5034,
      "eval_samples_per_second": 99.333,
      "eval_steps_per_second": 25.827,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.90601825288192e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}