{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7692307692307693,
  "eval_steps": 500,
  "global_step": 140,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005494505494505495,
      "grad_norm": 0.24404513835906982,
      "learning_rate": 4e-05,
      "loss": 1.3921,
      "step": 1
    },
    {
      "epoch": 0.01098901098901099,
      "grad_norm": 0.25437888503074646,
      "learning_rate": 8e-05,
      "loss": 1.3836,
      "step": 2
    },
    {
      "epoch": 0.016483516483516484,
      "grad_norm": 0.26151242852211,
      "learning_rate": 0.00012,
      "loss": 1.4148,
      "step": 3
    },
    {
      "epoch": 0.02197802197802198,
      "grad_norm": 0.22220638394355774,
      "learning_rate": 0.00016,
      "loss": 1.3359,
      "step": 4
    },
    {
      "epoch": 0.027472527472527472,
      "grad_norm": 0.15509554743766785,
      "learning_rate": 0.0002,
      "loss": 1.319,
      "step": 5
    },
    {
      "epoch": 0.03296703296703297,
      "grad_norm": 0.12672632932662964,
      "learning_rate": 0.0001999842488663838,
      "loss": 1.0436,
      "step": 6
    },
    {
      "epoch": 0.038461538461538464,
      "grad_norm": 0.11594443768262863,
      "learning_rate": 0.00019993700042749937,
      "loss": 1.1249,
      "step": 7
    },
    {
      "epoch": 0.04395604395604396,
      "grad_norm": 0.16988468170166016,
      "learning_rate": 0.0001998582695676762,
      "loss": 0.978,
      "step": 8
    },
    {
      "epoch": 0.04945054945054945,
      "grad_norm": 0.16477666795253754,
      "learning_rate": 0.00019974808108892016,
      "loss": 0.8757,
      "step": 9
    },
    {
      "epoch": 0.054945054945054944,
      "grad_norm": 0.16230840981006622,
      "learning_rate": 0.00019960646970310027,
      "loss": 0.7733,
      "step": 10
    },
    {
      "epoch": 0.06043956043956044,
      "grad_norm": 0.16096125543117523,
      "learning_rate": 0.00019943348002101371,
      "loss": 0.6367,
      "step": 11
    },
    {
      "epoch": 0.06593406593406594,
      "grad_norm": 0.17279262840747833,
      "learning_rate": 0.00019922916653833248,
      "loss": 0.5396,
      "step": 12
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 0.16378775238990784,
      "learning_rate": 0.0001989935936184358,
      "loss": 0.4376,
      "step": 13
    },
    {
      "epoch": 0.07692307692307693,
      "grad_norm": 0.16942651569843292,
      "learning_rate": 0.00019872683547213446,
      "loss": 0.481,
      "step": 14
    },
    {
      "epoch": 0.08241758241758242,
      "grad_norm": 0.12053713202476501,
      "learning_rate": 0.00019842897613429262,
      "loss": 0.4296,
      "step": 15
    },
    {
      "epoch": 0.08791208791208792,
      "grad_norm": 0.12406298518180847,
      "learning_rate": 0.00019810010943735479,
      "loss": 0.2941,
      "step": 16
    },
    {
      "epoch": 0.09340659340659341,
      "grad_norm": 0.10388841480016708,
      "learning_rate": 0.00019774033898178667,
      "loss": 0.41,
      "step": 17
    },
    {
      "epoch": 0.0989010989010989,
      "grad_norm": 0.08794189989566803,
      "learning_rate": 0.00019734977810343865,
      "loss": 0.3058,
      "step": 18
    },
    {
      "epoch": 0.1043956043956044,
      "grad_norm": 0.09318755567073822,
      "learning_rate": 0.00019692854983784235,
      "loss": 0.3256,
      "step": 19
    },
    {
      "epoch": 0.10989010989010989,
      "grad_norm": 0.07988230884075165,
      "learning_rate": 0.0001964767868814516,
      "loss": 0.3801,
      "step": 20
    },
    {
      "epoch": 0.11538461538461539,
      "grad_norm": 0.06846581399440765,
      "learning_rate": 0.0001959946315498402,
      "loss": 0.3631,
      "step": 21
    },
    {
      "epoch": 0.12087912087912088,
      "grad_norm": 0.09932168573141098,
      "learning_rate": 0.0001954822357328692,
      "loss": 0.4244,
      "step": 22
    },
    {
      "epoch": 0.12637362637362637,
      "grad_norm": 0.07102064788341522,
      "learning_rate": 0.00019493976084683813,
      "loss": 0.3414,
      "step": 23
    },
    {
      "epoch": 0.13186813186813187,
      "grad_norm": 0.07554670423269272,
      "learning_rate": 0.00019436737778363527,
      "loss": 0.3206,
      "step": 24
    },
    {
      "epoch": 0.13736263736263737,
      "grad_norm": 0.06632914394140244,
      "learning_rate": 0.0001937652668569028,
      "loss": 0.3037,
      "step": 25
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.07803873717784882,
      "learning_rate": 0.00019313361774523385,
      "loss": 0.285,
      "step": 26
    },
    {
      "epoch": 0.14835164835164835,
      "grad_norm": 0.09491067379713058,
      "learning_rate": 0.0001924726294324196,
      "loss": 0.3508,
      "step": 27
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 0.08697083592414856,
      "learning_rate": 0.00019178251014476466,
      "loss": 0.3275,
      "step": 28
    },
    {
      "epoch": 0.15934065934065933,
      "grad_norm": 0.08148891478776932,
      "learning_rate": 0.00019106347728549135,
      "loss": 0.2527,
      "step": 29
    },
    {
      "epoch": 0.16483516483516483,
      "grad_norm": 0.08663398027420044,
      "learning_rate": 0.00019031575736625238,
      "loss": 0.2488,
      "step": 30
    },
    {
      "epoch": 0.17032967032967034,
      "grad_norm": 0.08816689252853394,
      "learning_rate": 0.00018953958593577493,
      "loss": 0.2511,
      "step": 31
    },
    {
      "epoch": 0.17582417582417584,
      "grad_norm": 0.0893981084227562,
      "learning_rate": 0.00018873520750565718,
      "loss": 0.2498,
      "step": 32
    },
    {
      "epoch": 0.1813186813186813,
      "grad_norm": 0.08614163100719452,
      "learning_rate": 0.00018790287547334176,
      "loss": 0.2316,
      "step": 33
    },
    {
      "epoch": 0.18681318681318682,
      "grad_norm": 0.10351146012544632,
      "learning_rate": 0.00018704285204228973,
      "loss": 0.2733,
      "step": 34
    },
    {
      "epoch": 0.19230769230769232,
      "grad_norm": 0.08243081718683243,
      "learning_rate": 0.0001861554081393806,
      "loss": 0.2278,
      "step": 35
    },
    {
      "epoch": 0.1978021978021978,
      "grad_norm": 0.097475066781044,
      "learning_rate": 0.00018524082332956428,
      "loss": 0.2459,
      "step": 36
    },
    {
      "epoch": 0.2032967032967033,
      "grad_norm": 0.08360479772090912,
      "learning_rate": 0.00018429938572779152,
      "loss": 0.2342,
      "step": 37
    },
    {
      "epoch": 0.2087912087912088,
      "grad_norm": 0.09262491017580032,
      "learning_rate": 0.0001833313919082515,
      "loss": 0.234,
      "step": 38
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 0.11125553399324417,
      "learning_rate": 0.00018233714681094404,
      "loss": 0.3167,
      "step": 39
    },
    {
      "epoch": 0.21978021978021978,
      "grad_norm": 0.08724531531333923,
      "learning_rate": 0.00018131696364561667,
      "loss": 0.217,
      "step": 40
    },
    {
      "epoch": 0.22527472527472528,
      "grad_norm": 0.08204808831214905,
      "learning_rate": 0.00018027116379309638,
      "loss": 0.2091,
      "step": 41
    },
    {
      "epoch": 0.23076923076923078,
      "grad_norm": 0.11685692518949509,
      "learning_rate": 0.0001792000767040474,
      "loss": 0.2218,
      "step": 42
    },
    {
      "epoch": 0.23626373626373626,
      "grad_norm": 0.10508550703525543,
      "learning_rate": 0.00017810403979518681,
      "loss": 0.1959,
      "step": 43
    },
    {
      "epoch": 0.24175824175824176,
      "grad_norm": 0.13068167865276337,
      "learning_rate": 0.00017698339834299061,
      "loss": 0.1616,
      "step": 44
    },
    {
      "epoch": 0.24725274725274726,
      "grad_norm": 0.1325182318687439,
      "learning_rate": 0.00017583850537492387,
      "loss": 0.1398,
      "step": 45
    },
    {
      "epoch": 0.25274725274725274,
      "grad_norm": 0.14320158958435059,
      "learning_rate": 0.0001746697215582288,
      "loss": 0.2189,
      "step": 46
    },
    {
      "epoch": 0.25824175824175827,
      "grad_norm": 0.11843352764844894,
      "learning_rate": 0.00017347741508630672,
      "loss": 0.1261,
      "step": 47
    },
    {
      "epoch": 0.26373626373626374,
      "grad_norm": 0.12478692084550858,
      "learning_rate": 0.00017226196156272874,
      "loss": 0.2066,
      "step": 48
    },
    {
      "epoch": 0.2692307692307692,
      "grad_norm": 0.11781392246484756,
      "learning_rate": 0.00017102374388291183,
      "loss": 0.2071,
      "step": 49
    },
    {
      "epoch": 0.27472527472527475,
      "grad_norm": 0.10600878298282623,
      "learning_rate": 0.0001697631521134985,
      "loss": 0.203,
      "step": 50
    },
    {
      "epoch": 0.2802197802197802,
      "grad_norm": 0.10019295662641525,
      "learning_rate": 0.00016848058336947657,
      "loss": 0.2265,
      "step": 51
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.08874925225973129,
      "learning_rate": 0.0001671764416890793,
      "loss": 0.1193,
      "step": 52
    },
    {
      "epoch": 0.29120879120879123,
      "grad_norm": 0.0978781059384346,
      "learning_rate": 0.00016585113790650388,
      "loss": 0.1311,
      "step": 53
    },
    {
      "epoch": 0.2967032967032967,
      "grad_norm": 0.12399563938379288,
      "learning_rate": 0.00016450508952248956,
      "loss": 0.1398,
      "step": 54
    },
    {
      "epoch": 0.3021978021978022,
      "grad_norm": 0.11917977780103683,
      "learning_rate": 0.00016313872057279534,
      "loss": 0.149,
      "step": 55
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 0.1155751645565033,
      "learning_rate": 0.0001617524614946192,
      "loss": 0.1819,
      "step": 56
    },
    {
      "epoch": 0.3131868131868132,
      "grad_norm": 0.09933874011039734,
      "learning_rate": 0.0001603467489910004,
      "loss": 0.0918,
      "step": 57
    },
    {
      "epoch": 0.31868131868131866,
      "grad_norm": 0.1160176694393158,
      "learning_rate": 0.00015892202589324835,
      "loss": 0.0983,
      "step": 58
    },
    {
      "epoch": 0.3241758241758242,
      "grad_norm": 0.12610028684139252,
      "learning_rate": 0.0001574787410214407,
      "loss": 0.1345,
      "step": 59
    },
    {
      "epoch": 0.32967032967032966,
      "grad_norm": 0.09502307325601578,
      "learning_rate": 0.0001560173490430346,
      "loss": 0.0753,
      "step": 60
    },
    {
      "epoch": 0.33516483516483514,
      "grad_norm": 0.12514734268188477,
      "learning_rate": 0.0001545383103296365,
      "loss": 0.1218,
      "step": 61
    },
    {
      "epoch": 0.34065934065934067,
      "grad_norm": 0.10972923040390015,
      "learning_rate": 0.00015304209081197425,
      "loss": 0.0988,
      "step": 62
    },
    {
      "epoch": 0.34615384615384615,
      "grad_norm": 0.14897628128528595,
      "learning_rate": 0.0001515291618331188,
      "loss": 0.1729,
      "step": 63
    },
    {
      "epoch": 0.3516483516483517,
      "grad_norm": 0.10310367494821548,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.1261,
      "step": 64
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.11633659899234772,
      "learning_rate": 0.00014845508703326504,
      "loss": 0.1215,
      "step": 65
    },
    {
      "epoch": 0.3626373626373626,
      "grad_norm": 0.12089478224515915,
      "learning_rate": 0.00014689490961552513,
      "loss": 0.1137,
      "step": 66
    },
    {
      "epoch": 0.36813186813186816,
      "grad_norm": 0.12880107760429382,
      "learning_rate": 0.00014531995923803973,
      "loss": 0.1254,
      "step": 67
    },
    {
      "epoch": 0.37362637362637363,
      "grad_norm": 0.10465559363365173,
      "learning_rate": 0.00014373073204588556,
      "loss": 0.0866,
      "step": 68
    },
    {
      "epoch": 0.3791208791208791,
      "grad_norm": 0.09393016248941422,
      "learning_rate": 0.00014212772868165958,
      "loss": 0.0671,
      "step": 69
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 0.09669383615255356,
      "learning_rate": 0.00014051145412776535,
      "loss": 0.0835,
      "step": 70
    },
    {
      "epoch": 0.3901098901098901,
      "grad_norm": 0.10696904361248016,
      "learning_rate": 0.00013888241754733208,
      "loss": 0.1146,
      "step": 71
    },
    {
      "epoch": 0.3956043956043956,
      "grad_norm": 0.18148086965084076,
      "learning_rate": 0.0001372411321238166,
      "loss": 0.1937,
      "step": 72
    },
    {
      "epoch": 0.4010989010989011,
      "grad_norm": 0.11100444942712784,
      "learning_rate": 0.00013558811489933908,
      "loss": 0.1204,
      "step": 73
    },
    {
      "epoch": 0.4065934065934066,
      "grad_norm": 0.08652353286743164,
      "learning_rate": 0.00013392388661180303,
      "loss": 0.0682,
      "step": 74
    },
    {
      "epoch": 0.41208791208791207,
      "grad_norm": 0.09470050036907196,
      "learning_rate": 0.0001322489715308509,
      "loss": 0.0638,
      "step": 75
    },
    {
      "epoch": 0.4175824175824176,
      "grad_norm": 0.11347407102584839,
      "learning_rate": 0.00013056389729270738,
      "loss": 0.081,
      "step": 76
    },
    {
      "epoch": 0.4230769230769231,
      "grad_norm": 0.1232433095574379,
      "learning_rate": 0.0001288691947339621,
      "loss": 0.1186,
      "step": 77
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.08470042794942856,
      "learning_rate": 0.00012716539772434388,
      "loss": 0.061,
      "step": 78
    },
    {
      "epoch": 0.4340659340659341,
      "grad_norm": 0.10937822610139847,
      "learning_rate": 0.00012545304299853977,
      "loss": 0.0826,
      "step": 79
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 0.094424307346344,
      "learning_rate": 0.0001237326699871115,
      "loss": 0.0689,
      "step": 80
    },
    {
      "epoch": 0.44505494505494503,
      "grad_norm": 0.11375095695257187,
      "learning_rate": 0.00012200482064656248,
      "loss": 0.1107,
      "step": 81
    },
    {
      "epoch": 0.45054945054945056,
      "grad_norm": 0.11452016979455948,
      "learning_rate": 0.00012027003928860937,
      "loss": 0.0886,
      "step": 82
    },
    {
      "epoch": 0.45604395604395603,
      "grad_norm": 0.09138914942741394,
      "learning_rate": 0.00011852887240871145,
      "loss": 0.0825,
      "step": 83
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": 0.09349829703569412,
      "learning_rate": 0.00011678186851391218,
      "loss": 0.0853,
      "step": 84
    },
    {
      "epoch": 0.46703296703296704,
      "grad_norm": 0.0957733765244484,
      "learning_rate": 0.00011502957795004705,
      "loss": 0.0705,
      "step": 85
    },
    {
      "epoch": 0.4725274725274725,
      "grad_norm": 0.11201193928718567,
      "learning_rate": 0.00011327255272837221,
      "loss": 0.0782,
      "step": 86
    },
    {
      "epoch": 0.47802197802197804,
      "grad_norm": 0.11689481139183044,
      "learning_rate": 0.00011151134635166829,
      "loss": 0.0634,
      "step": 87
    },
    {
      "epoch": 0.4835164835164835,
      "grad_norm": 0.09048259258270264,
      "learning_rate": 0.00010974651363987465,
      "loss": 0.075,
      "step": 88
    },
    {
      "epoch": 0.489010989010989,
      "grad_norm": 0.0914902612566948,
      "learning_rate": 0.00010797861055530831,
      "loss": 0.0697,
      "step": 89
    },
    {
      "epoch": 0.4945054945054945,
      "grad_norm": 0.0922885537147522,
      "learning_rate": 0.0001062081940275234,
      "loss": 0.0666,
      "step": 90
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.08988504856824875,
      "learning_rate": 0.00010443582177786564,
      "loss": 0.086,
      "step": 91
    },
    {
      "epoch": 0.5054945054945055,
      "grad_norm": 0.12048965692520142,
      "learning_rate": 0.00010266205214377748,
      "loss": 0.0905,
      "step": 92
    },
    {
      "epoch": 0.510989010989011,
      "grad_norm": 0.08413669466972351,
      "learning_rate": 0.0001008874439029091,
      "loss": 0.059,
      "step": 93
    },
    {
      "epoch": 0.5164835164835165,
      "grad_norm": 0.09707523882389069,
      "learning_rate": 9.91125560970909e-05,
      "loss": 0.1015,
      "step": 94
    },
    {
      "epoch": 0.521978021978022,
      "grad_norm": 0.13043560087680817,
      "learning_rate": 9.733794785622253e-05,
      "loss": 0.0914,
      "step": 95
    },
    {
      "epoch": 0.5274725274725275,
      "grad_norm": 0.10451046377420425,
      "learning_rate": 9.556417822213435e-05,
      "loss": 0.0904,
      "step": 96
    },
    {
      "epoch": 0.532967032967033,
      "grad_norm": 0.07710960507392883,
      "learning_rate": 9.379180597247661e-05,
      "loss": 0.0557,
      "step": 97
    },
    {
      "epoch": 0.5384615384615384,
      "grad_norm": 0.09335189312696457,
      "learning_rate": 9.202138944469168e-05,
      "loss": 0.0782,
      "step": 98
    },
    {
      "epoch": 0.5439560439560439,
      "grad_norm": 0.07391183078289032,
      "learning_rate": 9.025348636012536e-05,
      "loss": 0.0616,
      "step": 99
    },
    {
      "epoch": 0.5494505494505495,
      "grad_norm": 0.07584051787853241,
      "learning_rate": 8.84886536483317e-05,
      "loss": 0.0669,
      "step": 100
    },
    {
      "epoch": 0.554945054945055,
      "grad_norm": 0.07729068398475647,
      "learning_rate": 8.672744727162781e-05,
      "loss": 0.0635,
      "step": 101
    },
    {
      "epoch": 0.5604395604395604,
      "grad_norm": 0.0983298122882843,
      "learning_rate": 8.497042204995299e-05,
      "loss": 0.0556,
      "step": 102
    },
    {
      "epoch": 0.5659340659340659,
      "grad_norm": 0.07858923077583313,
      "learning_rate": 8.321813148608783e-05,
      "loss": 0.0551,
      "step": 103
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.1307792365550995,
      "learning_rate": 8.147112759128859e-05,
      "loss": 0.1135,
      "step": 104
    },
    {
      "epoch": 0.5769230769230769,
      "grad_norm": 0.08517476916313171,
      "learning_rate": 7.972996071139064e-05,
      "loss": 0.053,
      "step": 105
    },
    {
      "epoch": 0.5824175824175825,
      "grad_norm": 0.05371192470192909,
      "learning_rate": 7.799517935343757e-05,
      "loss": 0.0298,
      "step": 106
    },
    {
      "epoch": 0.5879120879120879,
      "grad_norm": 0.06914077699184418,
      "learning_rate": 7.626733001288851e-05,
      "loss": 0.0452,
      "step": 107
    },
    {
      "epoch": 0.5934065934065934,
      "grad_norm": 0.10342265665531158,
      "learning_rate": 7.454695700146024e-05,
      "loss": 0.0889,
      "step": 108
    },
    {
      "epoch": 0.5989010989010989,
      "grad_norm": 0.0685519203543663,
      "learning_rate": 7.283460227565613e-05,
      "loss": 0.0588,
      "step": 109
    },
    {
      "epoch": 0.6043956043956044,
      "grad_norm": 0.05628138780593872,
      "learning_rate": 7.113080526603792e-05,
      "loss": 0.046,
      "step": 110
    },
    {
      "epoch": 0.6098901098901099,
      "grad_norm": 0.11078569293022156,
      "learning_rate": 6.94361027072926e-05,
      "loss": 0.0791,
      "step": 111
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.05526568740606308,
      "learning_rate": 6.775102846914911e-05,
      "loss": 0.0559,
      "step": 112
    },
    {
      "epoch": 0.6208791208791209,
      "grad_norm": 0.07777650654315948,
      "learning_rate": 6.607611338819697e-05,
      "loss": 0.0574,
      "step": 113
    },
    {
      "epoch": 0.6263736263736264,
      "grad_norm": 0.06428060680627823,
      "learning_rate": 6.441188510066091e-05,
      "loss": 0.0386,
      "step": 114
    },
    {
      "epoch": 0.6318681318681318,
      "grad_norm": 0.1773328185081482,
      "learning_rate": 6.275886787618339e-05,
      "loss": 0.1229,
      "step": 115
    },
    {
      "epoch": 0.6373626373626373,
      "grad_norm": 0.05550393462181091,
      "learning_rate": 6.111758245266794e-05,
      "loss": 0.0414,
      "step": 116
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 0.041509225964546204,
      "learning_rate": 5.9488545872234645e-05,
      "loss": 0.0341,
      "step": 117
    },
    {
      "epoch": 0.6483516483516484,
      "grad_norm": 0.10156530886888504,
      "learning_rate": 5.787227131834043e-05,
      "loss": 0.0765,
      "step": 118
    },
    {
      "epoch": 0.6538461538461539,
      "grad_norm": 0.0621609091758728,
      "learning_rate": 5.626926795411447e-05,
      "loss": 0.0515,
      "step": 119
    },
    {
      "epoch": 0.6593406593406593,
      "grad_norm": 0.12675149738788605,
      "learning_rate": 5.468004076196029e-05,
      "loss": 0.0736,
      "step": 120
    },
    {
      "epoch": 0.6648351648351648,
      "grad_norm": 0.06981629878282547,
      "learning_rate": 5.310509038447492e-05,
      "loss": 0.07,
      "step": 121
    },
    {
      "epoch": 0.6703296703296703,
      "grad_norm": 0.05079374462366104,
      "learning_rate": 5.1544912966734994e-05,
      "loss": 0.0509,
      "step": 122
    },
    {
      "epoch": 0.6758241758241759,
      "grad_norm": 0.06290542334318161,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0577,
      "step": 123
    },
    {
      "epoch": 0.6813186813186813,
      "grad_norm": 0.042649202048778534,
      "learning_rate": 4.8470838166881226e-05,
      "loss": 0.035,
      "step": 124
    },
    {
      "epoch": 0.6868131868131868,
      "grad_norm": 0.07203595340251923,
      "learning_rate": 4.695790918802576e-05,
      "loss": 0.0706,
      "step": 125
    },
    {
      "epoch": 0.6923076923076923,
      "grad_norm": 0.04454479739069939,
      "learning_rate": 4.546168967036351e-05,
      "loss": 0.034,
      "step": 126
    },
    {
      "epoch": 0.6978021978021978,
      "grad_norm": 0.041393596678972244,
      "learning_rate": 4.398265095696539e-05,
      "loss": 0.0375,
      "step": 127
    },
    {
      "epoch": 0.7032967032967034,
      "grad_norm": 0.047695551067590714,
      "learning_rate": 4.252125897855932e-05,
      "loss": 0.0402,
      "step": 128
    },
    {
      "epoch": 0.7087912087912088,
      "grad_norm": 0.06419022381305695,
      "learning_rate": 4.107797410675166e-05,
      "loss": 0.0511,
      "step": 129
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.04011767357587814,
      "learning_rate": 3.965325100899961e-05,
      "loss": 0.0382,
      "step": 130
    },
    {
      "epoch": 0.7197802197802198,
      "grad_norm": 0.04134465754032135,
      "learning_rate": 3.824753850538082e-05,
      "loss": 0.0407,
      "step": 131
    },
    {
      "epoch": 0.7252747252747253,
      "grad_norm": 0.05654672160744667,
      "learning_rate": 3.686127942720463e-05,
      "loss": 0.0505,
      "step": 132
    },
    {
      "epoch": 0.7307692307692307,
      "grad_norm": 0.05311698094010353,
      "learning_rate": 3.5494910477510445e-05,
      "loss": 0.0514,
      "step": 133
    },
    {
      "epoch": 0.7362637362637363,
      "grad_norm": 0.06260506808757782,
      "learning_rate": 3.414886209349615e-05,
      "loss": 0.0514,
      "step": 134
    },
    {
      "epoch": 0.7417582417582418,
      "grad_norm": 0.06393005698919296,
      "learning_rate": 3.282355831092072e-05,
      "loss": 0.0413,
      "step": 135
    },
    {
      "epoch": 0.7472527472527473,
      "grad_norm": 0.048107732087373734,
      "learning_rate": 3.1519416630523444e-05,
      "loss": 0.0479,
      "step": 136
    },
    {
      "epoch": 0.7527472527472527,
      "grad_norm": 0.04841303825378418,
      "learning_rate": 3.0236847886501542e-05,
      "loss": 0.0342,
      "step": 137
    },
    {
      "epoch": 0.7582417582417582,
      "grad_norm": 0.0669633001089096,
      "learning_rate": 2.8976256117088195e-05,
      "loss": 0.0485,
      "step": 138
    },
    {
      "epoch": 0.7637362637362637,
      "grad_norm": 0.06344759464263916,
      "learning_rate": 2.7738038437271284e-05,
      "loss": 0.0535,
      "step": 139
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.07120760530233383,
      "learning_rate": 2.6522584913693294e-05,
      "loss": 0.0465,
      "step": 140
    }
  ],
  "logging_steps": 1,
  "max_steps": 182,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7846012682974822e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}