{
  "best_metric": 0.11460215598344803,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 2.012802275960171,
  "eval_steps": 25,
  "global_step": 88,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02275960170697013,
      "grad_norm": 1.7416112422943115,
      "learning_rate": 0.00015,
      "loss": 3.3287,
      "step": 1
    },
    {
      "epoch": 0.02275960170697013,
      "eval_loss": 3.4932775497436523,
      "eval_runtime": 0.2862,
      "eval_samples_per_second": 174.715,
      "eval_steps_per_second": 45.426,
      "step": 1
    },
    {
      "epoch": 0.04551920341394026,
      "grad_norm": 1.6345694065093994,
      "learning_rate": 0.0003,
      "loss": 3.318,
      "step": 2
    },
    {
      "epoch": 0.06827880512091039,
      "grad_norm": 1.726089358329773,
      "learning_rate": 0.00029990993452998227,
      "loss": 3.3113,
      "step": 3
    },
    {
      "epoch": 0.09103840682788052,
      "grad_norm": 1.496631145477295,
      "learning_rate": 0.00029963985829457943,
      "loss": 2.956,
      "step": 4
    },
    {
      "epoch": 0.11379800853485064,
      "grad_norm": 1.4868099689483643,
      "learning_rate": 0.0002991901316573927,
      "loss": 2.7203,
      "step": 5
    },
    {
      "epoch": 0.13655761024182078,
      "grad_norm": 1.4169163703918457,
      "learning_rate": 0.00029856135469013987,
      "loss": 2.394,
      "step": 6
    },
    {
      "epoch": 0.1593172119487909,
      "grad_norm": 1.5401833057403564,
      "learning_rate": 0.0002977543663719779,
      "loss": 2.1829,
      "step": 7
    },
    {
      "epoch": 0.18207681365576103,
      "grad_norm": 1.8458465337753296,
      "learning_rate": 0.00029677024347005013,
      "loss": 2.0323,
      "step": 8
    },
    {
      "epoch": 0.20483641536273114,
      "grad_norm": 2.272869110107422,
      "learning_rate": 0.0002956102991027524,
      "loss": 1.7811,
      "step": 9
    },
    {
      "epoch": 0.22759601706970128,
      "grad_norm": 2.5353314876556396,
      "learning_rate": 0.0002942760809876348,
      "loss": 1.5327,
      "step": 10
    },
    {
      "epoch": 0.2503556187766714,
      "grad_norm": 2.5102078914642334,
      "learning_rate": 0.00029276936937627725,
      "loss": 1.2993,
      "step": 11
    },
    {
      "epoch": 0.27311522048364156,
      "grad_norm": 2.5022499561309814,
      "learning_rate": 0.000291092174678894,
      "loss": 1.1518,
      "step": 12
    },
    {
      "epoch": 0.2958748221906117,
      "grad_norm": 1.592368483543396,
      "learning_rate": 0.00028924673478183645,
      "loss": 0.966,
      "step": 13
    },
    {
      "epoch": 0.3186344238975818,
      "grad_norm": 1.51223623752594,
      "learning_rate": 0.0002872355120615748,
      "loss": 0.8205,
      "step": 14
    },
    {
      "epoch": 0.3413940256045519,
      "grad_norm": 1.5905065536499023,
      "learning_rate": 0.00028506119009914,
      "loss": 0.7043,
      "step": 15
    },
    {
      "epoch": 0.36415362731152207,
      "grad_norm": 1.3329778909683228,
      "learning_rate": 0.000282726670099414,
      "loss": 0.661,
      "step": 16
    },
    {
      "epoch": 0.3869132290184922,
      "grad_norm": 0.8890392780303955,
      "learning_rate": 0.00028023506702004174,
      "loss": 0.4706,
      "step": 17
    },
    {
      "epoch": 0.4096728307254623,
      "grad_norm": 0.8749057650566101,
      "learning_rate": 0.0002775897054151335,
      "loss": 0.4197,
      "step": 18
    },
    {
      "epoch": 0.43243243243243246,
      "grad_norm": 0.8308561444282532,
      "learning_rate": 0.00027479411499930134,
      "loss": 0.4065,
      "step": 19
    },
    {
      "epoch": 0.45519203413940257,
      "grad_norm": 0.8073086142539978,
      "learning_rate": 0.00027185202593794927,
      "loss": 0.3595,
      "step": 20
    },
    {
      "epoch": 0.4779516358463727,
      "grad_norm": 0.6640134453773499,
      "learning_rate": 0.0002687673638701018,
      "loss": 0.3145,
      "step": 21
    },
    {
      "epoch": 0.5007112375533428,
      "grad_norm": 0.9070307612419128,
      "learning_rate": 0.00026554424467041055,
      "loss": 0.4087,
      "step": 22
    },
    {
      "epoch": 0.5234708392603129,
      "grad_norm": 0.810393750667572,
      "learning_rate": 0.00026218696895732944,
      "loss": 0.3313,
      "step": 23
    },
    {
      "epoch": 0.5462304409672831,
      "grad_norm": 0.5537192821502686,
      "learning_rate": 0.0002587000163547856,
      "loss": 0.2993,
      "step": 24
    },
    {
      "epoch": 0.5689900426742532,
      "grad_norm": 0.7400078177452087,
      "learning_rate": 0.0002550880395150023,
      "loss": 0.292,
      "step": 25
    },
    {
      "epoch": 0.5689900426742532,
      "eval_loss": 0.27180495858192444,
      "eval_runtime": 0.2794,
      "eval_samples_per_second": 178.941,
      "eval_steps_per_second": 46.525,
      "step": 25
    },
    {
      "epoch": 0.5917496443812233,
      "grad_norm": 0.6441358923912048,
      "learning_rate": 0.0002513558579104503,
      "loss": 0.2836,
      "step": 26
    },
    {
      "epoch": 0.6145092460881935,
      "grad_norm": 0.5371875166893005,
      "learning_rate": 0.00024750845140320964,
      "loss": 0.2955,
      "step": 27
    },
    {
      "epoch": 0.6372688477951636,
      "grad_norm": 0.4502342939376831,
      "learning_rate": 0.00024355095360032364,
      "loss": 0.2183,
      "step": 28
    },
    {
      "epoch": 0.6600284495021337,
      "grad_norm": 0.38916662335395813,
      "learning_rate": 0.00023948864500401016,
      "loss": 0.2134,
      "step": 29
    },
    {
      "epoch": 0.6827880512091038,
      "grad_norm": 0.48821350932121277,
      "learning_rate": 0.00023532694596587055,
      "loss": 0.1972,
      "step": 30
    },
    {
      "epoch": 0.705547652916074,
      "grad_norm": 0.43508380651474,
      "learning_rate": 0.00023107140945449652,
      "loss": 0.1729,
      "step": 31
    },
    {
      "epoch": 0.7283072546230441,
      "grad_norm": 0.5526984333992004,
      "learning_rate": 0.0002267277136461262,
      "loss": 0.1241,
      "step": 32
    },
    {
      "epoch": 0.7510668563300142,
      "grad_norm": 0.38419315218925476,
      "learning_rate": 0.00022230165434823502,
      "loss": 0.2296,
      "step": 33
    },
    {
      "epoch": 0.7738264580369844,
      "grad_norm": 0.5567602515220642,
      "learning_rate": 0.00021779913726617102,
      "loss": 0.2295,
      "step": 34
    },
    {
      "epoch": 0.7965860597439545,
      "grad_norm": 0.4326823651790619,
      "learning_rate": 0.00021322617012315288,
      "loss": 0.1703,
      "step": 35
    },
    {
      "epoch": 0.8193456614509246,
      "grad_norm": 0.4207884669303894,
      "learning_rate": 0.00020858885464414522,
      "loss": 0.178,
      "step": 36
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.47066789865493774,
      "learning_rate": 0.00020389337841430707,
      "loss": 0.1736,
      "step": 37
    },
    {
      "epoch": 0.8648648648648649,
      "grad_norm": 0.4035787284374237,
      "learning_rate": 0.00019914600662287684,
      "loss": 0.1759,
      "step": 38
    },
    {
      "epoch": 0.887624466571835,
      "grad_norm": 0.32935065031051636,
      "learning_rate": 0.00019435307370351017,
      "loss": 0.1141,
      "step": 39
    },
    {
      "epoch": 0.9103840682788051,
      "grad_norm": 0.26641812920570374,
      "learning_rate": 0.0001895209748822239,
      "loss": 0.1083,
      "step": 40
    },
    {
      "epoch": 0.9331436699857752,
      "grad_norm": 0.32735541462898254,
      "learning_rate": 0.00018465615764422566,
      "loss": 0.132,
      "step": 41
    },
    {
      "epoch": 0.9559032716927454,
      "grad_norm": 0.21898917853832245,
      "learning_rate": 0.00017976511313101307,
      "loss": 0.1019,
      "step": 42
    },
    {
      "epoch": 0.9786628733997155,
      "grad_norm": 0.2621893882751465,
      "learning_rate": 0.00017485436747922248,
      "loss": 0.0869,
      "step": 43
    },
    {
      "epoch": 1.0064011379800855,
      "grad_norm": 0.27739080786705017,
      "learning_rate": 0.00016993047311278397,
      "loss": 0.2184,
      "step": 44
    },
    {
      "epoch": 1.0291607396870555,
      "grad_norm": 0.3383594751358032,
      "learning_rate": 0.000165,
      "loss": 0.1592,
      "step": 45
    },
    {
      "epoch": 1.0519203413940257,
      "grad_norm": 0.3034259080886841,
      "learning_rate": 0.00016006952688721603,
      "loss": 0.1443,
      "step": 46
    },
    {
      "epoch": 1.0746799431009957,
      "grad_norm": 0.27324768900871277,
      "learning_rate": 0.0001551456325207775,
      "loss": 0.1408,
      "step": 47
    },
    {
      "epoch": 1.097439544807966,
      "grad_norm": 0.22992853820323944,
      "learning_rate": 0.00015023488686898698,
      "loss": 0.1433,
      "step": 48
    },
    {
      "epoch": 1.120199146514936,
      "grad_norm": 0.24354040622711182,
      "learning_rate": 0.00014534384235577436,
      "loss": 0.1316,
      "step": 49
    },
    {
      "epoch": 1.1429587482219061,
      "grad_norm": 0.28851574659347534,
      "learning_rate": 0.0001404790251177761,
      "loss": 0.1076,
      "step": 50
    },
    {
      "epoch": 1.1429587482219061,
      "eval_loss": 0.11460215598344803,
      "eval_runtime": 0.2861,
      "eval_samples_per_second": 174.765,
      "eval_steps_per_second": 45.439,
      "step": 50
    },
    {
      "epoch": 1.1657183499288761,
      "grad_norm": 0.2691490352153778,
      "learning_rate": 0.00013564692629648982,
      "loss": 0.0852,
      "step": 51
    },
    {
      "epoch": 1.1884779516358464,
      "grad_norm": 0.1689569503068924,
      "learning_rate": 0.00013085399337712307,
      "loss": 0.0964,
      "step": 52
    },
    {
      "epoch": 1.2112375533428166,
      "grad_norm": 0.22519923746585846,
      "learning_rate": 0.00012610662158569293,
      "loss": 0.0914,
      "step": 53
    },
    {
      "epoch": 1.2339971550497866,
      "grad_norm": 0.2679944634437561,
      "learning_rate": 0.0001214111453558548,
      "loss": 0.0706,
      "step": 54
    },
    {
      "epoch": 1.2567567567567568,
      "grad_norm": 0.28117287158966064,
      "learning_rate": 0.00011677382987684708,
      "loss": 0.182,
      "step": 55
    },
    {
      "epoch": 1.2795163584637268,
      "grad_norm": 0.2838458716869354,
      "learning_rate": 0.00011220086273382896,
      "loss": 0.1417,
      "step": 56
    },
    {
      "epoch": 1.302275960170697,
      "grad_norm": 0.3674065172672272,
      "learning_rate": 0.00010769834565176498,
      "loss": 0.1263,
      "step": 57
    },
    {
      "epoch": 1.3250355618776672,
      "grad_norm": 0.24421556293964386,
      "learning_rate": 0.0001032722863538738,
      "loss": 0.1178,
      "step": 58
    },
    {
      "epoch": 1.3477951635846372,
      "grad_norm": 0.2171713262796402,
      "learning_rate": 9.892859054550347e-05,
      "loss": 0.1202,
      "step": 59
    },
    {
      "epoch": 1.3705547652916075,
      "grad_norm": 0.2513107657432556,
      "learning_rate": 9.467305403412942e-05,
      "loss": 0.1421,
      "step": 60
    },
    {
      "epoch": 1.3933143669985775,
      "grad_norm": 0.1986607015132904,
      "learning_rate": 9.05113549959898e-05,
      "loss": 0.0953,
      "step": 61
    },
    {
      "epoch": 1.4160739687055477,
      "grad_norm": 0.2196892499923706,
      "learning_rate": 8.644904639967639e-05,
      "loss": 0.0852,
      "step": 62
    },
    {
      "epoch": 1.438833570412518,
      "grad_norm": 0.21326187252998352,
      "learning_rate": 8.249154859679033e-05,
      "loss": 0.0821,
      "step": 63
    },
    {
      "epoch": 1.461593172119488,
      "grad_norm": 0.21575187146663666,
      "learning_rate": 7.864414208954971e-05,
      "loss": 0.0865,
      "step": 64
    },
    {
      "epoch": 1.484352773826458,
      "grad_norm": 0.17554332315921783,
      "learning_rate": 7.491196048499769e-05,
      "loss": 0.0701,
      "step": 65
    },
    {
      "epoch": 1.5071123755334281,
      "grad_norm": 0.3326629400253296,
      "learning_rate": 7.12999836452144e-05,
      "loss": 0.1359,
      "step": 66
    },
    {
      "epoch": 1.5298719772403984,
      "grad_norm": 0.242942675948143,
      "learning_rate": 6.781303104267059e-05,
      "loss": 0.1213,
      "step": 67
    },
    {
      "epoch": 1.5526315789473686,
      "grad_norm": 0.15724237263202667,
      "learning_rate": 6.445575532958945e-05,
      "loss": 0.0944,
      "step": 68
    },
    {
      "epoch": 1.5753911806543386,
      "grad_norm": 0.1837826371192932,
      "learning_rate": 6.123263612989815e-05,
      "loss": 0.1227,
      "step": 69
    },
    {
      "epoch": 1.5981507823613086,
      "grad_norm": 0.16875876486301422,
      "learning_rate": 5.81479740620507e-05,
      "loss": 0.1012,
      "step": 70
    },
    {
      "epoch": 1.6209103840682788,
      "grad_norm": 0.2034892439842224,
      "learning_rate": 5.520588500069867e-05,
      "loss": 0.1228,
      "step": 71
    },
    {
      "epoch": 1.643669985775249,
      "grad_norm": 0.18250970542430878,
      "learning_rate": 5.241029458486649e-05,
      "loss": 0.0753,
      "step": 72
    },
    {
      "epoch": 1.666429587482219,
      "grad_norm": 0.12507431209087372,
      "learning_rate": 4.976493297995823e-05,
      "loss": 0.0749,
      "step": 73
    },
    {
      "epoch": 1.689189189189189,
      "grad_norm": 0.16313859820365906,
      "learning_rate": 4.7273329900585954e-05,
      "loss": 0.0812,
      "step": 74
    },
    {
      "epoch": 1.7119487908961593,
      "grad_norm": 0.14435066282749176,
      "learning_rate": 4.4938809900859955e-05,
      "loss": 0.0737,
      "step": 75
    },
    {
      "epoch": 1.7119487908961593,
      "eval_loss": 0.09484730660915375,
      "eval_runtime": 0.2949,
      "eval_samples_per_second": 169.544,
      "eval_steps_per_second": 44.081,
      "step": 75
    },
    {
      "epoch": 1.7347083926031295,
      "grad_norm": 0.1676023304462433,
      "learning_rate": 4.2764487938425205e-05,
      "loss": 0.0561,
      "step": 76
    },
    {
      "epoch": 1.7574679943100997,
      "grad_norm": 0.26078304648399353,
      "learning_rate": 4.0753265218163486e-05,
      "loss": 0.1575,
      "step": 77
    },
    {
      "epoch": 1.7802275960170697,
      "grad_norm": 0.23101890087127686,
      "learning_rate": 3.890782532110603e-05,
      "loss": 0.1118,
      "step": 78
    },
    {
      "epoch": 1.8029871977240397,
      "grad_norm": 0.22551748156547546,
      "learning_rate": 3.7230630623722724e-05,
      "loss": 0.101,
      "step": 79
    },
    {
      "epoch": 1.82574679943101,
      "grad_norm": 0.21574518084526062,
      "learning_rate": 3.572391901236521e-05,
      "loss": 0.1012,
      "step": 80
    },
    {
      "epoch": 1.8485064011379801,
      "grad_norm": 0.15184852480888367,
      "learning_rate": 3.4389700897247595e-05,
      "loss": 0.1,
      "step": 81
    },
    {
      "epoch": 1.8712660028449504,
      "grad_norm": 0.1621236801147461,
      "learning_rate": 3.322975652994985e-05,
      "loss": 0.083,
      "step": 82
    },
    {
      "epoch": 1.8940256045519204,
      "grad_norm": 0.19756773114204407,
      "learning_rate": 3.2245633628022074e-05,
      "loss": 0.0676,
      "step": 83
    },
    {
      "epoch": 1.9167852062588904,
      "grad_norm": 0.15019188821315765,
      "learning_rate": 3.143864530986012e-05,
      "loss": 0.0717,
      "step": 84
    },
    {
      "epoch": 1.9395448079658606,
      "grad_norm": 0.1473209261894226,
      "learning_rate": 3.08098683426073e-05,
      "loss": 0.0856,
      "step": 85
    },
    {
      "epoch": 1.9623044096728308,
      "grad_norm": 0.22414885461330414,
      "learning_rate": 3.0360141705420527e-05,
      "loss": 0.0729,
      "step": 86
    },
    {
      "epoch": 1.9850640113798008,
      "grad_norm": 0.21767638623714447,
      "learning_rate": 3.009006547001768e-05,
      "loss": 0.0847,
      "step": 87
    },
    {
      "epoch": 2.012802275960171,
      "grad_norm": 0.23539167642593384,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 0.1202,
      "step": 88
    }
  ],
  "logging_steps": 1,
  "max_steps": 88,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.909666424120934e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}