{
  "best_metric": 0.28790074586868286,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.5239572561185798,
  "eval_steps": 25,
  "global_step": 95,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005515339538090314,
      "grad_norm": 92.3752670288086,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 14.1418,
      "step": 1
    },
    {
      "epoch": 0.005515339538090314,
      "eval_loss": 18.986474990844727,
      "eval_runtime": 2.072,
      "eval_samples_per_second": 24.132,
      "eval_steps_per_second": 6.274,
      "step": 1
    },
    {
      "epoch": 0.011030679076180628,
      "grad_norm": 92.41960144042969,
      "learning_rate": 6.666666666666667e-05,
      "loss": 14.1418,
      "step": 2
    },
    {
      "epoch": 0.016546018614270942,
      "grad_norm": 91.83131408691406,
      "learning_rate": 0.0001,
      "loss": 9.4242,
      "step": 3
    },
    {
      "epoch": 0.022061358152361255,
      "grad_norm": 17.633739471435547,
      "learning_rate": 9.997376600647783e-05,
      "loss": 5.9923,
      "step": 4
    },
    {
      "epoch": 0.027576697690451568,
      "grad_norm": 9.156025886535645,
      "learning_rate": 9.989509461357426e-05,
      "loss": 4.2342,
      "step": 5
    },
    {
      "epoch": 0.033092037228541885,
      "grad_norm": 14.200757026672363,
      "learning_rate": 9.976407754861426e-05,
      "loss": 5.1585,
      "step": 6
    },
    {
      "epoch": 0.038607376766632194,
      "grad_norm": 11.900317192077637,
      "learning_rate": 9.958086757163489e-05,
      "loss": 6.826,
      "step": 7
    },
    {
      "epoch": 0.04412271630472251,
      "grad_norm": 12.914311408996582,
      "learning_rate": 9.934567829727386e-05,
      "loss": 6.2055,
      "step": 8
    },
    {
      "epoch": 0.04963805584281282,
      "grad_norm": 14.33440113067627,
      "learning_rate": 9.905878394570453e-05,
      "loss": 5.3226,
      "step": 9
    },
    {
      "epoch": 0.055153395380903136,
      "grad_norm": 16.229171752929688,
      "learning_rate": 9.872051902290737e-05,
      "loss": 4.2126,
      "step": 10
    },
    {
      "epoch": 0.06066873491899345,
      "grad_norm": 18.343318939208984,
      "learning_rate": 9.833127793065098e-05,
      "loss": 2.8094,
      "step": 11
    },
    {
      "epoch": 0.06618407445708377,
      "grad_norm": 20.21609878540039,
      "learning_rate": 9.789151450663723e-05,
      "loss": 1.1322,
      "step": 12
    },
    {
      "epoch": 0.07169941399517407,
      "grad_norm": 8.667510986328125,
      "learning_rate": 9.740174149534693e-05,
      "loss": 1.199,
      "step": 13
    },
    {
      "epoch": 0.07721475353326439,
      "grad_norm": 16.896587371826172,
      "learning_rate": 9.686252995020249e-05,
      "loss": 2.4148,
      "step": 14
    },
    {
      "epoch": 0.0827300930713547,
      "grad_norm": 17.433048248291016,
      "learning_rate": 9.627450856774539e-05,
      "loss": 2.4726,
      "step": 15
    },
    {
      "epoch": 0.08824543260944502,
      "grad_norm": 18.047983169555664,
      "learning_rate": 9.563836295460398e-05,
      "loss": 2.1929,
      "step": 16
    },
    {
      "epoch": 0.09376077214753534,
      "grad_norm": 18.881397247314453,
      "learning_rate": 9.495483482810688e-05,
      "loss": 1.6352,
      "step": 17
    },
    {
      "epoch": 0.09927611168562564,
      "grad_norm": 16.9337158203125,
      "learning_rate": 9.422472115147382e-05,
      "loss": 0.8618,
      "step": 18
    },
    {
      "epoch": 0.10479145122371596,
      "grad_norm": 24.55199432373047,
      "learning_rate": 9.3448873204592e-05,
      "loss": 0.5281,
      "step": 19
    },
    {
      "epoch": 0.11030679076180627,
      "grad_norm": 27.92365074157715,
      "learning_rate": 9.2628195591462e-05,
      "loss": 1.0844,
      "step": 20
    },
    {
      "epoch": 0.11582213029989659,
      "grad_norm": 26.889196395874023,
      "learning_rate": 9.176364518546989e-05,
      "loss": 1.218,
      "step": 21
    },
    {
      "epoch": 0.1213374698379869,
      "grad_norm": 26.291074752807617,
      "learning_rate": 9.08562300137157e-05,
      "loss": 1.0191,
      "step": 22
    },
    {
      "epoch": 0.1268528093760772,
      "grad_norm": 22.38892364501953,
      "learning_rate": 8.990700808169889e-05,
      "loss": 0.5982,
      "step": 23
    },
    {
      "epoch": 0.13236814891416754,
      "grad_norm": 11.694781303405762,
      "learning_rate": 8.891708613973126e-05,
      "loss": 0.1916,
      "step": 24
    },
    {
      "epoch": 0.13788348845225784,
      "grad_norm": 1.7668880224227905,
      "learning_rate": 8.788761839251559e-05,
      "loss": 0.025,
      "step": 25
    },
    {
      "epoch": 0.13788348845225784,
      "eval_loss": 0.4710754156112671,
      "eval_runtime": 2.0825,
      "eval_samples_per_second": 24.009,
      "eval_steps_per_second": 6.242,
      "step": 25
    },
    {
      "epoch": 0.14339882799034814,
      "grad_norm": 15.763641357421875,
      "learning_rate": 8.681980515339464e-05,
      "loss": 1.0617,
      "step": 26
    },
    {
      "epoch": 0.14891416752843847,
      "grad_norm": 15.280720710754395,
      "learning_rate": 8.571489144483944e-05,
      "loss": 1.3544,
      "step": 27
    },
    {
      "epoch": 0.15442950706652878,
      "grad_norm": 14.879006385803223,
      "learning_rate": 8.457416554680877e-05,
      "loss": 1.4492,
      "step": 28
    },
    {
      "epoch": 0.1599448466046191,
      "grad_norm": 14.426602363586426,
      "learning_rate": 8.339895749467238e-05,
      "loss": 1.3997,
      "step": 29
    },
    {
      "epoch": 0.1654601861427094,
      "grad_norm": 14.272067070007324,
      "learning_rate": 8.219063752844926e-05,
      "loss": 1.2409,
      "step": 30
    },
    {
      "epoch": 0.1709755256807997,
      "grad_norm": 9.54008960723877,
      "learning_rate": 8.095061449516903e-05,
      "loss": 0.7096,
      "step": 31
    },
    {
      "epoch": 0.17649086521889004,
      "grad_norm": 0.5781741738319397,
      "learning_rate": 7.968033420621935e-05,
      "loss": 0.0109,
      "step": 32
    },
    {
      "epoch": 0.18200620475698034,
      "grad_norm": 1.3752659559249878,
      "learning_rate": 7.838127775159452e-05,
      "loss": 0.0241,
      "step": 33
    },
    {
      "epoch": 0.18752154429507067,
      "grad_norm": 1.6789705753326416,
      "learning_rate": 7.705495977301078e-05,
      "loss": 0.0301,
      "step": 34
    },
    {
      "epoch": 0.19303688383316098,
      "grad_norm": 1.4155908823013306,
      "learning_rate": 7.570292669790186e-05,
      "loss": 0.0255,
      "step": 35
    },
    {
      "epoch": 0.19855222337125128,
      "grad_norm": 0.8778910040855408,
      "learning_rate": 7.43267549363537e-05,
      "loss": 0.0164,
      "step": 36
    },
    {
      "epoch": 0.2040675629093416,
      "grad_norm": 0.4698180556297302,
      "learning_rate": 7.292804904308087e-05,
      "loss": 0.0094,
      "step": 37
    },
    {
      "epoch": 0.2095829024474319,
      "grad_norm": 7.407131671905518,
      "learning_rate": 7.150843984658754e-05,
      "loss": 0.5839,
      "step": 38
    },
    {
      "epoch": 0.21509824198552224,
      "grad_norm": 11.81148624420166,
      "learning_rate": 7.006958254769438e-05,
      "loss": 0.9861,
      "step": 39
    },
    {
      "epoch": 0.22061358152361255,
      "grad_norm": 11.63061237335205,
      "learning_rate": 6.861315478964841e-05,
      "loss": 0.9721,
      "step": 40
    },
    {
      "epoch": 0.22612892106170285,
      "grad_norm": 11.430267333984375,
      "learning_rate": 6.714085470206609e-05,
      "loss": 0.9024,
      "step": 41
    },
    {
      "epoch": 0.23164426059979318,
      "grad_norm": 11.040278434753418,
      "learning_rate": 6.56543989209901e-05,
      "loss": 0.7755,
      "step": 42
    },
    {
      "epoch": 0.23715960013788348,
      "grad_norm": 9.684221267700195,
      "learning_rate": 6.415552058736854e-05,
      "loss": 0.5668,
      "step": 43
    },
    {
      "epoch": 0.2426749396759738,
      "grad_norm": 2.087521553039551,
      "learning_rate": 6.264596732629e-05,
      "loss": 0.0431,
      "step": 44
    },
    {
      "epoch": 0.2481902792140641,
      "grad_norm": 3.5265278816223145,
      "learning_rate": 6.112749920933111e-05,
      "loss": 0.0777,
      "step": 45
    },
    {
      "epoch": 0.2537056187521544,
      "grad_norm": 4.007287502288818,
      "learning_rate": 5.960188670239154e-05,
      "loss": 0.0911,
      "step": 46
    },
    {
      "epoch": 0.2592209582902447,
      "grad_norm": 3.595430612564087,
      "learning_rate": 5.80709086014102e-05,
      "loss": 0.0834,
      "step": 47
    },
    {
      "epoch": 0.2647362978283351,
      "grad_norm": 2.904953956604004,
      "learning_rate": 5.653634995836856e-05,
      "loss": 0.0653,
      "step": 48
    },
    {
      "epoch": 0.2702516373664254,
      "grad_norm": 1.9227010011672974,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.0425,
      "step": 49
    },
    {
      "epoch": 0.2757669769045157,
      "grad_norm": 1.1187314987182617,
      "learning_rate": 5.346365004163145e-05,
      "loss": 0.0248,
      "step": 50
    },
    {
      "epoch": 0.2757669769045157,
      "eval_loss": 0.31743958592414856,
      "eval_runtime": 2.0803,
      "eval_samples_per_second": 24.035,
      "eval_steps_per_second": 6.249,
      "step": 50
    },
    {
      "epoch": 0.281282316442606,
      "grad_norm": 9.661118507385254,
      "learning_rate": 5.192909139858981e-05,
      "loss": 0.6918,
      "step": 51
    },
    {
      "epoch": 0.2867976559806963,
      "grad_norm": 9.91291332244873,
      "learning_rate": 5.0398113297608465e-05,
      "loss": 0.7725,
      "step": 52
    },
    {
      "epoch": 0.29231299551878664,
      "grad_norm": 9.74583911895752,
      "learning_rate": 4.887250079066892e-05,
      "loss": 0.8055,
      "step": 53
    },
    {
      "epoch": 0.29782833505687695,
      "grad_norm": 9.611279487609863,
      "learning_rate": 4.7354032673710005e-05,
      "loss": 0.8017,
      "step": 54
    },
    {
      "epoch": 0.30334367459496725,
      "grad_norm": 9.350974082946777,
      "learning_rate": 4.584447941263149e-05,
      "loss": 0.7482,
      "step": 55
    },
    {
      "epoch": 0.30885901413305755,
      "grad_norm": 0.41890618205070496,
      "learning_rate": 4.43456010790099e-05,
      "loss": 0.0797,
      "step": 56
    },
    {
      "epoch": 0.31437435367114785,
      "grad_norm": 0.7210342288017273,
      "learning_rate": 4.285914529793391e-05,
      "loss": 0.0184,
      "step": 57
    },
    {
      "epoch": 0.3198896932092382,
      "grad_norm": 0.8834096193313599,
      "learning_rate": 4.13868452103516e-05,
      "loss": 0.0226,
      "step": 58
    },
    {
      "epoch": 0.3254050327473285,
      "grad_norm": 0.9115370512008667,
      "learning_rate": 3.9930417452305626e-05,
      "loss": 0.0235,
      "step": 59
    },
    {
      "epoch": 0.3309203722854188,
      "grad_norm": 0.8448688387870789,
      "learning_rate": 3.8491560153412466e-05,
      "loss": 0.0218,
      "step": 60
    },
    {
      "epoch": 0.3364357118235091,
      "grad_norm": 0.7082563042640686,
      "learning_rate": 3.707195095691913e-05,
      "loss": 0.0184,
      "step": 61
    },
    {
      "epoch": 0.3419510513615994,
      "grad_norm": 0.5660529136657715,
      "learning_rate": 3.567324506364632e-05,
      "loss": 0.0147,
      "step": 62
    },
    {
      "epoch": 0.3474663908996898,
      "grad_norm": 5.351521015167236,
      "learning_rate": 3.4297073302098156e-05,
      "loss": 0.4547,
      "step": 63
    },
    {
      "epoch": 0.3529817304377801,
      "grad_norm": 8.921615600585938,
      "learning_rate": 3.2945040226989244e-05,
      "loss": 0.7382,
      "step": 64
    },
    {
      "epoch": 0.3584970699758704,
      "grad_norm": 8.974565505981445,
      "learning_rate": 3.16187222484055e-05,
      "loss": 0.7427,
      "step": 65
    },
    {
      "epoch": 0.3640124095139607,
      "grad_norm": 8.922871589660645,
      "learning_rate": 3.0319665793780648e-05,
      "loss": 0.7236,
      "step": 66
    },
    {
      "epoch": 0.369527749052051,
      "grad_norm": 8.645066261291504,
      "learning_rate": 2.9049385504830985e-05,
      "loss": 0.6789,
      "step": 67
    },
    {
      "epoch": 0.37504308859014135,
      "grad_norm": 5.97182035446167,
      "learning_rate": 2.7809362471550748e-05,
      "loss": 0.4572,
      "step": 68
    },
    {
      "epoch": 0.38055842812823165,
      "grad_norm": 0.8891826868057251,
      "learning_rate": 2.660104250532764e-05,
      "loss": 0.0231,
      "step": 69
    },
    {
      "epoch": 0.38607376766632195,
      "grad_norm": 1.1015204191207886,
      "learning_rate": 2.5425834453191232e-05,
      "loss": 0.029,
      "step": 70
    },
    {
      "epoch": 0.39158910720441226,
      "grad_norm": 1.2106517553329468,
      "learning_rate": 2.4285108555160577e-05,
      "loss": 0.0317,
      "step": 71
    },
    {
      "epoch": 0.39710444674250256,
      "grad_norm": 1.1796172857284546,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.0314,
      "step": 72
    },
    {
      "epoch": 0.4026197862805929,
      "grad_norm": 1.0850085020065308,
      "learning_rate": 2.2112381607484417e-05,
      "loss": 0.0283,
      "step": 73
    },
    {
      "epoch": 0.4081351258186832,
      "grad_norm": 0.9120981097221375,
      "learning_rate": 2.1082913860268765e-05,
      "loss": 0.0237,
      "step": 74
    },
    {
      "epoch": 0.4136504653567735,
      "grad_norm": 0.8024790287017822,
      "learning_rate": 2.0092991918301108e-05,
      "loss": 0.0207,
      "step": 75
    },
    {
      "epoch": 0.4136504653567735,
      "eval_loss": 0.28790074586868286,
      "eval_runtime": 2.0948,
      "eval_samples_per_second": 23.869,
      "eval_steps_per_second": 6.206,
      "step": 75
    },
    {
      "epoch": 0.4191658048948638,
      "grad_norm": 8.318974494934082,
      "learning_rate": 1.91437699862843e-05,
      "loss": 0.6345,
      "step": 76
    },
    {
      "epoch": 0.4246811444329541,
      "grad_norm": 8.486698150634766,
      "learning_rate": 1.8236354814530112e-05,
      "loss": 0.6599,
      "step": 77
    },
    {
      "epoch": 0.4301964839710445,
      "grad_norm": 8.49923038482666,
      "learning_rate": 1.7371804408538024e-05,
      "loss": 0.6645,
      "step": 78
    },
    {
      "epoch": 0.4357118235091348,
      "grad_norm": 8.403206825256348,
      "learning_rate": 1.6551126795408016e-05,
      "loss": 0.657,
      "step": 79
    },
    {
      "epoch": 0.4412271630472251,
      "grad_norm": 8.299516677856445,
      "learning_rate": 1.577527884852619e-05,
      "loss": 0.6464,
      "step": 80
    },
    {
      "epoch": 0.4467425025853154,
      "grad_norm": 1.9985052347183228,
      "learning_rate": 1.5045165171893116e-05,
      "loss": 0.1938,
      "step": 81
    },
    {
      "epoch": 0.4522578421234057,
      "grad_norm": 0.7599959969520569,
      "learning_rate": 1.4361637045396029e-05,
      "loss": 0.0196,
      "step": 82
    },
    {
      "epoch": 0.45777318166149605,
      "grad_norm": 0.8468267917633057,
      "learning_rate": 1.3725491432254624e-05,
      "loss": 0.0219,
      "step": 83
    },
    {
      "epoch": 0.46328852119958636,
      "grad_norm": 0.8709926009178162,
      "learning_rate": 1.313747004979751e-05,
      "loss": 0.0224,
      "step": 84
    },
    {
      "epoch": 0.46880386073767666,
      "grad_norm": 0.857819676399231,
      "learning_rate": 1.2598258504653081e-05,
      "loss": 0.0221,
      "step": 85
    },
    {
      "epoch": 0.47431920027576696,
      "grad_norm": 0.807769775390625,
      "learning_rate": 1.2108485493362765e-05,
      "loss": 0.0207,
      "step": 86
    },
    {
      "epoch": 0.47983453981385726,
      "grad_norm": 0.8083691000938416,
      "learning_rate": 1.1668722069349041e-05,
      "loss": 0.0209,
      "step": 87
    },
    {
      "epoch": 0.4853498793519476,
      "grad_norm": 4.770956993103027,
      "learning_rate": 1.1279480977092635e-05,
      "loss": 0.3815,
      "step": 88
    },
    {
      "epoch": 0.4908652188900379,
      "grad_norm": 8.195169448852539,
      "learning_rate": 1.094121605429547e-05,
      "loss": 0.6112,
      "step": 89
    },
    {
      "epoch": 0.4963805584281282,
      "grad_norm": 8.203980445861816,
      "learning_rate": 1.0654321702726141e-05,
      "loss": 0.6149,
      "step": 90
    },
    {
      "epoch": 0.5018958979662186,
      "grad_norm": 8.144838333129883,
      "learning_rate": 1.0419132428365116e-05,
      "loss": 0.609,
      "step": 91
    },
    {
      "epoch": 0.5074112375043088,
      "grad_norm": 8.066883087158203,
      "learning_rate": 1.0235922451385733e-05,
      "loss": 0.5882,
      "step": 92
    },
    {
      "epoch": 0.5129265770423992,
      "grad_norm": 6.3228607177734375,
      "learning_rate": 1.0104905386425733e-05,
      "loss": 0.4687,
      "step": 93
    },
    {
      "epoch": 0.5184419165804894,
      "grad_norm": 0.907468318939209,
      "learning_rate": 1.002623399352217e-05,
      "loss": 0.0236,
      "step": 94
    },
    {
      "epoch": 0.5239572561185798,
      "grad_norm": 1.0146715641021729,
      "learning_rate": 1e-05,
      "loss": 0.0264,
      "step": 95
    }
  ],
  "logging_steps": 1,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.087136995672064e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}