{
  "best_metric": 0.12361826002597809,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.253324889170361,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00253324889170361,
      "grad_norm": 3.6676442623138428,
      "learning_rate": 4e-05,
      "loss": 3.0591,
      "step": 1
    },
    {
      "epoch": 0.00253324889170361,
      "eval_loss": 3.577761650085449,
      "eval_runtime": 44.9583,
      "eval_samples_per_second": 3.715,
      "eval_steps_per_second": 1.868,
      "step": 1
    },
    {
      "epoch": 0.00506649778340722,
      "grad_norm": 3.199979305267334,
      "learning_rate": 8e-05,
      "loss": 2.7215,
      "step": 2
    },
    {
      "epoch": 0.007599746675110829,
      "grad_norm": 3.33293080329895,
      "learning_rate": 0.00012,
      "loss": 2.9505,
      "step": 3
    },
    {
      "epoch": 0.01013299556681444,
      "grad_norm": 4.012212753295898,
      "learning_rate": 0.00016,
      "loss": 2.8184,
      "step": 4
    },
    {
      "epoch": 0.01266624445851805,
      "grad_norm": 4.058014869689941,
      "learning_rate": 0.0002,
      "loss": 1.986,
      "step": 5
    },
    {
      "epoch": 0.015199493350221659,
      "grad_norm": 3.3792548179626465,
      "learning_rate": 0.00019994532573409262,
      "loss": 1.3189,
      "step": 6
    },
    {
      "epoch": 0.01773274224192527,
      "grad_norm": 3.53857159614563,
      "learning_rate": 0.00019978136272187747,
      "loss": 0.7923,
      "step": 7
    },
    {
      "epoch": 0.02026599113362888,
      "grad_norm": 5.587308406829834,
      "learning_rate": 0.00019950829025450114,
      "loss": 0.7796,
      "step": 8
    },
    {
      "epoch": 0.022799240025332488,
      "grad_norm": 3.04203724861145,
      "learning_rate": 0.00019912640693269752,
      "loss": 0.1713,
      "step": 9
    },
    {
      "epoch": 0.0253324889170361,
      "grad_norm": 3.9056479930877686,
      "learning_rate": 0.00019863613034027224,
      "loss": 0.223,
      "step": 10
    },
    {
      "epoch": 0.02786573780873971,
      "grad_norm": 3.32242488861084,
      "learning_rate": 0.00019803799658748094,
      "loss": 0.256,
      "step": 11
    },
    {
      "epoch": 0.030398986700443317,
      "grad_norm": 0.5264054536819458,
      "learning_rate": 0.0001973326597248006,
      "loss": 0.0183,
      "step": 12
    },
    {
      "epoch": 0.032932235592146926,
      "grad_norm": 2.039252281188965,
      "learning_rate": 0.00019652089102773488,
      "loss": 0.2864,
      "step": 13
    },
    {
      "epoch": 0.03546548448385054,
      "grad_norm": 1.891056776046753,
      "learning_rate": 0.00019560357815343577,
      "loss": 0.0422,
      "step": 14
    },
    {
      "epoch": 0.03799873337555415,
      "grad_norm": 2.2989518642425537,
      "learning_rate": 0.00019458172417006347,
      "loss": 0.5805,
      "step": 15
    },
    {
      "epoch": 0.04053198226725776,
      "grad_norm": 2.4155068397521973,
      "learning_rate": 0.0001934564464599461,
      "loss": 0.2322,
      "step": 16
    },
    {
      "epoch": 0.04306523115896137,
      "grad_norm": 2.4074628353118896,
      "learning_rate": 0.00019222897549773848,
      "loss": 0.4752,
      "step": 17
    },
    {
      "epoch": 0.045598480050664976,
      "grad_norm": 0.9219037294387817,
      "learning_rate": 0.00019090065350491626,
      "loss": 0.2356,
      "step": 18
    },
    {
      "epoch": 0.048131728942368585,
      "grad_norm": 1.37091064453125,
      "learning_rate": 0.00018947293298207635,
      "loss": 0.2088,
      "step": 19
    },
    {
      "epoch": 0.0506649778340722,
      "grad_norm": 0.2628805935382843,
      "learning_rate": 0.0001879473751206489,
      "loss": 0.0109,
      "step": 20
    },
    {
      "epoch": 0.05319822672577581,
      "grad_norm": 2.158172845840454,
      "learning_rate": 0.00018632564809575742,
      "loss": 0.2097,
      "step": 21
    },
    {
      "epoch": 0.05573147561747942,
      "grad_norm": 1.4831571578979492,
      "learning_rate": 0.00018460952524209355,
      "loss": 0.1739,
      "step": 22
    },
    {
      "epoch": 0.058264724509183026,
      "grad_norm": 1.6883933544158936,
      "learning_rate": 0.00018280088311480201,
      "loss": 0.2868,
      "step": 23
    },
    {
      "epoch": 0.060797973400886635,
      "grad_norm": 1.1558237075805664,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.1405,
      "step": 24
    },
    {
      "epoch": 0.06333122229259025,
      "grad_norm": 1.421405553817749,
      "learning_rate": 0.00017891405093963938,
      "loss": 0.162,
      "step": 25
    },
    {
      "epoch": 0.06586447118429385,
      "grad_norm": 3.7727131843566895,
      "learning_rate": 0.00017684011108568592,
      "loss": 0.291,
      "step": 26
    },
    {
      "epoch": 0.06839772007599747,
      "grad_norm": 1.3770571947097778,
      "learning_rate": 0.0001746821476984154,
      "loss": 0.12,
      "step": 27
    },
    {
      "epoch": 0.07093096896770108,
      "grad_norm": 6.9208149909973145,
      "learning_rate": 0.00017244252047910892,
      "loss": 0.7353,
      "step": 28
    },
    {
      "epoch": 0.07346421785940468,
      "grad_norm": 2.6051218509674072,
      "learning_rate": 0.00017012367842724887,
      "loss": 0.2355,
      "step": 29
    },
    {
      "epoch": 0.0759974667511083,
      "grad_norm": 3.3124608993530273,
      "learning_rate": 0.00016772815716257412,
      "loss": 0.7072,
      "step": 30
    },
    {
      "epoch": 0.0785307156428119,
      "grad_norm": 2.073368787765503,
      "learning_rate": 0.00016525857615241687,
      "loss": 0.1301,
      "step": 31
    },
    {
      "epoch": 0.08106396453451552,
      "grad_norm": 0.6973274350166321,
      "learning_rate": 0.0001627176358473537,
      "loss": 0.0637,
      "step": 32
    },
    {
      "epoch": 0.08359721342621912,
      "grad_norm": 1.706817865371704,
      "learning_rate": 0.00016010811472830252,
      "loss": 0.125,
      "step": 33
    },
    {
      "epoch": 0.08613046231792273,
      "grad_norm": 1.9682927131652832,
      "learning_rate": 0.00015743286626829437,
      "loss": 0.3533,
      "step": 34
    },
    {
      "epoch": 0.08866371120962635,
      "grad_norm": 2.4648215770721436,
      "learning_rate": 0.00015469481581224272,
      "loss": 0.6709,
      "step": 35
    },
    {
      "epoch": 0.09119696010132995,
      "grad_norm": 2.308652877807617,
      "learning_rate": 0.00015189695737812152,
      "loss": 0.1397,
      "step": 36
    },
    {
      "epoch": 0.09373020899303357,
      "grad_norm": 3.2563140392303467,
      "learning_rate": 0.00014904235038305083,
      "loss": 0.3003,
      "step": 37
    },
    {
      "epoch": 0.09626345788473717,
      "grad_norm": 3.14212965965271,
      "learning_rate": 0.0001461341162978688,
      "loss": 0.2907,
      "step": 38
    },
    {
      "epoch": 0.09879670677644078,
      "grad_norm": 1.726691484451294,
      "learning_rate": 0.00014317543523384928,
      "loss": 0.0962,
      "step": 39
    },
    {
      "epoch": 0.1013299556681444,
      "grad_norm": 4.3779449462890625,
      "learning_rate": 0.00014016954246529696,
      "loss": 0.3433,
      "step": 40
    },
    {
      "epoch": 0.103863204559848,
      "grad_norm": 3.401987314224243,
      "learning_rate": 0.00013711972489182208,
      "loss": 0.2321,
      "step": 41
    },
    {
      "epoch": 0.10639645345155162,
      "grad_norm": 1.7866804599761963,
      "learning_rate": 0.00013402931744416433,
      "loss": 0.0461,
      "step": 42
    },
    {
      "epoch": 0.10892970234325522,
      "grad_norm": 2.7210962772369385,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.2669,
      "step": 43
    },
    {
      "epoch": 0.11146295123495883,
      "grad_norm": 0.31108513474464417,
      "learning_rate": 0.00012774029087618446,
      "loss": 0.0179,
      "step": 44
    },
    {
      "epoch": 0.11399620012666245,
      "grad_norm": 1.0655226707458496,
      "learning_rate": 0.00012454854871407994,
      "loss": 0.1818,
      "step": 45
    },
    {
      "epoch": 0.11652944901836605,
      "grad_norm": 2.1581103801727295,
      "learning_rate": 0.0001213299630743747,
      "loss": 0.4705,
      "step": 46
    },
    {
      "epoch": 0.11906269791006967,
      "grad_norm": 0.13762931525707245,
      "learning_rate": 0.000118088053433211,
      "loss": 0.0066,
      "step": 47
    },
    {
      "epoch": 0.12159594680177327,
      "grad_norm": 0.04102027043700218,
      "learning_rate": 0.0001148263647711842,
      "loss": 0.0021,
      "step": 48
    },
    {
      "epoch": 0.12412919569347688,
      "grad_norm": 0.09145709127187729,
      "learning_rate": 0.00011154846369695863,
      "loss": 0.0054,
      "step": 49
    },
    {
      "epoch": 0.1266624445851805,
      "grad_norm": 0.6207671165466309,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.009,
      "step": 50
    },
    {
      "epoch": 0.1266624445851805,
      "eval_loss": 0.13963158428668976,
      "eval_runtime": 45.5826,
      "eval_samples_per_second": 3.664,
      "eval_steps_per_second": 1.843,
      "step": 50
    },
    {
      "epoch": 0.12919569347688412,
      "grad_norm": 4.131649494171143,
      "learning_rate": 0.00010495837546732224,
      "loss": 0.0664,
      "step": 51
    },
    {
      "epoch": 0.1317289423685877,
      "grad_norm": 1.5766167640686035,
      "learning_rate": 0.00010165339447663587,
      "loss": 0.0462,
      "step": 52
    },
    {
      "epoch": 0.13426219126029132,
      "grad_norm": 0.21246828138828278,
      "learning_rate": 9.834660552336415e-05,
      "loss": 0.017,
      "step": 53
    },
    {
      "epoch": 0.13679544015199493,
      "grad_norm": 0.4235173761844635,
      "learning_rate": 9.504162453267777e-05,
      "loss": 0.0497,
      "step": 54
    },
    {
      "epoch": 0.13932868904369855,
      "grad_norm": 1.9022583961486816,
      "learning_rate": 9.174206545276677e-05,
      "loss": 0.1731,
      "step": 55
    },
    {
      "epoch": 0.14186193793540217,
      "grad_norm": 0.0696033164858818,
      "learning_rate": 8.845153630304139e-05,
      "loss": 0.0074,
      "step": 56
    },
    {
      "epoch": 0.14439518682710575,
      "grad_norm": 0.38361629843711853,
      "learning_rate": 8.517363522881579e-05,
      "loss": 0.0308,
      "step": 57
    },
    {
      "epoch": 0.14692843571880937,
      "grad_norm": 0.0552719309926033,
      "learning_rate": 8.191194656678904e-05,
      "loss": 0.0051,
      "step": 58
    },
    {
      "epoch": 0.14946168461051298,
      "grad_norm": 0.4546244144439697,
      "learning_rate": 7.867003692562534e-05,
      "loss": 0.0815,
      "step": 59
    },
    {
      "epoch": 0.1519949335022166,
      "grad_norm": 1.4817312955856323,
      "learning_rate": 7.54514512859201e-05,
      "loss": 0.0728,
      "step": 60
    },
    {
      "epoch": 0.15452818239392022,
      "grad_norm": 0.29890382289886475,
      "learning_rate": 7.225970912381556e-05,
      "loss": 0.023,
      "step": 61
    },
    {
      "epoch": 0.1570614312856238,
      "grad_norm": 0.16949699819087982,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.0116,
      "step": 62
    },
    {
      "epoch": 0.15959468017732742,
      "grad_norm": 0.6186251044273376,
      "learning_rate": 6.59706825558357e-05,
      "loss": 0.1467,
      "step": 63
    },
    {
      "epoch": 0.16212792906903103,
      "grad_norm": 0.24091556668281555,
      "learning_rate": 6.28802751081779e-05,
      "loss": 0.0387,
      "step": 64
    },
    {
      "epoch": 0.16466117796073465,
      "grad_norm": 0.2896825969219208,
      "learning_rate": 5.983045753470308e-05,
      "loss": 0.0173,
      "step": 65
    },
    {
      "epoch": 0.16719442685243824,
      "grad_norm": 0.030425388365983963,
      "learning_rate": 5.6824564766150726e-05,
      "loss": 0.0029,
      "step": 66
    },
    {
      "epoch": 0.16972767574414185,
      "grad_norm": 0.41832366585731506,
      "learning_rate": 5.386588370213124e-05,
      "loss": 0.0403,
      "step": 67
    },
    {
      "epoch": 0.17226092463584547,
      "grad_norm": 1.3154964447021484,
      "learning_rate": 5.095764961694922e-05,
      "loss": 0.2766,
      "step": 68
    },
    {
      "epoch": 0.17479417352754908,
      "grad_norm": 1.726725459098816,
      "learning_rate": 4.810304262187852e-05,
      "loss": 0.4326,
      "step": 69
    },
    {
      "epoch": 0.1773274224192527,
      "grad_norm": 0.03612220287322998,
      "learning_rate": 4.530518418775733e-05,
      "loss": 0.0035,
      "step": 70
    },
    {
      "epoch": 0.1798606713109563,
      "grad_norm": 0.5889875292778015,
      "learning_rate": 4.256713373170564e-05,
      "loss": 0.1592,
      "step": 71
    },
    {
      "epoch": 0.1823939202026599,
      "grad_norm": 1.068608283996582,
      "learning_rate": 3.9891885271697496e-05,
      "loss": 0.2189,
      "step": 72
    },
    {
      "epoch": 0.18492716909436352,
      "grad_norm": 0.20104023814201355,
      "learning_rate": 3.7282364152646297e-05,
      "loss": 0.0261,
      "step": 73
    },
    {
      "epoch": 0.18746041798606713,
      "grad_norm": 0.7733007073402405,
      "learning_rate": 3.4741423847583134e-05,
      "loss": 0.1253,
      "step": 74
    },
    {
      "epoch": 0.18999366687777075,
      "grad_norm": 1.527602195739746,
      "learning_rate": 3.227184283742591e-05,
      "loss": 0.1233,
      "step": 75
    },
    {
      "epoch": 0.19252691576947434,
      "grad_norm": 0.035904768854379654,
      "learning_rate": 2.9876321572751144e-05,
      "loss": 0.0033,
      "step": 76
    },
    {
      "epoch": 0.19506016466117795,
      "grad_norm": 1.0097168684005737,
      "learning_rate": 2.7557479520891104e-05,
      "loss": 0.1862,
      "step": 77
    },
    {
      "epoch": 0.19759341355288157,
      "grad_norm": 0.6764742732048035,
      "learning_rate": 2.5317852301584643e-05,
      "loss": 0.1849,
      "step": 78
    },
    {
      "epoch": 0.20012666244458518,
      "grad_norm": 0.2706001102924347,
      "learning_rate": 2.315988891431412e-05,
      "loss": 0.018,
      "step": 79
    },
    {
      "epoch": 0.2026599113362888,
      "grad_norm": 0.06733691692352295,
      "learning_rate": 2.1085949060360654e-05,
      "loss": 0.0067,
      "step": 80
    },
    {
      "epoch": 0.2051931602279924,
      "grad_norm": 0.09681247174739838,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.0049,
      "step": 81
    },
    {
      "epoch": 0.207726409119696,
      "grad_norm": 0.052867814898490906,
      "learning_rate": 1.7199116885197995e-05,
      "loss": 0.0051,
      "step": 82
    },
    {
      "epoch": 0.21025965801139962,
      "grad_norm": 0.16184115409851074,
      "learning_rate": 1.5390474757906446e-05,
      "loss": 0.0129,
      "step": 83
    },
    {
      "epoch": 0.21279290690310323,
      "grad_norm": 1.406388759613037,
      "learning_rate": 1.3674351904242611e-05,
      "loss": 0.1817,
      "step": 84
    },
    {
      "epoch": 0.21532615579480685,
      "grad_norm": 0.08576802909374237,
      "learning_rate": 1.2052624879351104e-05,
      "loss": 0.007,
      "step": 85
    },
    {
      "epoch": 0.21785940468651044,
      "grad_norm": 1.425647497177124,
      "learning_rate": 1.0527067017923654e-05,
      "loss": 0.1835,
      "step": 86
    },
    {
      "epoch": 0.22039265357821405,
      "grad_norm": 0.06112959608435631,
      "learning_rate": 9.09934649508375e-06,
      "loss": 0.0061,
      "step": 87
    },
    {
      "epoch": 0.22292590246991767,
      "grad_norm": 0.14833374321460724,
      "learning_rate": 7.771024502261526e-06,
      "loss": 0.0081,
      "step": 88
    },
    {
      "epoch": 0.22545915136162129,
      "grad_norm": 0.27038395404815674,
      "learning_rate": 6.543553540053926e-06,
      "loss": 0.0451,
      "step": 89
    },
    {
      "epoch": 0.2279924002533249,
      "grad_norm": 0.3804483115673065,
      "learning_rate": 5.418275829936537e-06,
      "loss": 0.0779,
      "step": 90
    },
    {
      "epoch": 0.2305256491450285,
      "grad_norm": 0.04810729995369911,
      "learning_rate": 4.3964218465642355e-06,
      "loss": 0.0033,
      "step": 91
    },
    {
      "epoch": 0.2330588980367321,
      "grad_norm": 0.12134478241205215,
      "learning_rate": 3.4791089722651436e-06,
      "loss": 0.0067,
      "step": 92
    },
    {
      "epoch": 0.23559214692843572,
      "grad_norm": 1.7594163417816162,
      "learning_rate": 2.667340275199426e-06,
      "loss": 0.0616,
      "step": 93
    },
    {
      "epoch": 0.23812539582013934,
      "grad_norm": 1.7588186264038086,
      "learning_rate": 1.9620034125190644e-06,
      "loss": 0.2098,
      "step": 94
    },
    {
      "epoch": 0.24065864471184295,
      "grad_norm": 0.091976098716259,
      "learning_rate": 1.3638696597277679e-06,
      "loss": 0.0068,
      "step": 95
    },
    {
      "epoch": 0.24319189360354654,
      "grad_norm": 0.14842090010643005,
      "learning_rate": 8.735930673024806e-07,
      "loss": 0.0052,
      "step": 96
    },
    {
      "epoch": 0.24572514249525015,
      "grad_norm": 0.11459323018789291,
      "learning_rate": 4.917097454988584e-07,
      "loss": 0.0039,
      "step": 97
    },
    {
      "epoch": 0.24825839138695377,
      "grad_norm": 0.2080399990081787,
      "learning_rate": 2.1863727812254653e-07,
      "loss": 0.007,
      "step": 98
    },
    {
      "epoch": 0.25079164027865736,
      "grad_norm": 0.6975619792938232,
      "learning_rate": 5.467426590739511e-08,
      "loss": 0.0261,
      "step": 99
    },
    {
      "epoch": 0.253324889170361,
      "grad_norm": 0.08974107354879379,
      "learning_rate": 0.0,
      "loss": 0.0033,
      "step": 100
    },
    {
      "epoch": 0.253324889170361,
      "eval_loss": 0.12361826002597809,
      "eval_runtime": 45.5565,
      "eval_samples_per_second": 3.666,
      "eval_steps_per_second": 1.844,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.32218823376896e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}