|
{
  "best_metric": 0.14205443859100342,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.44958553833185033,
  "eval_steps": 25,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0029972369222123355,
      "grad_norm": 7.602510929107666,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 7.3791,
      "step": 1
    },
    {
      "epoch": 0.0029972369222123355,
      "eval_loss": 5.100011348724365,
      "eval_runtime": 0.2905,
      "eval_samples_per_second": 172.095,
      "eval_steps_per_second": 44.745,
      "step": 1
    },
    {
      "epoch": 0.005994473844424671,
      "grad_norm": 5.811465263366699,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 5.2052,
      "step": 2
    },
    {
      "epoch": 0.008991710766637007,
      "grad_norm": 6.1456217765808105,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 5.0327,
      "step": 3
    },
    {
      "epoch": 0.011988947688849342,
      "grad_norm": 5.857211112976074,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 4.9457,
      "step": 4
    },
    {
      "epoch": 0.014986184611061678,
      "grad_norm": 5.512299537658691,
      "learning_rate": 7.5e-05,
      "loss": 4.8063,
      "step": 5
    },
    {
      "epoch": 0.017983421533274015,
      "grad_norm": 5.282471179962158,
      "learning_rate": 8.999999999999999e-05,
      "loss": 4.7355,
      "step": 6
    },
    {
      "epoch": 0.02098065845548635,
      "grad_norm": 4.267989158630371,
      "learning_rate": 0.00010499999999999999,
      "loss": 4.4458,
      "step": 7
    },
    {
      "epoch": 0.023977895377698684,
      "grad_norm": 3.799853801727295,
      "learning_rate": 0.00011999999999999999,
      "loss": 4.2302,
      "step": 8
    },
    {
      "epoch": 0.02697513229991102,
      "grad_norm": 3.506157875061035,
      "learning_rate": 0.000135,
      "loss": 3.857,
      "step": 9
    },
    {
      "epoch": 0.029972369222123357,
      "grad_norm": 3.8407812118530273,
      "learning_rate": 0.00015,
      "loss": 3.749,
      "step": 10
    },
    {
      "epoch": 0.03296960614433569,
      "grad_norm": 4.470568656921387,
      "learning_rate": 0.000165,
      "loss": 3.4078,
      "step": 11
    },
    {
      "epoch": 0.03596684306654803,
      "grad_norm": 4.822042465209961,
      "learning_rate": 0.00017999999999999998,
      "loss": 3.0358,
      "step": 12
    },
    {
      "epoch": 0.038964079988760364,
      "grad_norm": 5.060558795928955,
      "learning_rate": 0.000195,
      "loss": 4.5688,
      "step": 13
    },
    {
      "epoch": 0.0419613169109727,
      "grad_norm": 4.888822078704834,
      "learning_rate": 0.00020999999999999998,
      "loss": 3.6099,
      "step": 14
    },
    {
      "epoch": 0.04495855383318503,
      "grad_norm": 3.4810433387756348,
      "learning_rate": 0.000225,
      "loss": 2.4291,
      "step": 15
    },
    {
      "epoch": 0.04795579075539737,
      "grad_norm": 3.577241897583008,
      "learning_rate": 0.00023999999999999998,
      "loss": 2.1686,
      "step": 16
    },
    {
      "epoch": 0.0509530276776097,
      "grad_norm": 3.595766544342041,
      "learning_rate": 0.00025499999999999996,
      "loss": 1.9973,
      "step": 17
    },
    {
      "epoch": 0.05395026459982204,
      "grad_norm": 4.025913715362549,
      "learning_rate": 0.00027,
      "loss": 1.9723,
      "step": 18
    },
    {
      "epoch": 0.05694750152203437,
      "grad_norm": 2.864335536956787,
      "learning_rate": 0.000285,
      "loss": 1.9312,
      "step": 19
    },
    {
      "epoch": 0.05994473844424671,
      "grad_norm": 3.9801218509674072,
      "learning_rate": 0.0003,
      "loss": 1.8875,
      "step": 20
    },
    {
      "epoch": 0.06294197536645904,
      "grad_norm": 3.2217376232147217,
      "learning_rate": 0.00029999841345404617,
      "loss": 1.5763,
      "step": 21
    },
    {
      "epoch": 0.06593921228867138,
      "grad_norm": 19.193851470947266,
      "learning_rate": 0.0002999936538534755,
      "loss": 1.8067,
      "step": 22
    },
    {
      "epoch": 0.06893644921088371,
      "grad_norm": 25.025583267211914,
      "learning_rate": 0.0002999857213101595,
      "loss": 1.4576,
      "step": 23
    },
    {
      "epoch": 0.07193368613309606,
      "grad_norm": 4.855524063110352,
      "learning_rate": 0.00029997461601054764,
      "loss": 1.0614,
      "step": 24
    },
    {
      "epoch": 0.0749309230553084,
      "grad_norm": 3.3954389095306396,
      "learning_rate": 0.00029996033821566326,
      "loss": 0.8964,
      "step": 25
    },
    {
      "epoch": 0.0749309230553084,
      "eval_loss": 1.0534682273864746,
      "eval_runtime": 0.2881,
      "eval_samples_per_second": 173.557,
      "eval_steps_per_second": 45.125,
      "step": 25
    },
    {
      "epoch": 0.07792815997752073,
      "grad_norm": 12.105545997619629,
      "learning_rate": 0.0002999428882610971,
      "loss": 3.0311,
      "step": 26
    },
    {
      "epoch": 0.08092539689973306,
      "grad_norm": 3.334498882293701,
      "learning_rate": 0.00029992226655699945,
      "loss": 1.2946,
      "step": 27
    },
    {
      "epoch": 0.0839226338219454,
      "grad_norm": 2.8498733043670654,
      "learning_rate": 0.00029989847358807104,
      "loss": 1.1872,
      "step": 28
    },
    {
      "epoch": 0.08691987074415773,
      "grad_norm": 2.367673873901367,
      "learning_rate": 0.0002998715099135508,
      "loss": 1.0292,
      "step": 29
    },
    {
      "epoch": 0.08991710766637007,
      "grad_norm": 8.79304313659668,
      "learning_rate": 0.00029984137616720325,
      "loss": 1.2032,
      "step": 30
    },
    {
      "epoch": 0.0929143445885824,
      "grad_norm": 8.142653465270996,
      "learning_rate": 0.00029980807305730374,
      "loss": 1.2389,
      "step": 31
    },
    {
      "epoch": 0.09591158151079474,
      "grad_norm": 4.5038628578186035,
      "learning_rate": 0.0002997716013666212,
      "loss": 1.0933,
      "step": 32
    },
    {
      "epoch": 0.09890881843300707,
      "grad_norm": 2.439879894256592,
      "learning_rate": 0.0002997319619524003,
      "loss": 0.922,
      "step": 33
    },
    {
      "epoch": 0.1019060553552194,
      "grad_norm": 2.002732992172241,
      "learning_rate": 0.0002996891557463412,
      "loss": 0.8141,
      "step": 34
    },
    {
      "epoch": 0.10490329227743174,
      "grad_norm": 1.9198921918869019,
      "learning_rate": 0.00029964318375457725,
      "loss": 0.5868,
      "step": 35
    },
    {
      "epoch": 0.10790052919964407,
      "grad_norm": 1.1390972137451172,
      "learning_rate": 0.00029959404705765186,
      "loss": 0.4158,
      "step": 36
    },
    {
      "epoch": 0.11089776612185641,
      "grad_norm": 0.9061455130577087,
      "learning_rate": 0.00029954174681049296,
      "loss": 0.3104,
      "step": 37
    },
    {
      "epoch": 0.11389500304406874,
      "grad_norm": 3.6855151653289795,
      "learning_rate": 0.0002994862842423856,
      "loss": 1.3318,
      "step": 38
    },
    {
      "epoch": 0.11689223996628108,
      "grad_norm": 2.992558717727661,
      "learning_rate": 0.00029942766065694333,
      "loss": 1.0331,
      "step": 39
    },
    {
      "epoch": 0.11988947688849343,
      "grad_norm": 2.762493133544922,
      "learning_rate": 0.00029936587743207736,
      "loss": 0.7132,
      "step": 40
    },
    {
      "epoch": 0.12288671381070576,
      "grad_norm": 2.2291383743286133,
      "learning_rate": 0.00029930093601996446,
      "loss": 0.5774,
      "step": 41
    },
    {
      "epoch": 0.12588395073291808,
      "grad_norm": 1.1489959955215454,
      "learning_rate": 0.0002992328379470125,
      "loss": 0.5567,
      "step": 42
    },
    {
      "epoch": 0.12888118765513043,
      "grad_norm": 6.168476104736328,
      "learning_rate": 0.00029916158481382474,
      "loss": 0.6131,
      "step": 43
    },
    {
      "epoch": 0.13187842457734275,
      "grad_norm": 5.408211708068848,
      "learning_rate": 0.0002990871782951623,
      "loss": 0.6638,
      "step": 44
    },
    {
      "epoch": 0.1348756614995551,
      "grad_norm": 1.5306873321533203,
      "learning_rate": 0.0002990096201399045,
      "loss": 0.5488,
      "step": 45
    },
    {
      "epoch": 0.13787289842176742,
      "grad_norm": 1.8623676300048828,
      "learning_rate": 0.00029892891217100817,
      "loss": 0.4659,
      "step": 46
    },
    {
      "epoch": 0.14087013534397977,
      "grad_norm": 1.6640721559524536,
      "learning_rate": 0.0002988450562854644,
      "loss": 0.4368,
      "step": 47
    },
    {
      "epoch": 0.14386737226619212,
      "grad_norm": 0.3559802174568176,
      "learning_rate": 0.0002987580544542541,
      "loss": 0.0507,
      "step": 48
    },
    {
      "epoch": 0.14686460918840444,
      "grad_norm": 0.4540441334247589,
      "learning_rate": 0.0002986679087223018,
      "loss": 0.087,
      "step": 49
    },
    {
      "epoch": 0.1498618461106168,
      "grad_norm": 0.8076233267784119,
      "learning_rate": 0.00029857462120842744,
      "loss": 0.1023,
      "step": 50
    },
    {
      "epoch": 0.1498618461106168,
      "eval_loss": 0.3283706307411194,
      "eval_runtime": 0.2882,
      "eval_samples_per_second": 173.489,
      "eval_steps_per_second": 45.107,
      "step": 50
    },
    {
      "epoch": 0.1528590830328291,
      "grad_norm": 4.228235721588135,
      "learning_rate": 0.0002984781941052967,
      "loss": 1.0819,
      "step": 51
    },
    {
      "epoch": 0.15585631995504146,
      "grad_norm": 3.424924612045288,
      "learning_rate": 0.0002983786296793692,
      "loss": 0.5749,
      "step": 52
    },
    {
      "epoch": 0.15885355687725378,
      "grad_norm": 3.9904696941375732,
      "learning_rate": 0.00029827593027084546,
      "loss": 0.5831,
      "step": 53
    },
    {
      "epoch": 0.16185079379946612,
      "grad_norm": 2.694119930267334,
      "learning_rate": 0.00029817009829361196,
      "loss": 0.4457,
      "step": 54
    },
    {
      "epoch": 0.16484803072167845,
      "grad_norm": 1.2387803792953491,
      "learning_rate": 0.00029806113623518407,
      "loss": 0.3431,
      "step": 55
    },
    {
      "epoch": 0.1678452676438908,
      "grad_norm": 1.6821039915084839,
      "learning_rate": 0.0002979490466566481,
      "loss": 0.3905,
      "step": 56
    },
    {
      "epoch": 0.17084250456610311,
      "grad_norm": 0.6684949994087219,
      "learning_rate": 0.00029783383219260037,
      "loss": 0.3056,
      "step": 57
    },
    {
      "epoch": 0.17383974148831546,
      "grad_norm": 0.7967026233673096,
      "learning_rate": 0.0002977154955510861,
      "loss": 0.3333,
      "step": 58
    },
    {
      "epoch": 0.17683697841052778,
      "grad_norm": 0.5187807083129883,
      "learning_rate": 0.0002975940395135351,
      "loss": 0.2867,
      "step": 59
    },
    {
      "epoch": 0.17983421533274013,
      "grad_norm": 7.2272138595581055,
      "learning_rate": 0.00029746946693469693,
      "loss": 1.0056,
      "step": 60
    },
    {
      "epoch": 0.18283145225495245,
      "grad_norm": 8.9348726272583,
      "learning_rate": 0.00029734178074257325,
      "loss": 0.7786,
      "step": 61
    },
    {
      "epoch": 0.1858286891771648,
      "grad_norm": 8.19704532623291,
      "learning_rate": 0.0002972109839383494,
      "loss": 0.3928,
      "step": 62
    },
    {
      "epoch": 0.18882592609937715,
      "grad_norm": 4.3927483558654785,
      "learning_rate": 0.00029707707959632386,
      "loss": 0.7528,
      "step": 63
    },
    {
      "epoch": 0.19182316302158947,
      "grad_norm": 2.271043539047241,
      "learning_rate": 0.0002969400708638358,
      "loss": 0.4877,
      "step": 64
    },
    {
      "epoch": 0.19482039994380182,
      "grad_norm": 1.3864595890045166,
      "learning_rate": 0.000296799960961191,
      "loss": 0.2625,
      "step": 65
    },
    {
      "epoch": 0.19781763686601414,
      "grad_norm": 2.2087323665618896,
      "learning_rate": 0.00029665675318158656,
      "loss": 0.2945,
      "step": 66
    },
    {
      "epoch": 0.2008148737882265,
      "grad_norm": 3.766403913497925,
      "learning_rate": 0.00029651045089103316,
      "loss": 0.3807,
      "step": 67
    },
    {
      "epoch": 0.2038121107104388,
      "grad_norm": 2.598832368850708,
      "learning_rate": 0.0002963610575282762,
      "loss": 0.3149,
      "step": 68
    },
    {
      "epoch": 0.20680934763265116,
      "grad_norm": 0.677237331867218,
      "learning_rate": 0.0002962085766047146,
      "loss": 0.346,
      "step": 69
    },
    {
      "epoch": 0.20980658455486348,
      "grad_norm": 0.5142577886581421,
      "learning_rate": 0.00029605301170431867,
      "loss": 0.2855,
      "step": 70
    },
    {
      "epoch": 0.21280382147707583,
      "grad_norm": 0.5518949031829834,
      "learning_rate": 0.00029589436648354566,
      "loss": 0.3163,
      "step": 71
    },
    {
      "epoch": 0.21580105839928815,
      "grad_norm": 0.336823046207428,
      "learning_rate": 0.00029573264467125377,
      "loss": 0.16,
      "step": 72
    },
    {
      "epoch": 0.2187982953215005,
      "grad_norm": 0.2474360167980194,
      "learning_rate": 0.0002955678500686147,
      "loss": 0.0297,
      "step": 73
    },
    {
      "epoch": 0.22179553224371282,
      "grad_norm": 0.18458165228366852,
      "learning_rate": 0.0002953999865490242,
      "loss": 0.0609,
      "step": 74
    },
    {
      "epoch": 0.22479276916592517,
      "grad_norm": 0.36120983958244324,
      "learning_rate": 0.0002952290580580109,
      "loss": 0.0862,
      "step": 75
    },
    {
      "epoch": 0.22479276916592517,
      "eval_loss": 0.16452732682228088,
      "eval_runtime": 0.2888,
      "eval_samples_per_second": 173.11,
      "eval_steps_per_second": 45.009,
      "step": 75
    },
    {
      "epoch": 0.2277900060881375,
      "grad_norm": 1.3333512544631958,
      "learning_rate": 0.0002950550686131438,
      "loss": 0.6146,
      "step": 76
    },
    {
      "epoch": 0.23078724301034984,
      "grad_norm": 1.2993559837341309,
      "learning_rate": 0.00029487802230393777,
      "loss": 0.2574,
      "step": 77
    },
    {
      "epoch": 0.23378447993256216,
      "grad_norm": 1.2781016826629639,
      "learning_rate": 0.00029469792329175725,
      "loss": 0.2978,
      "step": 78
    },
    {
      "epoch": 0.2367817168547745,
      "grad_norm": 5.145886421203613,
      "learning_rate": 0.0002945147758097187,
      "loss": 0.3251,
      "step": 79
    },
    {
      "epoch": 0.23977895377698685,
      "grad_norm": 5.573575019836426,
      "learning_rate": 0.00029432858416259097,
      "loss": 0.3483,
      "step": 80
    },
    {
      "epoch": 0.24277619069919917,
      "grad_norm": 2.6032469272613525,
      "learning_rate": 0.0002941393527266941,
      "loss": 0.306,
      "step": 81
    },
    {
      "epoch": 0.24577342762141152,
      "grad_norm": 0.6271111965179443,
      "learning_rate": 0.00029394708594979657,
      "loss": 0.318,
      "step": 82
    },
    {
      "epoch": 0.24877066454362384,
      "grad_norm": 0.5439050793647766,
      "learning_rate": 0.0002937517883510106,
      "loss": 0.2547,
      "step": 83
    },
    {
      "epoch": 0.25176790146583616,
      "grad_norm": 0.5188155770301819,
      "learning_rate": 0.0002935534645206861,
      "loss": 0.2402,
      "step": 84
    },
    {
      "epoch": 0.25476513838804854,
      "grad_norm": 1.3832889795303345,
      "learning_rate": 0.00029335211912030247,
      "loss": 0.147,
      "step": 85
    },
    {
      "epoch": 0.25776237531026086,
      "grad_norm": 0.20522421598434448,
      "learning_rate": 0.0002931477568823596,
      "loss": 0.0365,
      "step": 86
    },
    {
      "epoch": 0.2607596122324732,
      "grad_norm": 0.07317493855953217,
      "learning_rate": 0.00029294038261026595,
      "loss": 0.0178,
      "step": 87
    },
    {
      "epoch": 0.2637568491546855,
      "grad_norm": 5.754029273986816,
      "learning_rate": 0.0002927300011782263,
      "loss": 0.5049,
      "step": 88
    },
    {
      "epoch": 0.2667540860768979,
      "grad_norm": 1.9069617986679077,
      "learning_rate": 0.0002925166175311266,
      "loss": 0.297,
      "step": 89
    },
    {
      "epoch": 0.2697513229991102,
      "grad_norm": 1.4803589582443237,
      "learning_rate": 0.0002923002366844182,
      "loss": 0.2419,
      "step": 90
    },
    {
      "epoch": 0.2727485599213225,
      "grad_norm": 1.1726033687591553,
      "learning_rate": 0.0002920808637239998,
      "loss": 0.2449,
      "step": 91
    },
    {
      "epoch": 0.27574579684353484,
      "grad_norm": 1.1483900547027588,
      "learning_rate": 0.00029185850380609757,
      "loss": 0.2845,
      "step": 92
    },
    {
      "epoch": 0.2787430337657472,
      "grad_norm": 0.6498438715934753,
      "learning_rate": 0.00029163316215714477,
      "loss": 0.3168,
      "step": 93
    },
    {
      "epoch": 0.28174027068795954,
      "grad_norm": 3.8744821548461914,
      "learning_rate": 0.00029140484407365807,
      "loss": 0.3098,
      "step": 94
    },
    {
      "epoch": 0.28473750761017186,
      "grad_norm": 5.238924026489258,
      "learning_rate": 0.00029117355492211345,
      "loss": 0.3747,
      "step": 95
    },
    {
      "epoch": 0.28773474453238423,
      "grad_norm": 4.835148334503174,
      "learning_rate": 0.0002909393001388201,
      "loss": 0.311,
      "step": 96
    },
    {
      "epoch": 0.29073198145459656,
      "grad_norm": 3.1126749515533447,
      "learning_rate": 0.00029070208522979246,
      "loss": 0.1933,
      "step": 97
    },
    {
      "epoch": 0.2937292183768089,
      "grad_norm": 0.31741341948509216,
      "learning_rate": 0.000290461915770621,
      "loss": 0.0311,
      "step": 98
    },
    {
      "epoch": 0.2967264552990212,
      "grad_norm": 0.13816803693771362,
      "learning_rate": 0.00029021879740634106,
      "loss": 0.0489,
      "step": 99
    },
    {
      "epoch": 0.2997236922212336,
      "grad_norm": 0.16050726175308228,
      "learning_rate": 0.0002899727358513002,
      "loss": 0.0421,
      "step": 100
    },
    {
      "epoch": 0.2997236922212336,
      "eval_loss": 0.14205443859100342,
      "eval_runtime": 0.2881,
      "eval_samples_per_second": 173.58,
      "eval_steps_per_second": 45.131,
      "step": 100
    },
    {
      "epoch": 0.3027209291434459,
      "grad_norm": 4.475709438323975,
      "learning_rate": 0.0002897237368890237,
      "loss": 0.7248,
      "step": 101
    },
    {
      "epoch": 0.3057181660656582,
      "grad_norm": 1.3565483093261719,
      "learning_rate": 0.00028947180637207894,
      "loss": 0.2832,
      "step": 102
    },
    {
      "epoch": 0.30871540298787054,
      "grad_norm": 1.5489473342895508,
      "learning_rate": 0.0002892169502219377,
      "loss": 0.3576,
      "step": 103
    },
    {
      "epoch": 0.3117126399100829,
      "grad_norm": 1.2731544971466064,
      "learning_rate": 0.00028895917442883697,
      "loss": 0.2847,
      "step": 104
    },
    {
      "epoch": 0.31470987683229523,
      "grad_norm": 0.5488539934158325,
      "learning_rate": 0.000288698485051638,
      "loss": 0.2249,
      "step": 105
    },
    {
      "epoch": 0.31770711375450755,
      "grad_norm": 0.6603264808654785,
      "learning_rate": 0.0002884348882176842,
      "loss": 0.2748,
      "step": 106
    },
    {
      "epoch": 0.3207043506767199,
      "grad_norm": 1.190644383430481,
      "learning_rate": 0.0002881683901226569,
      "loss": 0.264,
      "step": 107
    },
    {
      "epoch": 0.32370158759893225,
      "grad_norm": 3.849586248397827,
      "learning_rate": 0.00028789899703042976,
      "loss": 0.2899,
      "step": 108
    },
    {
      "epoch": 0.32669882452114457,
      "grad_norm": 2.8036534786224365,
      "learning_rate": 0.00028762671527292165,
      "loss": 0.2266,
      "step": 109
    },
    {
      "epoch": 0.3296960614433569,
      "grad_norm": 1.1855659484863281,
      "learning_rate": 0.00028735155124994774,
      "loss": 0.1432,
      "step": 110
    },
    {
      "epoch": 0.33269329836556927,
      "grad_norm": 0.14694784581661224,
      "learning_rate": 0.0002870735114290689,
      "loss": 0.0225,
      "step": 111
    },
    {
      "epoch": 0.3356905352877816,
      "grad_norm": 0.08722022920846939,
      "learning_rate": 0.0002867926023454401,
      "loss": 0.0243,
      "step": 112
    },
    {
      "epoch": 0.3386877722099939,
      "grad_norm": 1.4064624309539795,
      "learning_rate": 0.00028650883060165634,
      "loss": 0.4202,
      "step": 113
    },
    {
      "epoch": 0.34168500913220623,
      "grad_norm": 1.0942851305007935,
      "learning_rate": 0.00028622220286759787,
      "loss": 0.2193,
      "step": 114
    },
    {
      "epoch": 0.3446822460544186,
      "grad_norm": 1.4071762561798096,
      "learning_rate": 0.0002859327258802732,
      "loss": 0.2772,
      "step": 115
    },
    {
      "epoch": 0.3476794829766309,
      "grad_norm": 1.451495885848999,
      "learning_rate": 0.0002856404064436606,
      "loss": 0.3247,
      "step": 116
    },
    {
      "epoch": 0.35067671989884325,
      "grad_norm": 0.8838515281677246,
      "learning_rate": 0.0002853452514285487,
      "loss": 0.2307,
      "step": 117
    },
    {
      "epoch": 0.35367395682105557,
      "grad_norm": 0.43812039494514465,
      "learning_rate": 0.0002850472677723743,
      "loss": 0.2225,
      "step": 118
    },
    {
      "epoch": 0.35667119374326794,
      "grad_norm": 1.919777274131775,
      "learning_rate": 0.0002847464624790599,
      "loss": 0.2529,
      "step": 119
    },
    {
      "epoch": 0.35966843066548027,
      "grad_norm": 3.364579916000366,
      "learning_rate": 0.00028444284261884876,
      "loss": 0.2957,
      "step": 120
    },
    {
      "epoch": 0.3626656675876926,
      "grad_norm": 3.1780946254730225,
      "learning_rate": 0.0002841364153281389,
      "loss": 0.2983,
      "step": 121
    },
    {
      "epoch": 0.3656629045099049,
      "grad_norm": 1.9313762187957764,
      "learning_rate": 0.000283827187809315,
      "loss": 0.1741,
      "step": 122
    },
    {
      "epoch": 0.3686601414321173,
      "grad_norm": 0.2901758849620819,
      "learning_rate": 0.0002835151673305797,
      "loss": 0.0191,
      "step": 123
    },
    {
      "epoch": 0.3716573783543296,
      "grad_norm": 0.18410703539848328,
      "learning_rate": 0.00028320036122578225,
      "loss": 0.0579,
      "step": 124
    },
    {
      "epoch": 0.3746546152765419,
      "grad_norm": 0.06442587822675705,
      "learning_rate": 0.0002828827768942464,
      "loss": 0.0401,
      "step": 125
    },
    {
      "epoch": 0.3746546152765419,
      "eval_loss": 0.12974713742733002,
      "eval_runtime": 0.2912,
      "eval_samples_per_second": 171.727,
      "eval_steps_per_second": 44.649,
      "step": 125
    },
    {
      "epoch": 0.3776518521987543,
      "grad_norm": 5.953274726867676,
      "learning_rate": 0.00028256242180059644,
      "loss": 0.5857,
      "step": 126
    },
    {
      "epoch": 0.3806490891209666,
      "grad_norm": 1.0578787326812744,
      "learning_rate": 0.0002822393034745815,
      "loss": 0.1907,
      "step": 127
    },
    {
      "epoch": 0.38364632604317894,
      "grad_norm": 1.2669620513916016,
      "learning_rate": 0.0002819134295108992,
      "loss": 0.2632,
      "step": 128
    },
    {
      "epoch": 0.38664356296539126,
      "grad_norm": 1.2083393335342407,
      "learning_rate": 0.0002815848075690163,
      "loss": 0.2581,
      "step": 129
    },
    {
      "epoch": 0.38964079988760364,
      "grad_norm": 0.6875675916671753,
      "learning_rate": 0.00028125344537298933,
      "loss": 0.2141,
      "step": 130
    },
    {
      "epoch": 0.39263803680981596,
      "grad_norm": 0.6001489162445068,
      "learning_rate": 0.00028091935071128274,
      "loss": 0.2624,
      "step": 131
    },
    {
      "epoch": 0.3956352737320283,
      "grad_norm": 0.28877830505371094,
      "learning_rate": 0.00028058253143658596,
      "loss": 0.2115,
      "step": 132
    },
    {
      "epoch": 0.3986325106542406,
      "grad_norm": 0.8344032168388367,
      "learning_rate": 0.0002802429954656287,
      "loss": 0.234,
      "step": 133
    },
    {
      "epoch": 0.401629747576453,
      "grad_norm": 2.311697244644165,
      "learning_rate": 0.00027990075077899494,
      "loss": 0.173,
      "step": 134
    },
    {
      "epoch": 0.4046269844986653,
      "grad_norm": 0.9839431643486023,
      "learning_rate": 0.0002795558054209354,
      "loss": 0.0744,
      "step": 135
    },
    {
      "epoch": 0.4076242214208776,
      "grad_norm": 0.23247428238391876,
      "learning_rate": 0.0002792081674991785,
      "loss": 0.0324,
      "step": 136
    },
    {
      "epoch": 0.41062145834308994,
      "grad_norm": 0.11418312788009644,
      "learning_rate": 0.00027885784518473955,
      "loss": 0.0338,
      "step": 137
    },
    {
      "epoch": 0.4136186952653023,
      "grad_norm": 2.2888922691345215,
      "learning_rate": 0.0002785048467117289,
      "loss": 0.3951,
      "step": 138
    },
    {
      "epoch": 0.41661593218751464,
      "grad_norm": 1.3474727869033813,
      "learning_rate": 0.00027814918037715846,
      "loss": 0.2827,
      "step": 139
    },
    {
      "epoch": 0.41961316910972696,
      "grad_norm": 0.958093523979187,
      "learning_rate": 0.0002777908545407464,
      "loss": 0.2105,
      "step": 140
    },
    {
      "epoch": 0.4226104060319393,
      "grad_norm": 1.1106938123703003,
      "learning_rate": 0.00027742987762472104,
      "loss": 0.2451,
      "step": 141
    },
    {
      "epoch": 0.42560764295415165,
      "grad_norm": 0.991568386554718,
      "learning_rate": 0.0002770662581136226,
      "loss": 0.2107,
      "step": 142
    },
    {
      "epoch": 0.428604879876364,
      "grad_norm": 0.7932851314544678,
      "learning_rate": 0.0002767000045541039,
      "loss": 0.2097,
      "step": 143
    },
    {
      "epoch": 0.4316021167985763,
      "grad_norm": 0.4382745623588562,
      "learning_rate": 0.0002763311255547294,
      "loss": 0.2098,
      "step": 144
    },
    {
      "epoch": 0.4345993537207887,
      "grad_norm": 0.4467020034790039,
      "learning_rate": 0.0002759596297857729,
      "loss": 0.2249,
      "step": 145
    },
    {
      "epoch": 0.437596590643001,
      "grad_norm": 0.7452898025512695,
      "learning_rate": 0.0002755855259790139,
      "loss": 0.2022,
      "step": 146
    },
    {
      "epoch": 0.4405938275652133,
      "grad_norm": 0.8051543831825256,
      "learning_rate": 0.000275208822927532,
      "loss": 0.2018,
      "step": 147
    },
    {
      "epoch": 0.44359106448742563,
      "grad_norm": 0.4602643847465515,
      "learning_rate": 0.00027482952948550056,
      "loss": 0.0265,
      "step": 148
    },
    {
      "epoch": 0.446588301409638,
      "grad_norm": 0.16789375245571136,
      "learning_rate": 0.00027444765456797863,
      "loss": 0.0358,
      "step": 149
    },
    {
      "epoch": 0.44958553833185033,
      "grad_norm": 0.2375824749469757,
      "learning_rate": 0.000274063207150701,
      "loss": 0.0459,
      "step": 150
    },
    {
      "epoch": 0.44958553833185033,
      "eval_loss": 0.26043474674224854,
      "eval_runtime": 0.2878,
      "eval_samples_per_second": 173.721,
      "eval_steps_per_second": 45.167,
      "step": 150
    }
  ],
  "logging_steps": 1,
  "max_steps": 668,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.90057513051095e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}