|
{ |
|
"best_metric": 1.082283802134043e-06, |
|
"best_model_checkpoint": "miner_id_24/checkpoint-100", |
|
"epoch": 0.1509433962264151, |
|
"eval_steps": 25, |
|
"global_step": 100, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0015094339622641509, |
|
"grad_norm": 9.727989196777344, |
|
"learning_rate": 2.9999999999999997e-05, |
|
"loss": 2.9383, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0015094339622641509, |
|
"eval_loss": 5.682714939117432, |
|
"eval_runtime": 4.6749, |
|
"eval_samples_per_second": 10.695, |
|
"eval_steps_per_second": 1.497, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0030188679245283017, |
|
"grad_norm": 9.845698356628418, |
|
"learning_rate": 5.9999999999999995e-05, |
|
"loss": 3.161, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.004528301886792453, |
|
"grad_norm": 9.684377670288086, |
|
"learning_rate": 8.999999999999999e-05, |
|
"loss": 2.9913, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.0060377358490566035, |
|
"grad_norm": 11.722749710083008, |
|
"learning_rate": 0.00011999999999999999, |
|
"loss": 2.3506, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.007547169811320755, |
|
"grad_norm": 10.132170677185059, |
|
"learning_rate": 0.00015, |
|
"loss": 0.6881, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.009056603773584906, |
|
"grad_norm": 4.307816028594971, |
|
"learning_rate": 0.00017999999999999998, |
|
"loss": 0.1223, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.010566037735849057, |
|
"grad_norm": 0.7658965587615967, |
|
"learning_rate": 0.00020999999999999998, |
|
"loss": 0.0231, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.012075471698113207, |
|
"grad_norm": 0.04265095666050911, |
|
"learning_rate": 0.00023999999999999998, |
|
"loss": 0.0011, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.013584905660377358, |
|
"grad_norm": 0.017767086625099182, |
|
"learning_rate": 0.00027, |
|
"loss": 0.0003, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.01509433962264151, |
|
"grad_norm": 0.011602415703237057, |
|
"learning_rate": 0.0003, |
|
"loss": 0.0002, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.01660377358490566, |
|
"grad_norm": 0.03181695565581322, |
|
"learning_rate": 0.0002999794957488703, |
|
"loss": 0.0004, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.018113207547169812, |
|
"grad_norm": 0.05330698564648628, |
|
"learning_rate": 0.0002999179886011389, |
|
"loss": 0.0005, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.019622641509433963, |
|
"grad_norm": 0.025343570858240128, |
|
"learning_rate": 0.0002998154953722457, |
|
"loss": 0.0002, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.021132075471698115, |
|
"grad_norm": 0.0021175649017095566, |
|
"learning_rate": 0.00029967204408281613, |
|
"loss": 0.0, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.022641509433962263, |
|
"grad_norm": 0.0011911799665540457, |
|
"learning_rate": 0.00029948767395100045, |
|
"loss": 0.0, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.024150943396226414, |
|
"grad_norm": 0.0003560419427230954, |
|
"learning_rate": 0.0002992624353817517, |
|
"loss": 0.0, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.025660377358490565, |
|
"grad_norm": 0.00067779456730932, |
|
"learning_rate": 0.0002989963899530457, |
|
"loss": 0.0, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.027169811320754716, |
|
"grad_norm": 0.0009647771366871893, |
|
"learning_rate": 0.00029868961039904624, |
|
"loss": 0.0, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.028679245283018868, |
|
"grad_norm": 0.0003761858679354191, |
|
"learning_rate": 0.00029834218059022024, |
|
"loss": 0.0, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.03018867924528302, |
|
"grad_norm": 0.0005870265304110944, |
|
"learning_rate": 0.00029795419551040833, |
|
"loss": 0.0, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.03169811320754717, |
|
"grad_norm": 0.000383986858651042, |
|
"learning_rate": 0.00029752576123085736, |
|
"loss": 0.0, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.03320754716981132, |
|
"grad_norm": 0.005932660773396492, |
|
"learning_rate": 0.0002970569948812214, |
|
"loss": 0.0, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.03471698113207547, |
|
"grad_norm": 36.32376480102539, |
|
"learning_rate": 0.0002965480246175399, |
|
"loss": 0.2835, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.036226415094339624, |
|
"grad_norm": 2.1993796825408936, |
|
"learning_rate": 0.0002959989895872009, |
|
"loss": 0.0043, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.03773584905660377, |
|
"grad_norm": 0.00012767089356202632, |
|
"learning_rate": 0.0002954100398908995, |
|
"loss": 0.0, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.03773584905660377, |
|
"eval_loss": 7.517421181546524e-05, |
|
"eval_runtime": 4.2094, |
|
"eval_samples_per_second": 11.878, |
|
"eval_steps_per_second": 1.663, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.03924528301886793, |
|
"grad_norm": 0.0003870428481604904, |
|
"learning_rate": 0.0002947813365416023, |
|
"loss": 0.0, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.040754716981132075, |
|
"grad_norm": 0.005472894757986069, |
|
"learning_rate": 0.0002941130514205272, |
|
"loss": 0.0, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.04226415094339623, |
|
"grad_norm": 0.07725609093904495, |
|
"learning_rate": 0.0002934053672301536, |
|
"loss": 0.0004, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.04377358490566038, |
|
"grad_norm": 0.02922365628182888, |
|
"learning_rate": 0.00029265847744427303, |
|
"loss": 0.0001, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.045283018867924525, |
|
"grad_norm": 0.022537946701049805, |
|
"learning_rate": 0.00029187258625509513, |
|
"loss": 0.0001, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.04679245283018868, |
|
"grad_norm": 0.003779177786782384, |
|
"learning_rate": 0.00029104790851742417, |
|
"loss": 0.0, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.04830188679245283, |
|
"grad_norm": 0.0010583605617284775, |
|
"learning_rate": 0.0002901846696899191, |
|
"loss": 0.0, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.04981132075471698, |
|
"grad_norm": 0.00034484267234802246, |
|
"learning_rate": 0.00028928310577345606, |
|
"loss": 0.0, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.05132075471698113, |
|
"grad_norm": 0.00039084404124878347, |
|
"learning_rate": 0.0002883434632466077, |
|
"loss": 0.0, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.052830188679245285, |
|
"grad_norm": 0.0004337659338489175, |
|
"learning_rate": 0.00028736599899825856, |
|
"loss": 0.0, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.05433962264150943, |
|
"grad_norm": 0.0008137256954796612, |
|
"learning_rate": 0.00028635098025737434, |
|
"loss": 0.0, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.05584905660377359, |
|
"grad_norm": 0.0009190964046865702, |
|
"learning_rate": 0.00028529868451994384, |
|
"loss": 0.0, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.057358490566037736, |
|
"grad_norm": 0.0008511942578479648, |
|
"learning_rate": 0.0002842093994731145, |
|
"loss": 0.0, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.05886792452830188, |
|
"grad_norm": 0.0011096836533397436, |
|
"learning_rate": 0.00028308342291654174, |
|
"loss": 0.0, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.06037735849056604, |
|
"grad_norm": 0.0015806319424882531, |
|
"learning_rate": 0.00028192106268097334, |
|
"loss": 0.0, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.061886792452830186, |
|
"grad_norm": 0.0018580264877527952, |
|
"learning_rate": 0.00028072263654409154, |
|
"loss": 0.0, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.06339622641509433, |
|
"grad_norm": 0.0010565021075308323, |
|
"learning_rate": 0.0002794884721436361, |
|
"loss": 0.0, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.0649056603773585, |
|
"grad_norm": 0.0007690914790146053, |
|
"learning_rate": 0.00027821890688783083, |
|
"loss": 0.0, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.06641509433962264, |
|
"grad_norm": 0.004054183140397072, |
|
"learning_rate": 0.0002769142878631403, |
|
"loss": 0.0, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.06792452830188679, |
|
"grad_norm": 0.000730887521058321, |
|
"learning_rate": 0.00027557497173937923, |
|
"loss": 0.0, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.06943396226415094, |
|
"grad_norm": 0.00044435824383981526, |
|
"learning_rate": 0.000274201324672203, |
|
"loss": 0.0, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.0709433962264151, |
|
"grad_norm": 0.00042757357005029917, |
|
"learning_rate": 0.00027279372220300385, |
|
"loss": 0.0, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.07245283018867925, |
|
"grad_norm": 0.0003725688438862562, |
|
"learning_rate": 0.0002713525491562421, |
|
"loss": 0.0, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.0739622641509434, |
|
"grad_norm": 0.001020943047478795, |
|
"learning_rate": 0.00026987819953423867, |
|
"loss": 0.0, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.07547169811320754, |
|
"grad_norm": 0.0002570598153397441, |
|
"learning_rate": 0.00026837107640945905, |
|
"loss": 0.0, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.07547169811320754, |
|
"eval_loss": 2.3195319954538718e-05, |
|
"eval_runtime": 4.2144, |
|
"eval_samples_per_second": 11.864, |
|
"eval_steps_per_second": 1.661, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.07698113207547169, |
|
"grad_norm": 0.15251204371452332, |
|
"learning_rate": 0.0002668315918143169, |
|
"loss": 0.0003, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.07849056603773585, |
|
"grad_norm": 0.005662671290338039, |
|
"learning_rate": 0.00026526016662852886, |
|
"loss": 0.0, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 0.0022388154175132513, |
|
"learning_rate": 0.00026365723046405023, |
|
"loss": 0.0, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.08150943396226415, |
|
"grad_norm": 0.0037454920820891857, |
|
"learning_rate": 0.0002620232215476231, |
|
"loss": 0.0, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.0830188679245283, |
|
"grad_norm": 0.015162762254476547, |
|
"learning_rate": 0.0002603585866009697, |
|
"loss": 0.0, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.08452830188679246, |
|
"grad_norm": 0.002864385489374399, |
|
"learning_rate": 0.00025866378071866334, |
|
"loss": 0.0, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.0860377358490566, |
|
"grad_norm": 0.0016423336928710341, |
|
"learning_rate": 0.00025693926724370956, |
|
"loss": 0.0, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.08754716981132075, |
|
"grad_norm": 0.0003825683379545808, |
|
"learning_rate": 0.00025518551764087326, |
|
"loss": 0.0, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.0890566037735849, |
|
"grad_norm": 0.00015799446555320174, |
|
"learning_rate": 0.00025340301136778483, |
|
"loss": 0.0, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.09056603773584905, |
|
"grad_norm": 0.00015713188622612506, |
|
"learning_rate": 0.00025159223574386114, |
|
"loss": 0.0, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.09207547169811321, |
|
"grad_norm": 0.00025742893922142684, |
|
"learning_rate": 0.0002497536858170772, |
|
"loss": 0.0, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.09358490566037736, |
|
"grad_norm": 0.00015674770111218095, |
|
"learning_rate": 0.00024788786422862526, |
|
"loss": 0.0, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.09509433962264151, |
|
"grad_norm": 0.00010490669228602201, |
|
"learning_rate": 0.00024599528107549745, |
|
"loss": 0.0, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.09660377358490566, |
|
"grad_norm": 0.00021542170725297183, |
|
"learning_rate": 0.00024407645377103054, |
|
"loss": 0.0, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.09811320754716982, |
|
"grad_norm": 0.00013019933248870075, |
|
"learning_rate": 0.00024213190690345018, |
|
"loss": 0.0, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.09962264150943397, |
|
"grad_norm": 0.00012139895261498168, |
|
"learning_rate": 0.00024016217209245374, |
|
"loss": 0.0, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.10113207547169811, |
|
"grad_norm": 7.941039075376466e-05, |
|
"learning_rate": 0.00023816778784387094, |
|
"loss": 0.0, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.10264150943396226, |
|
"grad_norm": 0.0001448198891011998, |
|
"learning_rate": 0.0002361492994024415, |
|
"loss": 0.0, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.10415094339622641, |
|
"grad_norm": 0.00020370357378851622, |
|
"learning_rate": 0.0002341072586027509, |
|
"loss": 0.0, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.10566037735849057, |
|
"grad_norm": 0.00010941782966256142, |
|
"learning_rate": 0.00023204222371836405, |
|
"loss": 0.0, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.10716981132075472, |
|
"grad_norm": 0.0001951769954757765, |
|
"learning_rate": 0.00022995475930919905, |
|
"loss": 0.0, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.10867924528301887, |
|
"grad_norm": 0.0001411143020959571, |
|
"learning_rate": 0.00022784543606718227, |
|
"loss": 0.0, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.11018867924528301, |
|
"grad_norm": 0.00015462335431948304, |
|
"learning_rate": 0.00022571483066022657, |
|
"loss": 0.0, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.11169811320754718, |
|
"grad_norm": 0.0002607516653370112, |
|
"learning_rate": 0.0002235635255745762, |
|
"loss": 0.0, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.11320754716981132, |
|
"grad_norm": 7.921539508970454e-05, |
|
"learning_rate": 0.00022139210895556104, |
|
"loss": 0.0, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.11320754716981132, |
|
"eval_loss": 1.744836140460393e-06, |
|
"eval_runtime": 4.2094, |
|
"eval_samples_per_second": 11.878, |
|
"eval_steps_per_second": 1.663, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.11471698113207547, |
|
"grad_norm": 9.14011980057694e-05, |
|
"learning_rate": 0.00021920117444680317, |
|
"loss": 0.0, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.11622641509433962, |
|
"grad_norm": 0.00013622430560644716, |
|
"learning_rate": 0.00021699132102792097, |
|
"loss": 0.0, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.11773584905660377, |
|
"grad_norm": 0.00016065326053649187, |
|
"learning_rate": 0.0002147631528507739, |
|
"loss": 0.0, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.11924528301886793, |
|
"grad_norm": 0.00017030604067258537, |
|
"learning_rate": 0.00021251727907429355, |
|
"loss": 0.0, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.12075471698113208, |
|
"grad_norm": 0.0002959435514640063, |
|
"learning_rate": 0.0002102543136979454, |
|
"loss": 0.0, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.12226415094339622, |
|
"grad_norm": 7.727096817689016e-05, |
|
"learning_rate": 0.0002079748753938678, |
|
"loss": 0.0, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.12377358490566037, |
|
"grad_norm": 0.0001273597008548677, |
|
"learning_rate": 0.0002056795873377331, |
|
"loss": 0.0, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.12528301886792453, |
|
"grad_norm": 6.839417619630694e-05, |
|
"learning_rate": 0.00020336907703837748, |
|
"loss": 0.0, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.12679245283018867, |
|
"grad_norm": 6.494703120552003e-05, |
|
"learning_rate": 0.00020104397616624645, |
|
"loss": 0.0, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.12830188679245283, |
|
"grad_norm": 6.701619713567197e-05, |
|
"learning_rate": 0.00019870492038070252, |
|
"loss": 0.0, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.129811320754717, |
|
"grad_norm": 0.00010753441893029958, |
|
"learning_rate": 0.0001963525491562421, |
|
"loss": 0.0, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.13132075471698113, |
|
"grad_norm": 0.00015338734374381602, |
|
"learning_rate": 0.0001939875056076697, |
|
"loss": 0.0, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.1328301886792453, |
|
"grad_norm": 6.136780575616285e-05, |
|
"learning_rate": 0.00019161043631427666, |
|
"loss": 0.0, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.13433962264150942, |
|
"grad_norm": 9.956698340829462e-05, |
|
"learning_rate": 0.00018922199114307294, |
|
"loss": 0.0, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.13584905660377358, |
|
"grad_norm": 0.0001380999747198075, |
|
"learning_rate": 0.00018682282307111987, |
|
"loss": 0.0, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.13735849056603774, |
|
"grad_norm": 6.714855408063158e-05, |
|
"learning_rate": 0.00018441358800701273, |
|
"loss": 0.0, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.13886792452830188, |
|
"grad_norm": 0.00010223900608252734, |
|
"learning_rate": 0.00018199494461156203, |
|
"loss": 0.0, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.14037735849056604, |
|
"grad_norm": 0.00013666001905221492, |
|
"learning_rate": 0.000179567554117722, |
|
"loss": 0.0, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.1418867924528302, |
|
"grad_norm": 0.0001388599193887785, |
|
"learning_rate": 0.00017713208014981648, |
|
"loss": 0.0, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.14339622641509434, |
|
"grad_norm": 9.564343781676143e-05, |
|
"learning_rate": 0.00017468918854211007, |
|
"loss": 0.0, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.1449056603773585, |
|
"grad_norm": 0.00011167208140250295, |
|
"learning_rate": 0.00017223954715677627, |
|
"loss": 0.0, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.14641509433962263, |
|
"grad_norm": 0.0002111889043590054, |
|
"learning_rate": 0.00016978382570131034, |
|
"loss": 0.0, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.1479245283018868, |
|
"grad_norm": 0.00010389567614765838, |
|
"learning_rate": 0.00016732269554543794, |
|
"loss": 0.0, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.14943396226415095, |
|
"grad_norm": 0.00016280317504424602, |
|
"learning_rate": 0.00016485682953756942, |
|
"loss": 0.0, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.1509433962264151, |
|
"grad_norm": 0.00014044858107808977, |
|
"learning_rate": 0.00016238690182084986, |
|
"loss": 0.0, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.1509433962264151, |
|
"eval_loss": 1.082283802134043e-06, |
|
"eval_runtime": 4.2074, |
|
"eval_samples_per_second": 11.884, |
|
"eval_steps_per_second": 1.664, |
|
"step": 100 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 200, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 50, |
|
"stateful_callbacks": { |
|
"EarlyStoppingCallback": { |
|
"args": { |
|
"early_stopping_patience": 1, |
|
"early_stopping_threshold": 0.0 |
|
}, |
|
"attributes": { |
|
"early_stopping_patience_counter": 0 |
|
} |
|
}, |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 1.576510477565952e+17, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |