{
  "best_metric": 11.920056343078613,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.015238095238095238,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00015238095238095237,
      "grad_norm": 0.01340496726334095,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 11.9277,
      "step": 1
    },
    {
      "epoch": 0.00015238095238095237,
      "eval_loss": 11.932770729064941,
      "eval_runtime": 0.3373,
      "eval_samples_per_second": 148.236,
      "eval_steps_per_second": 20.753,
      "step": 1
    },
    {
      "epoch": 0.00030476190476190474,
      "grad_norm": 0.01209985837340355,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 11.9322,
      "step": 2
    },
    {
      "epoch": 0.00045714285714285713,
      "grad_norm": 0.012650331482291222,
      "learning_rate": 8.999999999999999e-05,
      "loss": 11.9317,
      "step": 3
    },
    {
      "epoch": 0.0006095238095238095,
      "grad_norm": 0.010324247181415558,
      "learning_rate": 0.00011999999999999999,
      "loss": 11.9319,
      "step": 4
    },
    {
      "epoch": 0.0007619047619047619,
      "grad_norm": 0.010827633552253246,
      "learning_rate": 0.00015,
      "loss": 11.9311,
      "step": 5
    },
    {
      "epoch": 0.0009142857142857143,
      "grad_norm": 0.01144388411194086,
      "learning_rate": 0.00017999999999999998,
      "loss": 11.9307,
      "step": 6
    },
    {
      "epoch": 0.0010666666666666667,
      "grad_norm": 0.01133989542722702,
      "learning_rate": 0.00020999999999999998,
      "loss": 11.9306,
      "step": 7
    },
    {
      "epoch": 0.001219047619047619,
      "grad_norm": 0.009713858366012573,
      "learning_rate": 0.00023999999999999998,
      "loss": 11.9315,
      "step": 8
    },
    {
      "epoch": 0.0013714285714285714,
      "grad_norm": 0.011050593107938766,
      "learning_rate": 0.00027,
      "loss": 11.9319,
      "step": 9
    },
    {
      "epoch": 0.0015238095238095239,
      "grad_norm": 0.010406149551272392,
      "learning_rate": 0.0003,
      "loss": 11.9312,
      "step": 10
    },
    {
      "epoch": 0.001676190476190476,
      "grad_norm": 0.011947591789066792,
      "learning_rate": 0.0002999794957488703,
      "loss": 11.933,
      "step": 11
    },
    {
      "epoch": 0.0018285714285714285,
      "grad_norm": 0.01505028735846281,
      "learning_rate": 0.0002999179886011389,
      "loss": 11.9299,
      "step": 12
    },
    {
      "epoch": 0.001980952380952381,
      "grad_norm": 0.01627309061586857,
      "learning_rate": 0.0002998154953722457,
      "loss": 11.9306,
      "step": 13
    },
    {
      "epoch": 0.0021333333333333334,
      "grad_norm": 0.013440248556435108,
      "learning_rate": 0.00029967204408281613,
      "loss": 11.9314,
      "step": 14
    },
    {
      "epoch": 0.002285714285714286,
      "grad_norm": 0.013199551962316036,
      "learning_rate": 0.00029948767395100045,
      "loss": 11.9304,
      "step": 15
    },
    {
      "epoch": 0.002438095238095238,
      "grad_norm": 0.016703762114048004,
      "learning_rate": 0.0002992624353817517,
      "loss": 11.9309,
      "step": 16
    },
    {
      "epoch": 0.0025904761904761904,
      "grad_norm": 0.022426387295126915,
      "learning_rate": 0.0002989963899530457,
      "loss": 11.9301,
      "step": 17
    },
    {
      "epoch": 0.002742857142857143,
      "grad_norm": 0.016040390357375145,
      "learning_rate": 0.00029868961039904624,
      "loss": 11.9312,
      "step": 18
    },
    {
      "epoch": 0.0028952380952380953,
      "grad_norm": 0.03560283035039902,
      "learning_rate": 0.00029834218059022024,
      "loss": 11.93,
      "step": 19
    },
    {
      "epoch": 0.0030476190476190477,
      "grad_norm": 0.024731332436203957,
      "learning_rate": 0.00029795419551040833,
      "loss": 11.9307,
      "step": 20
    },
    {
      "epoch": 0.0032,
      "grad_norm": 0.029315967112779617,
      "learning_rate": 0.00029752576123085736,
      "loss": 11.9301,
      "step": 21
    },
    {
      "epoch": 0.003352380952380952,
      "grad_norm": 0.03906835615634918,
      "learning_rate": 0.0002970569948812214,
      "loss": 11.9296,
      "step": 22
    },
    {
      "epoch": 0.0035047619047619046,
      "grad_norm": 0.026998696848750114,
      "learning_rate": 0.0002965480246175399,
      "loss": 11.9312,
      "step": 23
    },
    {
      "epoch": 0.003657142857142857,
      "grad_norm": 0.03217875957489014,
      "learning_rate": 0.0002959989895872009,
      "loss": 11.9292,
      "step": 24
    },
    {
      "epoch": 0.0038095238095238095,
      "grad_norm": 0.03475233539938927,
      "learning_rate": 0.0002954100398908995,
      "loss": 11.9288,
      "step": 25
    },
    {
      "epoch": 0.0038095238095238095,
      "eval_loss": 11.930586814880371,
      "eval_runtime": 0.3295,
      "eval_samples_per_second": 151.738,
      "eval_steps_per_second": 21.243,
      "step": 25
    },
    {
      "epoch": 0.003961904761904762,
      "grad_norm": 0.049299657344818115,
      "learning_rate": 0.0002947813365416023,
      "loss": 11.9276,
      "step": 26
    },
    {
      "epoch": 0.004114285714285714,
      "grad_norm": 0.040789611637592316,
      "learning_rate": 0.0002941130514205272,
      "loss": 11.928,
      "step": 27
    },
    {
      "epoch": 0.004266666666666667,
      "grad_norm": 0.04697525128722191,
      "learning_rate": 0.0002934053672301536,
      "loss": 11.9287,
      "step": 28
    },
    {
      "epoch": 0.004419047619047619,
      "grad_norm": 0.057105280458927155,
      "learning_rate": 0.00029265847744427303,
      "loss": 11.9274,
      "step": 29
    },
    {
      "epoch": 0.004571428571428572,
      "grad_norm": 0.05554262176156044,
      "learning_rate": 0.00029187258625509513,
      "loss": 11.9263,
      "step": 30
    },
    {
      "epoch": 0.004723809523809524,
      "grad_norm": 0.059702690690755844,
      "learning_rate": 0.00029104790851742417,
      "loss": 11.9256,
      "step": 31
    },
    {
      "epoch": 0.004876190476190476,
      "grad_norm": 0.05958414077758789,
      "learning_rate": 0.0002901846696899191,
      "loss": 11.9274,
      "step": 32
    },
    {
      "epoch": 0.005028571428571428,
      "grad_norm": 0.06670510023832321,
      "learning_rate": 0.00028928310577345606,
      "loss": 11.9244,
      "step": 33
    },
    {
      "epoch": 0.005180952380952381,
      "grad_norm": 0.0594896636903286,
      "learning_rate": 0.0002883434632466077,
      "loss": 11.9259,
      "step": 34
    },
    {
      "epoch": 0.005333333333333333,
      "grad_norm": 0.08420071005821228,
      "learning_rate": 0.00028736599899825856,
      "loss": 11.9243,
      "step": 35
    },
    {
      "epoch": 0.005485714285714286,
      "grad_norm": 0.06888625770807266,
      "learning_rate": 0.00028635098025737434,
      "loss": 11.9224,
      "step": 36
    },
    {
      "epoch": 0.005638095238095238,
      "grad_norm": 0.07287950813770294,
      "learning_rate": 0.00028529868451994384,
      "loss": 11.9234,
      "step": 37
    },
    {
      "epoch": 0.0057904761904761905,
      "grad_norm": 0.04395854473114014,
      "learning_rate": 0.0002842093994731145,
      "loss": 11.9249,
      "step": 38
    },
    {
      "epoch": 0.005942857142857143,
      "grad_norm": 0.05748274549841881,
      "learning_rate": 0.00028308342291654174,
      "loss": 11.9233,
      "step": 39
    },
    {
      "epoch": 0.006095238095238095,
      "grad_norm": 0.08280626684427261,
      "learning_rate": 0.00028192106268097334,
      "loss": 11.9219,
      "step": 40
    },
    {
      "epoch": 0.006247619047619048,
      "grad_norm": 0.049897756427526474,
      "learning_rate": 0.00028072263654409154,
      "loss": 11.9233,
      "step": 41
    },
    {
      "epoch": 0.0064,
      "grad_norm": 0.049588147550821304,
      "learning_rate": 0.0002794884721436361,
      "loss": 11.9248,
      "step": 42
    },
    {
      "epoch": 0.006552380952380953,
      "grad_norm": 0.0516531839966774,
      "learning_rate": 0.00027821890688783083,
      "loss": 11.9216,
      "step": 43
    },
    {
      "epoch": 0.006704761904761904,
      "grad_norm": 0.03678146004676819,
      "learning_rate": 0.0002769142878631403,
      "loss": 11.9212,
      "step": 44
    },
    {
      "epoch": 0.006857142857142857,
      "grad_norm": 0.046402495354413986,
      "learning_rate": 0.00027557497173937923,
      "loss": 11.9203,
      "step": 45
    },
    {
      "epoch": 0.007009523809523809,
      "grad_norm": 0.05032557621598244,
      "learning_rate": 0.000274201324672203,
      "loss": 11.9188,
      "step": 46
    },
    {
      "epoch": 0.007161904761904762,
      "grad_norm": 0.038833487778902054,
      "learning_rate": 0.00027279372220300385,
      "loss": 11.9214,
      "step": 47
    },
    {
      "epoch": 0.007314285714285714,
      "grad_norm": 0.04270482808351517,
      "learning_rate": 0.0002713525491562421,
      "loss": 11.918,
      "step": 48
    },
    {
      "epoch": 0.007466666666666667,
      "grad_norm": 0.05103209614753723,
      "learning_rate": 0.00026987819953423867,
      "loss": 11.9203,
      "step": 49
    },
    {
      "epoch": 0.007619047619047619,
      "grad_norm": 0.050319772213697433,
      "learning_rate": 0.00026837107640945905,
      "loss": 11.9171,
      "step": 50
    },
    {
      "epoch": 0.007619047619047619,
      "eval_loss": 11.921985626220703,
      "eval_runtime": 0.3251,
      "eval_samples_per_second": 153.816,
      "eval_steps_per_second": 21.534,
      "step": 50
    },
    {
      "epoch": 0.0077714285714285715,
      "grad_norm": 0.04554375633597374,
      "learning_rate": 0.0002668315918143169,
      "loss": 11.9228,
      "step": 51
    },
    {
      "epoch": 0.007923809523809524,
      "grad_norm": 0.04022262990474701,
      "learning_rate": 0.00026526016662852886,
      "loss": 11.9199,
      "step": 52
    },
    {
      "epoch": 0.008076190476190476,
      "grad_norm": 0.033325254917144775,
      "learning_rate": 0.00026365723046405023,
      "loss": 11.9218,
      "step": 53
    },
    {
      "epoch": 0.008228571428571429,
      "grad_norm": 0.04107801243662834,
      "learning_rate": 0.0002620232215476231,
      "loss": 11.9205,
      "step": 54
    },
    {
      "epoch": 0.008380952380952381,
      "grad_norm": 0.027698513120412827,
      "learning_rate": 0.0002603585866009697,
      "loss": 11.9217,
      "step": 55
    },
    {
      "epoch": 0.008533333333333334,
      "grad_norm": 0.03343752399086952,
      "learning_rate": 0.00025866378071866334,
      "loss": 11.9195,
      "step": 56
    },
    {
      "epoch": 0.008685714285714286,
      "grad_norm": 0.03319195285439491,
      "learning_rate": 0.00025693926724370956,
      "loss": 11.9203,
      "step": 57
    },
    {
      "epoch": 0.008838095238095239,
      "grad_norm": 0.03573085740208626,
      "learning_rate": 0.00025518551764087326,
      "loss": 11.9196,
      "step": 58
    },
    {
      "epoch": 0.008990476190476191,
      "grad_norm": 0.01422872580587864,
      "learning_rate": 0.00025340301136778483,
      "loss": 11.919,
      "step": 59
    },
    {
      "epoch": 0.009142857142857144,
      "grad_norm": 0.026226388290524483,
      "learning_rate": 0.00025159223574386114,
      "loss": 11.9193,
      "step": 60
    },
    {
      "epoch": 0.009295238095238096,
      "grad_norm": 0.021446671336889267,
      "learning_rate": 0.0002497536858170772,
      "loss": 11.9194,
      "step": 61
    },
    {
      "epoch": 0.009447619047619048,
      "grad_norm": 0.014439309947192669,
      "learning_rate": 0.00024788786422862526,
      "loss": 11.919,
      "step": 62
    },
    {
      "epoch": 0.0096,
      "grad_norm": 0.037217725068330765,
      "learning_rate": 0.00024599528107549745,
      "loss": 11.9196,
      "step": 63
    },
    {
      "epoch": 0.009752380952380952,
      "grad_norm": 0.023248059675097466,
      "learning_rate": 0.00024407645377103054,
      "loss": 11.9185,
      "step": 64
    },
    {
      "epoch": 0.009904761904761904,
      "grad_norm": 0.014870691113173962,
      "learning_rate": 0.00024213190690345018,
      "loss": 11.9191,
      "step": 65
    },
    {
      "epoch": 0.010057142857142857,
      "grad_norm": 0.02193147875368595,
      "learning_rate": 0.00024016217209245374,
      "loss": 11.9207,
      "step": 66
    },
    {
      "epoch": 0.010209523809523809,
      "grad_norm": 0.017094288021326065,
      "learning_rate": 0.00023816778784387094,
      "loss": 11.9193,
      "step": 67
    },
    {
      "epoch": 0.010361904761904761,
      "grad_norm": 0.009804649278521538,
      "learning_rate": 0.0002361492994024415,
      "loss": 11.9193,
      "step": 68
    },
    {
      "epoch": 0.010514285714285714,
      "grad_norm": 0.0146781075745821,
      "learning_rate": 0.0002341072586027509,
      "loss": 11.917,
      "step": 69
    },
    {
      "epoch": 0.010666666666666666,
      "grad_norm": 0.019102444872260094,
      "learning_rate": 0.00023204222371836405,
      "loss": 11.9194,
      "step": 70
    },
    {
      "epoch": 0.010819047619047619,
      "grad_norm": 0.02413802035152912,
      "learning_rate": 0.00022995475930919905,
      "loss": 11.9202,
      "step": 71
    },
    {
      "epoch": 0.010971428571428571,
      "grad_norm": 0.013724091462790966,
      "learning_rate": 0.00022784543606718227,
      "loss": 11.918,
      "step": 72
    },
    {
      "epoch": 0.011123809523809524,
      "grad_norm": 0.015044189058244228,
      "learning_rate": 0.00022571483066022657,
      "loss": 11.9201,
      "step": 73
    },
    {
      "epoch": 0.011276190476190476,
      "grad_norm": 0.01973019354045391,
      "learning_rate": 0.0002235635255745762,
      "loss": 11.9213,
      "step": 74
    },
    {
      "epoch": 0.011428571428571429,
      "grad_norm": 0.01863853819668293,
      "learning_rate": 0.00022139210895556104,
      "loss": 11.919,
      "step": 75
    },
    {
      "epoch": 0.011428571428571429,
      "eval_loss": 11.919951438903809,
      "eval_runtime": 0.3345,
      "eval_samples_per_second": 149.462,
      "eval_steps_per_second": 20.925,
      "step": 75
    },
    {
      "epoch": 0.011580952380952381,
      "grad_norm": 0.016588257625699043,
      "learning_rate": 0.00021920117444680317,
      "loss": 11.9198,
      "step": 76
    },
    {
      "epoch": 0.011733333333333333,
      "grad_norm": 0.012788133695721626,
      "learning_rate": 0.00021699132102792097,
      "loss": 11.9192,
      "step": 77
    },
    {
      "epoch": 0.011885714285714286,
      "grad_norm": 0.019622452557086945,
      "learning_rate": 0.0002147631528507739,
      "loss": 11.9176,
      "step": 78
    },
    {
      "epoch": 0.012038095238095238,
      "grad_norm": 0.0162656269967556,
      "learning_rate": 0.00021251727907429355,
      "loss": 11.9194,
      "step": 79
    },
    {
      "epoch": 0.01219047619047619,
      "grad_norm": 0.012881246395409107,
      "learning_rate": 0.0002102543136979454,
      "loss": 11.9186,
      "step": 80
    },
    {
      "epoch": 0.012342857142857143,
      "grad_norm": 0.025122717022895813,
      "learning_rate": 0.0002079748753938678,
      "loss": 11.9199,
      "step": 81
    },
    {
      "epoch": 0.012495238095238096,
      "grad_norm": 0.020889850333333015,
      "learning_rate": 0.0002056795873377331,
      "loss": 11.9174,
      "step": 82
    },
    {
      "epoch": 0.012647619047619048,
      "grad_norm": 0.013086607679724693,
      "learning_rate": 0.00020336907703837748,
      "loss": 11.9196,
      "step": 83
    },
    {
      "epoch": 0.0128,
      "grad_norm": 0.021250098943710327,
      "learning_rate": 0.00020104397616624645,
      "loss": 11.918,
      "step": 84
    },
    {
      "epoch": 0.012952380952380953,
      "grad_norm": 0.015169636346399784,
      "learning_rate": 0.00019870492038070252,
      "loss": 11.9175,
      "step": 85
    },
    {
      "epoch": 0.013104761904761906,
      "grad_norm": 0.025091208517551422,
      "learning_rate": 0.0001963525491562421,
      "loss": 11.9211,
      "step": 86
    },
    {
      "epoch": 0.013257142857142858,
      "grad_norm": 0.020077206194400787,
      "learning_rate": 0.0001939875056076697,
      "loss": 11.9199,
      "step": 87
    },
    {
      "epoch": 0.013409523809523809,
      "grad_norm": 0.03454453870654106,
      "learning_rate": 0.00019161043631427666,
      "loss": 11.9181,
      "step": 88
    },
    {
      "epoch": 0.013561904761904761,
      "grad_norm": 0.02208181656897068,
      "learning_rate": 0.00018922199114307294,
      "loss": 11.9196,
      "step": 89
    },
    {
      "epoch": 0.013714285714285714,
      "grad_norm": 0.021397177129983902,
      "learning_rate": 0.00018682282307111987,
      "loss": 11.923,
      "step": 90
    },
    {
      "epoch": 0.013866666666666666,
      "grad_norm": 0.025557834655046463,
      "learning_rate": 0.00018441358800701273,
      "loss": 11.9196,
      "step": 91
    },
    {
      "epoch": 0.014019047619047618,
      "grad_norm": 0.02345367893576622,
      "learning_rate": 0.00018199494461156203,
      "loss": 11.9207,
      "step": 92
    },
    {
      "epoch": 0.014171428571428571,
      "grad_norm": 0.02872537262737751,
      "learning_rate": 0.000179567554117722,
      "loss": 11.9172,
      "step": 93
    },
    {
      "epoch": 0.014323809523809523,
      "grad_norm": 0.02754749357700348,
      "learning_rate": 0.00017713208014981648,
      "loss": 11.9197,
      "step": 94
    },
    {
      "epoch": 0.014476190476190476,
      "grad_norm": 0.025062963366508484,
      "learning_rate": 0.00017468918854211007,
      "loss": 11.9182,
      "step": 95
    },
    {
      "epoch": 0.014628571428571428,
      "grad_norm": 0.022489259019494057,
      "learning_rate": 0.00017223954715677627,
      "loss": 11.9189,
      "step": 96
    },
    {
      "epoch": 0.01478095238095238,
      "grad_norm": 0.01625611074268818,
      "learning_rate": 0.00016978382570131034,
      "loss": 11.9195,
      "step": 97
    },
    {
      "epoch": 0.014933333333333333,
      "grad_norm": 0.024079998955130577,
      "learning_rate": 0.00016732269554543794,
      "loss": 11.9175,
      "step": 98
    },
    {
      "epoch": 0.015085714285714286,
      "grad_norm": 0.028555238619446754,
      "learning_rate": 0.00016485682953756942,
      "loss": 11.9192,
      "step": 99
    },
    {
      "epoch": 0.015238095238095238,
      "grad_norm": 0.050129834562540054,
      "learning_rate": 0.00016238690182084986,
      "loss": 11.9216,
      "step": 100
    },
    {
      "epoch": 0.015238095238095238,
      "eval_loss": 11.920056343078613,
      "eval_runtime": 0.3197,
      "eval_samples_per_second": 156.391,
      "eval_steps_per_second": 21.895,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 162049400832.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}