{
  "best_metric": 6.682973861694336,
  "best_model_checkpoint": "miner_id_24/checkpoint-150",
  "epoch": 0.96,
  "eval_steps": 25,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0064,
      "grad_norm": 0.9495358467102051,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 6.9341,
      "step": 1
    },
    {
      "epoch": 0.0064,
      "eval_loss": 6.940670013427734,
      "eval_runtime": 0.4624,
      "eval_samples_per_second": 108.123,
      "eval_steps_per_second": 15.137,
      "step": 1
    },
    {
      "epoch": 0.0128,
      "grad_norm": 0.8018835186958313,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 6.9556,
      "step": 2
    },
    {
      "epoch": 0.0192,
      "grad_norm": 0.9049068093299866,
      "learning_rate": 8.999999999999999e-05,
      "loss": 6.9653,
      "step": 3
    },
    {
      "epoch": 0.0256,
      "grad_norm": 0.7154098749160767,
      "learning_rate": 0.00011999999999999999,
      "loss": 6.9551,
      "step": 4
    },
    {
      "epoch": 0.032,
      "grad_norm": 0.7742681503295898,
      "learning_rate": 0.00015,
      "loss": 6.9498,
      "step": 5
    },
    {
      "epoch": 0.0384,
      "grad_norm": 0.8399883508682251,
      "learning_rate": 0.00017999999999999998,
      "loss": 6.94,
      "step": 6
    },
    {
      "epoch": 0.0448,
      "grad_norm": 0.5200990438461304,
      "learning_rate": 0.00020999999999999998,
      "loss": 6.9413,
      "step": 7
    },
    {
      "epoch": 0.0512,
      "grad_norm": 0.6034143567085266,
      "learning_rate": 0.00023999999999999998,
      "loss": 6.9438,
      "step": 8
    },
    {
      "epoch": 0.0576,
      "grad_norm": 0.5844438672065735,
      "learning_rate": 0.00027,
      "loss": 6.9437,
      "step": 9
    },
    {
      "epoch": 0.064,
      "grad_norm": 0.6551675796508789,
      "learning_rate": 0.0003,
      "loss": 6.943,
      "step": 10
    },
    {
      "epoch": 0.0704,
      "grad_norm": 0.6681671142578125,
      "learning_rate": 0.0002999794957488703,
      "loss": 6.9383,
      "step": 11
    },
    {
      "epoch": 0.0768,
      "grad_norm": 0.6732079386711121,
      "learning_rate": 0.0002999179886011389,
      "loss": 6.9406,
      "step": 12
    },
    {
      "epoch": 0.0832,
      "grad_norm": 0.6102283596992493,
      "learning_rate": 0.0002998154953722457,
      "loss": 6.9383,
      "step": 13
    },
    {
      "epoch": 0.0896,
      "grad_norm": 0.6549004912376404,
      "learning_rate": 0.00029967204408281613,
      "loss": 6.9284,
      "step": 14
    },
    {
      "epoch": 0.096,
      "grad_norm": 0.5962654948234558,
      "learning_rate": 0.00029948767395100045,
      "loss": 6.9047,
      "step": 15
    },
    {
      "epoch": 0.1024,
      "grad_norm": 0.649928867816925,
      "learning_rate": 0.0002992624353817517,
      "loss": 6.9212,
      "step": 16
    },
    {
      "epoch": 0.1088,
      "grad_norm": 0.504554808139801,
      "learning_rate": 0.0002989963899530457,
      "loss": 6.9179,
      "step": 17
    },
    {
      "epoch": 0.1152,
      "grad_norm": 0.597122848033905,
      "learning_rate": 0.00029868961039904624,
      "loss": 6.9112,
      "step": 18
    },
    {
      "epoch": 0.1216,
      "grad_norm": 0.5620957612991333,
      "learning_rate": 0.00029834218059022024,
      "loss": 6.9046,
      "step": 19
    },
    {
      "epoch": 0.128,
      "grad_norm": 0.6724961400032043,
      "learning_rate": 0.00029795419551040833,
      "loss": 6.8993,
      "step": 20
    },
    {
      "epoch": 0.1344,
      "grad_norm": 0.5969198346138,
      "learning_rate": 0.00029752576123085736,
      "loss": 6.8979,
      "step": 21
    },
    {
      "epoch": 0.1408,
      "grad_norm": 0.5506598949432373,
      "learning_rate": 0.0002970569948812214,
      "loss": 6.8887,
      "step": 22
    },
    {
      "epoch": 0.1472,
      "grad_norm": 0.7546066641807556,
      "learning_rate": 0.0002965480246175399,
      "loss": 6.8619,
      "step": 23
    },
    {
      "epoch": 0.1536,
      "grad_norm": 0.6300695538520813,
      "learning_rate": 0.0002959989895872009,
      "loss": 6.8506,
      "step": 24
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.5741549134254456,
      "learning_rate": 0.0002954100398908995,
      "loss": 6.8638,
      "step": 25
    },
    {
      "epoch": 0.16,
      "eval_loss": 6.874875545501709,
      "eval_runtime": 0.047,
      "eval_samples_per_second": 1063.777,
      "eval_steps_per_second": 148.929,
      "step": 25
    },
    {
      "epoch": 0.1664,
      "grad_norm": 0.5633116364479065,
      "learning_rate": 0.0002947813365416023,
      "loss": 6.8588,
      "step": 26
    },
    {
      "epoch": 0.1728,
      "grad_norm": 0.6096696853637695,
      "learning_rate": 0.0002941130514205272,
      "loss": 6.8797,
      "step": 27
    },
    {
      "epoch": 0.1792,
      "grad_norm": 0.5343248844146729,
      "learning_rate": 0.0002934053672301536,
      "loss": 6.8702,
      "step": 28
    },
    {
      "epoch": 0.1856,
      "grad_norm": 0.6776606440544128,
      "learning_rate": 0.00029265847744427303,
      "loss": 6.84,
      "step": 29
    },
    {
      "epoch": 0.192,
      "grad_norm": 0.5798454880714417,
      "learning_rate": 0.00029187258625509513,
      "loss": 6.8618,
      "step": 30
    },
    {
      "epoch": 0.1984,
      "grad_norm": 0.6195558905601501,
      "learning_rate": 0.00029104790851742417,
      "loss": 6.8286,
      "step": 31
    },
    {
      "epoch": 0.2048,
      "grad_norm": 0.556268036365509,
      "learning_rate": 0.0002901846696899191,
      "loss": 6.8496,
      "step": 32
    },
    {
      "epoch": 0.2112,
      "grad_norm": 0.5736655592918396,
      "learning_rate": 0.00028928310577345606,
      "loss": 6.8319,
      "step": 33
    },
    {
      "epoch": 0.2176,
      "grad_norm": 0.5479353666305542,
      "learning_rate": 0.0002883434632466077,
      "loss": 6.8609,
      "step": 34
    },
    {
      "epoch": 0.224,
      "grad_norm": 0.8071677684783936,
      "learning_rate": 0.00028736599899825856,
      "loss": 6.9076,
      "step": 35
    },
    {
      "epoch": 0.2304,
      "grad_norm": 0.775070071220398,
      "learning_rate": 0.00028635098025737434,
      "loss": 6.9059,
      "step": 36
    },
    {
      "epoch": 0.2368,
      "grad_norm": 0.7527448534965515,
      "learning_rate": 0.00028529868451994384,
      "loss": 6.8908,
      "step": 37
    },
    {
      "epoch": 0.2432,
      "grad_norm": 0.911148726940155,
      "learning_rate": 0.0002842093994731145,
      "loss": 6.8912,
      "step": 38
    },
    {
      "epoch": 0.2496,
      "grad_norm": 1.4041032791137695,
      "learning_rate": 0.00028308342291654174,
      "loss": 6.8862,
      "step": 39
    },
    {
      "epoch": 0.256,
      "grad_norm": 0.9864165186882019,
      "learning_rate": 0.00028192106268097334,
      "loss": 6.9131,
      "step": 40
    },
    {
      "epoch": 0.2624,
      "grad_norm": 0.8453418612480164,
      "learning_rate": 0.00028072263654409154,
      "loss": 6.9059,
      "step": 41
    },
    {
      "epoch": 0.2688,
      "grad_norm": 0.77274489402771,
      "learning_rate": 0.0002794884721436361,
      "loss": 6.8597,
      "step": 42
    },
    {
      "epoch": 0.2752,
      "grad_norm": 0.7312727570533752,
      "learning_rate": 0.00027821890688783083,
      "loss": 6.8523,
      "step": 43
    },
    {
      "epoch": 0.2816,
      "grad_norm": 0.7589954137802124,
      "learning_rate": 0.0002769142878631403,
      "loss": 6.8514,
      "step": 44
    },
    {
      "epoch": 0.288,
      "grad_norm": 0.588603138923645,
      "learning_rate": 0.00027557497173937923,
      "loss": 6.7881,
      "step": 45
    },
    {
      "epoch": 0.2944,
      "grad_norm": 0.632996678352356,
      "learning_rate": 0.000274201324672203,
      "loss": 6.7755,
      "step": 46
    },
    {
      "epoch": 0.3008,
      "grad_norm": 0.575196385383606,
      "learning_rate": 0.00027279372220300385,
      "loss": 6.79,
      "step": 47
    },
    {
      "epoch": 0.3072,
      "grad_norm": 0.5218028426170349,
      "learning_rate": 0.0002713525491562421,
      "loss": 6.7852,
      "step": 48
    },
    {
      "epoch": 0.3136,
      "grad_norm": 0.5754166841506958,
      "learning_rate": 0.00026987819953423867,
      "loss": 6.7727,
      "step": 49
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.5350818634033203,
      "learning_rate": 0.00026837107640945905,
      "loss": 6.7583,
      "step": 50
    },
    {
      "epoch": 0.32,
      "eval_loss": 6.796112060546875,
      "eval_runtime": 0.0455,
      "eval_samples_per_second": 1098.877,
      "eval_steps_per_second": 153.843,
      "step": 50
    },
    {
      "epoch": 0.3264,
      "grad_norm": 0.4826822578907013,
      "learning_rate": 0.0002668315918143169,
      "loss": 6.7629,
      "step": 51
    },
    {
      "epoch": 0.3328,
      "grad_norm": 0.508161187171936,
      "learning_rate": 0.00026526016662852886,
      "loss": 6.7403,
      "step": 52
    },
    {
      "epoch": 0.3392,
      "grad_norm": 0.5200028419494629,
      "learning_rate": 0.00026365723046405023,
      "loss": 6.7561,
      "step": 53
    },
    {
      "epoch": 0.3456,
      "grad_norm": 0.49363598227500916,
      "learning_rate": 0.0002620232215476231,
      "loss": 6.7514,
      "step": 54
    },
    {
      "epoch": 0.352,
      "grad_norm": 0.46784576773643494,
      "learning_rate": 0.0002603585866009697,
      "loss": 6.7524,
      "step": 55
    },
    {
      "epoch": 0.3584,
      "grad_norm": 0.46129316091537476,
      "learning_rate": 0.00025866378071866334,
      "loss": 6.7433,
      "step": 56
    },
    {
      "epoch": 0.3648,
      "grad_norm": 0.5088637471199036,
      "learning_rate": 0.00025693926724370956,
      "loss": 6.7277,
      "step": 57
    },
    {
      "epoch": 0.3712,
      "grad_norm": 0.5694301724433899,
      "learning_rate": 0.00025518551764087326,
      "loss": 6.7277,
      "step": 58
    },
    {
      "epoch": 0.3776,
      "grad_norm": 0.41760918498039246,
      "learning_rate": 0.00025340301136778483,
      "loss": 6.7531,
      "step": 59
    },
    {
      "epoch": 0.384,
      "grad_norm": 0.4982942044734955,
      "learning_rate": 0.00025159223574386114,
      "loss": 6.729,
      "step": 60
    },
    {
      "epoch": 0.3904,
      "grad_norm": 0.4909391701221466,
      "learning_rate": 0.0002497536858170772,
      "loss": 6.7228,
      "step": 61
    },
    {
      "epoch": 0.3968,
      "grad_norm": 0.5273060202598572,
      "learning_rate": 0.00024788786422862526,
      "loss": 6.7134,
      "step": 62
    },
    {
      "epoch": 0.4032,
      "grad_norm": 0.4253634512424469,
      "learning_rate": 0.00024599528107549745,
      "loss": 6.7223,
      "step": 63
    },
    {
      "epoch": 0.4096,
      "grad_norm": 0.47950324416160583,
      "learning_rate": 0.00024407645377103054,
      "loss": 6.718,
      "step": 64
    },
    {
      "epoch": 0.416,
      "grad_norm": 0.46385979652404785,
      "learning_rate": 0.00024213190690345018,
      "loss": 6.725,
      "step": 65
    },
    {
      "epoch": 0.4224,
      "grad_norm": 0.540244996547699,
      "learning_rate": 0.00024016217209245374,
      "loss": 6.7151,
      "step": 66
    },
    {
      "epoch": 0.4288,
      "grad_norm": 0.4589102864265442,
      "learning_rate": 0.00023816778784387094,
      "loss": 6.7114,
      "step": 67
    },
    {
      "epoch": 0.4352,
      "grad_norm": 0.4630209505558014,
      "learning_rate": 0.0002361492994024415,
      "loss": 6.7337,
      "step": 68
    },
    {
      "epoch": 0.4416,
      "grad_norm": 0.47441697120666504,
      "learning_rate": 0.0002341072586027509,
      "loss": 6.7308,
      "step": 69
    },
    {
      "epoch": 0.448,
      "grad_norm": 0.49404001235961914,
      "learning_rate": 0.00023204222371836405,
      "loss": 6.7455,
      "step": 70
    },
    {
      "epoch": 0.4544,
      "grad_norm": 0.44997942447662354,
      "learning_rate": 0.00022995475930919905,
      "loss": 6.739,
      "step": 71
    },
    {
      "epoch": 0.4608,
      "grad_norm": 0.45261090993881226,
      "learning_rate": 0.00022784543606718227,
      "loss": 6.7068,
      "step": 72
    },
    {
      "epoch": 0.4672,
      "grad_norm": 0.44797801971435547,
      "learning_rate": 0.00022571483066022657,
      "loss": 6.7162,
      "step": 73
    },
    {
      "epoch": 0.4736,
      "grad_norm": 0.6951871514320374,
      "learning_rate": 0.0002235635255745762,
      "loss": 6.7901,
      "step": 74
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.7108526825904846,
      "learning_rate": 0.00022139210895556104,
      "loss": 6.8099,
      "step": 75
    },
    {
      "epoch": 0.48,
      "eval_loss": 6.746245861053467,
      "eval_runtime": 0.0445,
      "eval_samples_per_second": 1123.84,
      "eval_steps_per_second": 157.338,
      "step": 75
    },
    {
      "epoch": 0.4864,
      "grad_norm": 0.7808616161346436,
      "learning_rate": 0.00021920117444680317,
      "loss": 6.8143,
      "step": 76
    },
    {
      "epoch": 0.4928,
      "grad_norm": 0.7759772539138794,
      "learning_rate": 0.00021699132102792097,
      "loss": 6.8032,
      "step": 77
    },
    {
      "epoch": 0.4992,
      "grad_norm": 1.509546160697937,
      "learning_rate": 0.0002147631528507739,
      "loss": 6.7856,
      "step": 78
    },
    {
      "epoch": 0.5056,
      "grad_norm": 1.1290587186813354,
      "learning_rate": 0.00021251727907429355,
      "loss": 6.8949,
      "step": 79
    },
    {
      "epoch": 0.512,
      "grad_norm": 0.9573540687561035,
      "learning_rate": 0.0002102543136979454,
      "loss": 6.887,
      "step": 80
    },
    {
      "epoch": 0.5184,
      "grad_norm": 1.0016741752624512,
      "learning_rate": 0.0002079748753938678,
      "loss": 6.8455,
      "step": 81
    },
    {
      "epoch": 0.5248,
      "grad_norm": 0.7958142161369324,
      "learning_rate": 0.0002056795873377331,
      "loss": 6.7827,
      "step": 82
    },
    {
      "epoch": 0.5312,
      "grad_norm": 0.6969197392463684,
      "learning_rate": 0.00020336907703837748,
      "loss": 6.7816,
      "step": 83
    },
    {
      "epoch": 0.5376,
      "grad_norm": 0.8457598090171814,
      "learning_rate": 0.00020104397616624645,
      "loss": 6.7871,
      "step": 84
    },
    {
      "epoch": 0.544,
      "grad_norm": 0.5099653005599976,
      "learning_rate": 0.00019870492038070252,
      "loss": 6.6791,
      "step": 85
    },
    {
      "epoch": 0.5504,
      "grad_norm": 0.5534845590591431,
      "learning_rate": 0.0001963525491562421,
      "loss": 6.685,
      "step": 86
    },
    {
      "epoch": 0.5568,
      "grad_norm": 0.5635961294174194,
      "learning_rate": 0.0001939875056076697,
      "loss": 6.694,
      "step": 87
    },
    {
      "epoch": 0.5632,
      "grad_norm": 0.5438621640205383,
      "learning_rate": 0.00019161043631427666,
      "loss": 6.6969,
      "step": 88
    },
    {
      "epoch": 0.5696,
      "grad_norm": 0.5128461122512817,
      "learning_rate": 0.00018922199114307294,
      "loss": 6.6693,
      "step": 89
    },
    {
      "epoch": 0.576,
      "grad_norm": 0.6132482290267944,
      "learning_rate": 0.00018682282307111987,
      "loss": 6.6821,
      "step": 90
    },
    {
      "epoch": 0.5824,
      "grad_norm": 0.5470501184463501,
      "learning_rate": 0.00018441358800701273,
      "loss": 6.6796,
      "step": 91
    },
    {
      "epoch": 0.5888,
      "grad_norm": 0.5392007231712341,
      "learning_rate": 0.00018199494461156203,
      "loss": 6.6488,
      "step": 92
    },
    {
      "epoch": 0.5952,
      "grad_norm": 0.6648130416870117,
      "learning_rate": 0.000179567554117722,
      "loss": 6.6436,
      "step": 93
    },
    {
      "epoch": 0.6016,
      "grad_norm": 0.6511263847351074,
      "learning_rate": 0.00017713208014981648,
      "loss": 6.6311,
      "step": 94
    },
    {
      "epoch": 0.608,
      "grad_norm": 0.6294538974761963,
      "learning_rate": 0.00017468918854211007,
      "loss": 6.627,
      "step": 95
    },
    {
      "epoch": 0.6144,
      "grad_norm": 0.48442429304122925,
      "learning_rate": 0.00017223954715677627,
      "loss": 6.6497,
      "step": 96
    },
    {
      "epoch": 0.6208,
      "grad_norm": 0.5661095976829529,
      "learning_rate": 0.00016978382570131034,
      "loss": 6.6348,
      "step": 97
    },
    {
      "epoch": 0.6272,
      "grad_norm": 0.47108545899391174,
      "learning_rate": 0.00016732269554543794,
      "loss": 6.6478,
      "step": 98
    },
    {
      "epoch": 0.6336,
      "grad_norm": 0.43120548129081726,
      "learning_rate": 0.00016485682953756942,
      "loss": 6.6553,
      "step": 99
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.47978243231773376,
      "learning_rate": 0.00016238690182084986,
      "loss": 6.6235,
      "step": 100
    },
    {
      "epoch": 0.64,
      "eval_loss": 6.714487075805664,
      "eval_runtime": 0.0452,
      "eval_samples_per_second": 1106.805,
      "eval_steps_per_second": 154.953,
      "step": 100
    },
    {
      "epoch": 0.6464,
      "grad_norm": 0.442552387714386,
      "learning_rate": 0.0001599135876488549,
      "loss": 6.6315,
      "step": 101
    },
    {
      "epoch": 0.6528,
      "grad_norm": 0.4487893283367157,
      "learning_rate": 0.00015743756320098332,
      "loss": 6.641,
      "step": 102
    },
    {
      "epoch": 0.6592,
      "grad_norm": 0.4315354824066162,
      "learning_rate": 0.0001549595053975962,
      "loss": 6.6571,
      "step": 103
    },
    {
      "epoch": 0.6656,
      "grad_norm": 0.44835346937179565,
      "learning_rate": 0.00015248009171495378,
      "loss": 6.6217,
      "step": 104
    },
    {
      "epoch": 0.672,
      "grad_norm": 0.38457152247428894,
      "learning_rate": 0.00015,
      "loss": 6.6733,
      "step": 105
    },
    {
      "epoch": 0.6784,
      "grad_norm": 0.44234663248062134,
      "learning_rate": 0.00014751990828504622,
      "loss": 6.6458,
      "step": 106
    },
    {
      "epoch": 0.6848,
      "grad_norm": 0.5025720000267029,
      "learning_rate": 0.00014504049460240375,
      "loss": 6.6981,
      "step": 107
    },
    {
      "epoch": 0.6912,
      "grad_norm": 0.467263400554657,
      "learning_rate": 0.00014256243679901663,
      "loss": 6.6535,
      "step": 108
    },
    {
      "epoch": 0.6976,
      "grad_norm": 0.4463280141353607,
      "learning_rate": 0.00014008641235114508,
      "loss": 6.6794,
      "step": 109
    },
    {
      "epoch": 0.704,
      "grad_norm": 0.5022115111351013,
      "learning_rate": 0.00013761309817915014,
      "loss": 6.6662,
      "step": 110
    },
    {
      "epoch": 0.7104,
      "grad_norm": 0.47702935338020325,
      "learning_rate": 0.00013514317046243058,
      "loss": 6.6882,
      "step": 111
    },
    {
      "epoch": 0.7168,
      "grad_norm": 0.6275339722633362,
      "learning_rate": 0.00013267730445456208,
      "loss": 6.7397,
      "step": 112
    },
    {
      "epoch": 0.7232,
      "grad_norm": 0.8845940828323364,
      "learning_rate": 0.00013021617429868963,
      "loss": 6.7815,
      "step": 113
    },
    {
      "epoch": 0.7296,
      "grad_norm": 0.8113049864768982,
      "learning_rate": 0.00012776045284322368,
      "loss": 6.7837,
      "step": 114
    },
    {
      "epoch": 0.736,
      "grad_norm": 0.8991756439208984,
      "learning_rate": 0.00012531081145788987,
      "loss": 6.7817,
      "step": 115
    },
    {
      "epoch": 0.7424,
      "grad_norm": 1.0973552465438843,
      "learning_rate": 0.00012286791985018355,
      "loss": 6.7701,
      "step": 116
    },
    {
      "epoch": 0.7488,
      "grad_norm": 1.67232346534729,
      "learning_rate": 0.00012043244588227796,
      "loss": 6.7657,
      "step": 117
    },
    {
      "epoch": 0.7552,
      "grad_norm": 1.071197748184204,
      "learning_rate": 0.00011800505538843798,
      "loss": 6.8816,
      "step": 118
    },
    {
      "epoch": 0.7616,
      "grad_norm": 0.8870216608047485,
      "learning_rate": 0.00011558641199298727,
      "loss": 6.854,
      "step": 119
    },
    {
      "epoch": 0.768,
      "grad_norm": 0.8856467604637146,
      "learning_rate": 0.00011317717692888012,
      "loss": 6.8337,
      "step": 120
    },
    {
      "epoch": 0.7744,
      "grad_norm": 0.7022458910942078,
      "learning_rate": 0.00011077800885692702,
      "loss": 6.7082,
      "step": 121
    },
    {
      "epoch": 0.7808,
      "grad_norm": 0.8062697052955627,
      "learning_rate": 0.00010838956368572334,
      "loss": 6.7377,
      "step": 122
    },
    {
      "epoch": 0.7872,
      "grad_norm": 0.9013355374336243,
      "learning_rate": 0.0001060124943923303,
      "loss": 6.7669,
      "step": 123
    },
    {
      "epoch": 0.7936,
      "grad_norm": 0.5724779367446899,
      "learning_rate": 0.0001036474508437579,
      "loss": 6.6174,
      "step": 124
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.5374643802642822,
      "learning_rate": 0.00010129507961929748,
      "loss": 6.6612,
      "step": 125
    },
    {
      "epoch": 0.8,
      "eval_loss": 6.694909572601318,
      "eval_runtime": 0.0438,
      "eval_samples_per_second": 1142.364,
      "eval_steps_per_second": 159.931,
      "step": 125
    },
    {
      "epoch": 0.8064,
      "grad_norm": 0.5468215346336365,
      "learning_rate": 9.895602383375353e-05,
      "loss": 6.667,
      "step": 126
    },
    {
      "epoch": 0.8128,
      "grad_norm": 0.4469563364982605,
      "learning_rate": 9.663092296162251e-05,
      "loss": 6.6637,
      "step": 127
    },
    {
      "epoch": 0.8192,
      "grad_norm": 0.6111487150192261,
      "learning_rate": 9.432041266226686e-05,
      "loss": 6.6008,
      "step": 128
    },
    {
      "epoch": 0.8256,
      "grad_norm": 0.46038818359375,
      "learning_rate": 9.202512460613219e-05,
      "loss": 6.6134,
      "step": 129
    },
    {
      "epoch": 0.832,
      "grad_norm": 0.5608881115913391,
      "learning_rate": 8.97456863020546e-05,
      "loss": 6.5824,
      "step": 130
    },
    {
      "epoch": 0.8384,
      "grad_norm": 0.5066159963607788,
      "learning_rate": 8.748272092570646e-05,
      "loss": 6.5936,
      "step": 131
    },
    {
      "epoch": 0.8448,
      "grad_norm": 0.35695910453796387,
      "learning_rate": 8.523684714922608e-05,
      "loss": 6.6303,
      "step": 132
    },
    {
      "epoch": 0.8512,
      "grad_norm": 0.5033596158027649,
      "learning_rate": 8.300867897207903e-05,
      "loss": 6.583,
      "step": 133
    },
    {
      "epoch": 0.8576,
      "grad_norm": 0.417173832654953,
      "learning_rate": 8.079882555319684e-05,
      "loss": 6.6496,
      "step": 134
    },
    {
      "epoch": 0.864,
      "grad_norm": 0.3751719892024994,
      "learning_rate": 7.860789104443896e-05,
      "loss": 6.6507,
      "step": 135
    },
    {
      "epoch": 0.8704,
      "grad_norm": 0.3995749056339264,
      "learning_rate": 7.643647442542382e-05,
      "loss": 6.5948,
      "step": 136
    },
    {
      "epoch": 0.8768,
      "grad_norm": 0.4013768136501312,
      "learning_rate": 7.428516933977347e-05,
      "loss": 6.6164,
      "step": 137
    },
    {
      "epoch": 0.8832,
      "grad_norm": 0.3744851052761078,
      "learning_rate": 7.215456393281776e-05,
      "loss": 6.6189,
      "step": 138
    },
    {
      "epoch": 0.8896,
      "grad_norm": 0.3870219886302948,
      "learning_rate": 7.004524069080096e-05,
      "loss": 6.6008,
      "step": 139
    },
    {
      "epoch": 0.896,
      "grad_norm": 0.42525532841682434,
      "learning_rate": 6.795777628163599e-05,
      "loss": 6.6112,
      "step": 140
    },
    {
      "epoch": 0.9024,
      "grad_norm": 0.3942902088165283,
      "learning_rate": 6.58927413972491e-05,
      "loss": 6.5992,
      "step": 141
    },
    {
      "epoch": 0.9088,
      "grad_norm": 0.48578906059265137,
      "learning_rate": 6.385070059755846e-05,
      "loss": 6.5816,
      "step": 142
    },
    {
      "epoch": 0.9152,
      "grad_norm": 0.36412331461906433,
      "learning_rate": 6.183221215612904e-05,
      "loss": 6.6061,
      "step": 143
    },
    {
      "epoch": 0.9216,
      "grad_norm": 0.4250425398349762,
      "learning_rate": 5.983782790754623e-05,
      "loss": 6.6691,
      "step": 144
    },
    {
      "epoch": 0.928,
      "grad_norm": 0.4022601842880249,
      "learning_rate": 5.786809309654982e-05,
      "loss": 6.6641,
      "step": 145
    },
    {
      "epoch": 0.9344,
      "grad_norm": 0.5123599171638489,
      "learning_rate": 5.592354622896944e-05,
      "loss": 6.6763,
      "step": 146
    },
    {
      "epoch": 0.9408,
      "grad_norm": 0.44589608907699585,
      "learning_rate": 5.40047189245025e-05,
      "loss": 6.6136,
      "step": 147
    },
    {
      "epoch": 0.9472,
      "grad_norm": 0.4042072892189026,
      "learning_rate": 5.211213577137469e-05,
      "loss": 6.6563,
      "step": 148
    },
    {
      "epoch": 0.9536,
      "grad_norm": 0.4555412232875824,
      "learning_rate": 5.024631418292274e-05,
      "loss": 6.661,
      "step": 149
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.3993721008300781,
      "learning_rate": 4.840776425613886e-05,
      "loss": 6.674,
      "step": 150
    },
    {
      "epoch": 0.96,
      "eval_loss": 6.682973861694336,
      "eval_runtime": 0.0454,
      "eval_samples_per_second": 1101.37,
      "eval_steps_per_second": 154.192,
      "step": 150
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1878403276800.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}