|
{
  "best_metric": 0.6224383115768433,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.14641288433382138,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0014641288433382138,
      "grad_norm": 0.9750430583953857,
      "learning_rate": 4e-05,
      "loss": 1.0625,
      "step": 1
    },
    {
      "epoch": 0.0014641288433382138,
      "eval_loss": 1.124760389328003,
      "eval_runtime": 76.9259,
      "eval_samples_per_second": 3.744,
      "eval_steps_per_second": 1.872,
      "step": 1
    },
    {
      "epoch": 0.0029282576866764276,
      "grad_norm": 1.0913565158843994,
      "learning_rate": 8e-05,
      "loss": 1.0917,
      "step": 2
    },
    {
      "epoch": 0.004392386530014641,
      "grad_norm": 1.0702697038650513,
      "learning_rate": 0.00012,
      "loss": 1.0758,
      "step": 3
    },
    {
      "epoch": 0.005856515373352855,
      "grad_norm": 1.0503638982772827,
      "learning_rate": 0.00016,
      "loss": 1.0126,
      "step": 4
    },
    {
      "epoch": 0.007320644216691069,
      "grad_norm": 1.3807379007339478,
      "learning_rate": 0.0002,
      "loss": 0.9423,
      "step": 5
    },
    {
      "epoch": 0.008784773060029283,
      "grad_norm": 1.5241297483444214,
      "learning_rate": 0.00019994532573409262,
      "loss": 0.9538,
      "step": 6
    },
    {
      "epoch": 0.010248901903367497,
      "grad_norm": 1.7318557500839233,
      "learning_rate": 0.00019978136272187747,
      "loss": 0.8408,
      "step": 7
    },
    {
      "epoch": 0.01171303074670571,
      "grad_norm": 1.2110615968704224,
      "learning_rate": 0.00019950829025450114,
      "loss": 0.8412,
      "step": 8
    },
    {
      "epoch": 0.013177159590043924,
      "grad_norm": 0.9903315901756287,
      "learning_rate": 0.00019912640693269752,
      "loss": 0.8473,
      "step": 9
    },
    {
      "epoch": 0.014641288433382138,
      "grad_norm": 0.8798181414604187,
      "learning_rate": 0.00019863613034027224,
      "loss": 0.8267,
      "step": 10
    },
    {
      "epoch": 0.016105417276720352,
      "grad_norm": 0.9941743016242981,
      "learning_rate": 0.00019803799658748094,
      "loss": 0.8427,
      "step": 11
    },
    {
      "epoch": 0.017569546120058566,
      "grad_norm": 0.9219509959220886,
      "learning_rate": 0.0001973326597248006,
      "loss": 0.7463,
      "step": 12
    },
    {
      "epoch": 0.01903367496339678,
      "grad_norm": 0.8963194489479065,
      "learning_rate": 0.00019652089102773488,
      "loss": 0.8373,
      "step": 13
    },
    {
      "epoch": 0.020497803806734993,
      "grad_norm": 0.9019066691398621,
      "learning_rate": 0.00019560357815343577,
      "loss": 0.754,
      "step": 14
    },
    {
      "epoch": 0.021961932650073207,
      "grad_norm": 1.0534106492996216,
      "learning_rate": 0.00019458172417006347,
      "loss": 0.7813,
      "step": 15
    },
    {
      "epoch": 0.02342606149341142,
      "grad_norm": 0.8860576152801514,
      "learning_rate": 0.0001934564464599461,
      "loss": 0.7497,
      "step": 16
    },
    {
      "epoch": 0.024890190336749635,
      "grad_norm": 0.9496033191680908,
      "learning_rate": 0.00019222897549773848,
      "loss": 0.6888,
      "step": 17
    },
    {
      "epoch": 0.02635431918008785,
      "grad_norm": 0.9020019173622131,
      "learning_rate": 0.00019090065350491626,
      "loss": 0.7641,
      "step": 18
    },
    {
      "epoch": 0.027818448023426062,
      "grad_norm": 0.8967961668968201,
      "learning_rate": 0.00018947293298207635,
      "loss": 0.75,
      "step": 19
    },
    {
      "epoch": 0.029282576866764276,
      "grad_norm": 0.8483291268348694,
      "learning_rate": 0.0001879473751206489,
      "loss": 0.6926,
      "step": 20
    },
    {
      "epoch": 0.03074670571010249,
      "grad_norm": 0.9041450023651123,
      "learning_rate": 0.00018632564809575742,
      "loss": 0.7076,
      "step": 21
    },
    {
      "epoch": 0.032210834553440704,
      "grad_norm": 0.8793591260910034,
      "learning_rate": 0.00018460952524209355,
      "loss": 0.6334,
      "step": 22
    },
    {
      "epoch": 0.03367496339677892,
      "grad_norm": 0.9118833541870117,
      "learning_rate": 0.00018280088311480201,
      "loss": 0.7029,
      "step": 23
    },
    {
      "epoch": 0.03513909224011713,
      "grad_norm": 0.9427176117897034,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.7891,
      "step": 24
    },
    {
      "epoch": 0.036603221083455345,
      "grad_norm": 0.9533204436302185,
      "learning_rate": 0.00017891405093963938,
      "loss": 0.8176,
      "step": 25
    },
    {
      "epoch": 0.036603221083455345,
      "eval_loss": 0.7120001316070557,
      "eval_runtime": 77.4992,
      "eval_samples_per_second": 3.716,
      "eval_steps_per_second": 1.858,
      "step": 25
    },
    {
      "epoch": 0.03806734992679356,
      "grad_norm": 0.9075884819030762,
      "learning_rate": 0.00017684011108568592,
      "loss": 0.7545,
      "step": 26
    },
    {
      "epoch": 0.03953147877013177,
      "grad_norm": 0.8508671522140503,
      "learning_rate": 0.0001746821476984154,
      "loss": 0.6666,
      "step": 27
    },
    {
      "epoch": 0.040995607613469986,
      "grad_norm": 0.830573558807373,
      "learning_rate": 0.00017244252047910892,
      "loss": 0.696,
      "step": 28
    },
    {
      "epoch": 0.0424597364568082,
      "grad_norm": 0.816193163394928,
      "learning_rate": 0.00017012367842724887,
      "loss": 0.7688,
      "step": 29
    },
    {
      "epoch": 0.043923865300146414,
      "grad_norm": 0.9093814492225647,
      "learning_rate": 0.00016772815716257412,
      "loss": 0.6717,
      "step": 30
    },
    {
      "epoch": 0.04538799414348463,
      "grad_norm": 0.8094978332519531,
      "learning_rate": 0.00016525857615241687,
      "loss": 0.588,
      "step": 31
    },
    {
      "epoch": 0.04685212298682284,
      "grad_norm": 0.8796850442886353,
      "learning_rate": 0.0001627176358473537,
      "loss": 0.7133,
      "step": 32
    },
    {
      "epoch": 0.048316251830161056,
      "grad_norm": 0.9510671496391296,
      "learning_rate": 0.00016010811472830252,
      "loss": 0.6754,
      "step": 33
    },
    {
      "epoch": 0.04978038067349927,
      "grad_norm": 0.9171980023384094,
      "learning_rate": 0.00015743286626829437,
      "loss": 0.665,
      "step": 34
    },
    {
      "epoch": 0.05124450951683748,
      "grad_norm": 0.9319704174995422,
      "learning_rate": 0.00015469481581224272,
      "loss": 0.6215,
      "step": 35
    },
    {
      "epoch": 0.0527086383601757,
      "grad_norm": 0.9619579911231995,
      "learning_rate": 0.00015189695737812152,
      "loss": 0.7145,
      "step": 36
    },
    {
      "epoch": 0.05417276720351391,
      "grad_norm": 0.8880752921104431,
      "learning_rate": 0.00014904235038305083,
      "loss": 0.6216,
      "step": 37
    },
    {
      "epoch": 0.055636896046852125,
      "grad_norm": 0.9341321587562561,
      "learning_rate": 0.0001461341162978688,
      "loss": 0.7046,
      "step": 38
    },
    {
      "epoch": 0.05710102489019034,
      "grad_norm": 0.8551350235939026,
      "learning_rate": 0.00014317543523384928,
      "loss": 0.6551,
      "step": 39
    },
    {
      "epoch": 0.05856515373352855,
      "grad_norm": 0.9286485314369202,
      "learning_rate": 0.00014016954246529696,
      "loss": 0.6471,
      "step": 40
    },
    {
      "epoch": 0.060029282576866766,
      "grad_norm": 0.8776522278785706,
      "learning_rate": 0.00013711972489182208,
      "loss": 0.6324,
      "step": 41
    },
    {
      "epoch": 0.06149341142020498,
      "grad_norm": 0.859512984752655,
      "learning_rate": 0.00013402931744416433,
      "loss": 0.6475,
      "step": 42
    },
    {
      "epoch": 0.0629575402635432,
      "grad_norm": 0.8624581694602966,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.6477,
      "step": 43
    },
    {
      "epoch": 0.06442166910688141,
      "grad_norm": 0.8696426749229431,
      "learning_rate": 0.00012774029087618446,
      "loss": 0.6571,
      "step": 44
    },
    {
      "epoch": 0.06588579795021962,
      "grad_norm": 0.8606180548667908,
      "learning_rate": 0.00012454854871407994,
      "loss": 0.6489,
      "step": 45
    },
    {
      "epoch": 0.06734992679355783,
      "grad_norm": 0.8928582668304443,
      "learning_rate": 0.0001213299630743747,
      "loss": 0.6775,
      "step": 46
    },
    {
      "epoch": 0.06881405563689605,
      "grad_norm": 0.9211382865905762,
      "learning_rate": 0.000118088053433211,
      "loss": 0.5962,
      "step": 47
    },
    {
      "epoch": 0.07027818448023426,
      "grad_norm": 0.9554921388626099,
      "learning_rate": 0.0001148263647711842,
      "loss": 0.6437,
      "step": 48
    },
    {
      "epoch": 0.07174231332357248,
      "grad_norm": 0.9748461842536926,
      "learning_rate": 0.00011154846369695863,
      "loss": 0.6481,
      "step": 49
    },
    {
      "epoch": 0.07320644216691069,
      "grad_norm": 1.0362958908081055,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.6812,
      "step": 50
    },
    {
      "epoch": 0.07320644216691069,
      "eval_loss": 0.6729180812835693,
      "eval_runtime": 77.5069,
      "eval_samples_per_second": 3.716,
      "eval_steps_per_second": 1.858,
      "step": 50
    },
    {
      "epoch": 0.0746705710102489,
      "grad_norm": 1.0011053085327148,
      "learning_rate": 0.00010495837546732224,
      "loss": 0.6524,
      "step": 51
    },
    {
      "epoch": 0.07613469985358712,
      "grad_norm": 1.0353124141693115,
      "learning_rate": 0.00010165339447663587,
      "loss": 0.7559,
      "step": 52
    },
    {
      "epoch": 0.07759882869692533,
      "grad_norm": 0.8761913776397705,
      "learning_rate": 9.834660552336415e-05,
      "loss": 0.7059,
      "step": 53
    },
    {
      "epoch": 0.07906295754026355,
      "grad_norm": 0.8307029008865356,
      "learning_rate": 9.504162453267777e-05,
      "loss": 0.6997,
      "step": 54
    },
    {
      "epoch": 0.08052708638360176,
      "grad_norm": 0.7661670446395874,
      "learning_rate": 9.174206545276677e-05,
      "loss": 0.7013,
      "step": 55
    },
    {
      "epoch": 0.08199121522693997,
      "grad_norm": 0.7999428510665894,
      "learning_rate": 8.845153630304139e-05,
      "loss": 0.676,
      "step": 56
    },
    {
      "epoch": 0.08345534407027819,
      "grad_norm": 0.7628665566444397,
      "learning_rate": 8.517363522881579e-05,
      "loss": 0.655,
      "step": 57
    },
    {
      "epoch": 0.0849194729136164,
      "grad_norm": 0.7992427945137024,
      "learning_rate": 8.191194656678904e-05,
      "loss": 0.6059,
      "step": 58
    },
    {
      "epoch": 0.08638360175695461,
      "grad_norm": 0.7833818197250366,
      "learning_rate": 7.867003692562534e-05,
      "loss": 0.7034,
      "step": 59
    },
    {
      "epoch": 0.08784773060029283,
      "grad_norm": 0.7682210206985474,
      "learning_rate": 7.54514512859201e-05,
      "loss": 0.6838,
      "step": 60
    },
    {
      "epoch": 0.08931185944363104,
      "grad_norm": 0.7837997078895569,
      "learning_rate": 7.225970912381556e-05,
      "loss": 0.7132,
      "step": 61
    },
    {
      "epoch": 0.09077598828696926,
      "grad_norm": 0.7890181541442871,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.7043,
      "step": 62
    },
    {
      "epoch": 0.09224011713030747,
      "grad_norm": 0.7309653759002686,
      "learning_rate": 6.59706825558357e-05,
      "loss": 0.6575,
      "step": 63
    },
    {
      "epoch": 0.09370424597364568,
      "grad_norm": 0.73288893699646,
      "learning_rate": 6.28802751081779e-05,
      "loss": 0.6336,
      "step": 64
    },
    {
      "epoch": 0.0951683748169839,
      "grad_norm": 0.768713116645813,
      "learning_rate": 5.983045753470308e-05,
      "loss": 0.656,
      "step": 65
    },
    {
      "epoch": 0.09663250366032211,
      "grad_norm": 0.7750256061553955,
      "learning_rate": 5.6824564766150726e-05,
      "loss": 0.5982,
      "step": 66
    },
    {
      "epoch": 0.09809663250366032,
      "grad_norm": 0.82322096824646,
      "learning_rate": 5.386588370213124e-05,
      "loss": 0.6943,
      "step": 67
    },
    {
      "epoch": 0.09956076134699854,
      "grad_norm": 0.7596611380577087,
      "learning_rate": 5.095764961694922e-05,
      "loss": 0.6716,
      "step": 68
    },
    {
      "epoch": 0.10102489019033675,
      "grad_norm": 0.7736366391181946,
      "learning_rate": 4.810304262187852e-05,
      "loss": 0.5755,
      "step": 69
    },
    {
      "epoch": 0.10248901903367497,
      "grad_norm": 0.7916914820671082,
      "learning_rate": 4.530518418775733e-05,
      "loss": 0.6694,
      "step": 70
    },
    {
      "epoch": 0.10395314787701318,
      "grad_norm": 0.8091153502464294,
      "learning_rate": 4.256713373170564e-05,
      "loss": 0.6588,
      "step": 71
    },
    {
      "epoch": 0.1054172767203514,
      "grad_norm": 0.801612138748169,
      "learning_rate": 3.9891885271697496e-05,
      "loss": 0.6123,
      "step": 72
    },
    {
      "epoch": 0.10688140556368961,
      "grad_norm": 0.7758727073669434,
      "learning_rate": 3.7282364152646297e-05,
      "loss": 0.594,
      "step": 73
    },
    {
      "epoch": 0.10834553440702782,
      "grad_norm": 0.7896532416343689,
      "learning_rate": 3.4741423847583134e-05,
      "loss": 0.6035,
      "step": 74
    },
    {
      "epoch": 0.10980966325036604,
      "grad_norm": 0.7845363020896912,
      "learning_rate": 3.227184283742591e-05,
      "loss": 0.6064,
      "step": 75
    },
    {
      "epoch": 0.10980966325036604,
      "eval_loss": 0.6299811005592346,
      "eval_runtime": 77.5045,
      "eval_samples_per_second": 3.716,
      "eval_steps_per_second": 1.858,
      "step": 75
    },
    {
      "epoch": 0.11127379209370425,
      "grad_norm": 0.800595760345459,
      "learning_rate": 2.9876321572751144e-05,
      "loss": 0.6418,
      "step": 76
    },
    {
      "epoch": 0.11273792093704246,
      "grad_norm": 0.8185175657272339,
      "learning_rate": 2.7557479520891104e-05,
      "loss": 0.6344,
      "step": 77
    },
    {
      "epoch": 0.11420204978038068,
      "grad_norm": 0.8200536966323853,
      "learning_rate": 2.5317852301584643e-05,
      "loss": 0.6307,
      "step": 78
    },
    {
      "epoch": 0.11566617862371889,
      "grad_norm": 0.8251193165779114,
      "learning_rate": 2.315988891431412e-05,
      "loss": 0.6243,
      "step": 79
    },
    {
      "epoch": 0.1171303074670571,
      "grad_norm": 0.7931867241859436,
      "learning_rate": 2.1085949060360654e-05,
      "loss": 0.5778,
      "step": 80
    },
    {
      "epoch": 0.11859443631039532,
      "grad_norm": 0.8764688372612,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.6503,
      "step": 81
    },
    {
      "epoch": 0.12005856515373353,
      "grad_norm": 0.8642603158950806,
      "learning_rate": 1.7199116885197995e-05,
      "loss": 0.6904,
      "step": 82
    },
    {
      "epoch": 0.12152269399707175,
      "grad_norm": 0.7701883316040039,
      "learning_rate": 1.5390474757906446e-05,
      "loss": 0.5929,
      "step": 83
    },
    {
      "epoch": 0.12298682284040996,
      "grad_norm": 0.813798725605011,
      "learning_rate": 1.3674351904242611e-05,
      "loss": 0.6174,
      "step": 84
    },
    {
      "epoch": 0.12445095168374817,
      "grad_norm": 0.8495608568191528,
      "learning_rate": 1.2052624879351104e-05,
      "loss": 0.6403,
      "step": 85
    },
    {
      "epoch": 0.1259150805270864,
      "grad_norm": 0.850382924079895,
      "learning_rate": 1.0527067017923654e-05,
      "loss": 0.5469,
      "step": 86
    },
    {
      "epoch": 0.1273792093704246,
      "grad_norm": 0.8462463617324829,
      "learning_rate": 9.09934649508375e-06,
      "loss": 0.6733,
      "step": 87
    },
    {
      "epoch": 0.12884333821376281,
      "grad_norm": 0.8402328491210938,
      "learning_rate": 7.771024502261526e-06,
      "loss": 0.5709,
      "step": 88
    },
    {
      "epoch": 0.13030746705710103,
      "grad_norm": 0.9025482535362244,
      "learning_rate": 6.543553540053926e-06,
      "loss": 0.6922,
      "step": 89
    },
    {
      "epoch": 0.13177159590043924,
      "grad_norm": 0.7909383177757263,
      "learning_rate": 5.418275829936537e-06,
      "loss": 0.5907,
      "step": 90
    },
    {
      "epoch": 0.13323572474377746,
      "grad_norm": 0.7756696343421936,
      "learning_rate": 4.3964218465642355e-06,
      "loss": 0.5443,
      "step": 91
    },
    {
      "epoch": 0.13469985358711567,
      "grad_norm": 0.837628185749054,
      "learning_rate": 3.4791089722651436e-06,
      "loss": 0.6027,
      "step": 92
    },
    {
      "epoch": 0.13616398243045388,
      "grad_norm": 0.8768534660339355,
      "learning_rate": 2.667340275199426e-06,
      "loss": 0.6742,
      "step": 93
    },
    {
      "epoch": 0.1376281112737921,
      "grad_norm": 0.7963073253631592,
      "learning_rate": 1.9620034125190644e-06,
      "loss": 0.5903,
      "step": 94
    },
    {
      "epoch": 0.1390922401171303,
      "grad_norm": 0.8839337229728699,
      "learning_rate": 1.3638696597277679e-06,
      "loss": 0.6328,
      "step": 95
    },
    {
      "epoch": 0.14055636896046853,
      "grad_norm": 0.9011049270629883,
      "learning_rate": 8.735930673024806e-07,
      "loss": 0.6805,
      "step": 96
    },
    {
      "epoch": 0.14202049780380674,
      "grad_norm": 0.8585094213485718,
      "learning_rate": 4.917097454988584e-07,
      "loss": 0.5341,
      "step": 97
    },
    {
      "epoch": 0.14348462664714495,
      "grad_norm": 0.8849230408668518,
      "learning_rate": 2.1863727812254653e-07,
      "loss": 0.6161,
      "step": 98
    },
    {
      "epoch": 0.14494875549048317,
      "grad_norm": 0.9133574366569519,
      "learning_rate": 5.467426590739511e-08,
      "loss": 0.6795,
      "step": 99
    },
    {
      "epoch": 0.14641288433382138,
      "grad_norm": 0.9264809489250183,
      "learning_rate": 0.0,
      "loss": 0.6004,
      "step": 100
    },
    {
      "epoch": 0.14641288433382138,
      "eval_loss": 0.6224383115768433,
      "eval_runtime": 77.4968,
      "eval_samples_per_second": 3.716,
      "eval_steps_per_second": 1.858,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.02119316209664e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|