{
  "best_metric": 0.33776161074638367,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.32118839706915586,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003211883970691559,
      "grad_norm": 20.16118621826172,
      "learning_rate": 2e-05,
      "loss": 10.5874,
      "step": 1
    },
    {
      "epoch": 0.003211883970691559,
      "eval_loss": 10.961771011352539,
      "eval_runtime": 0.5041,
      "eval_samples_per_second": 99.189,
      "eval_steps_per_second": 25.789,
      "step": 1
    },
    {
      "epoch": 0.006423767941383118,
      "grad_norm": 19.579418182373047,
      "learning_rate": 4e-05,
      "loss": 11.2853,
      "step": 2
    },
    {
      "epoch": 0.009635651912074676,
      "grad_norm": 19.505998611450195,
      "learning_rate": 6e-05,
      "loss": 11.473,
      "step": 3
    },
    {
      "epoch": 0.012847535882766235,
      "grad_norm": 19.151931762695312,
      "learning_rate": 8e-05,
      "loss": 10.7664,
      "step": 4
    },
    {
      "epoch": 0.016059419853457792,
      "grad_norm": 15.65612506866455,
      "learning_rate": 0.0001,
      "loss": 9.6991,
      "step": 5
    },
    {
      "epoch": 0.019271303824149353,
      "grad_norm": 15.794941902160645,
      "learning_rate": 9.997539658034168e-05,
      "loss": 8.4858,
      "step": 6
    },
    {
      "epoch": 0.02248318779484091,
      "grad_norm": 24.663986206054688,
      "learning_rate": 9.990161322484486e-05,
      "loss": 7.7278,
      "step": 7
    },
    {
      "epoch": 0.02569507176553247,
      "grad_norm": 32.05091857910156,
      "learning_rate": 9.977873061452552e-05,
      "loss": 5.7351,
      "step": 8
    },
    {
      "epoch": 0.028906955736224028,
      "grad_norm": 20.47426986694336,
      "learning_rate": 9.96068831197139e-05,
      "loss": 3.6171,
      "step": 9
    },
    {
      "epoch": 0.032118839706915585,
      "grad_norm": 16.713817596435547,
      "learning_rate": 9.938625865312251e-05,
      "loss": 2.2651,
      "step": 10
    },
    {
      "epoch": 0.035330723677607145,
      "grad_norm": 15.718864440917969,
      "learning_rate": 9.911709846436641e-05,
      "loss": 1.6319,
      "step": 11
    },
    {
      "epoch": 0.038542607648298706,
      "grad_norm": 8.562857627868652,
      "learning_rate": 9.879969687616027e-05,
      "loss": 1.043,
      "step": 12
    },
    {
      "epoch": 0.041754491618990267,
      "grad_norm": 7.533064842224121,
      "learning_rate": 9.84344009624807e-05,
      "loss": 1.4191,
      "step": 13
    },
    {
      "epoch": 0.04496637558968182,
      "grad_norm": 9.270398139953613,
      "learning_rate": 9.80216101690461e-05,
      "loss": 1.9694,
      "step": 14
    },
    {
      "epoch": 0.04817825956037338,
      "grad_norm": 6.436896324157715,
      "learning_rate": 9.756177587652856e-05,
      "loss": 1.4584,
      "step": 15
    },
    {
      "epoch": 0.05139014353106494,
      "grad_norm": 9.544507026672363,
      "learning_rate": 9.705540090697575e-05,
      "loss": 1.0719,
      "step": 16
    },
    {
      "epoch": 0.0546020275017565,
      "grad_norm": 12.543577194213867,
      "learning_rate": 9.650303897398232e-05,
      "loss": 1.2843,
      "step": 17
    },
    {
      "epoch": 0.057813911472448055,
      "grad_norm": 5.713746070861816,
      "learning_rate": 9.590529407721231e-05,
      "loss": 0.9745,
      "step": 18
    },
    {
      "epoch": 0.061025795443139616,
      "grad_norm": 3.9435653686523438,
      "learning_rate": 9.526281984193436e-05,
      "loss": 0.7748,
      "step": 19
    },
    {
      "epoch": 0.06423767941383117,
      "grad_norm": 12.931794166564941,
      "learning_rate": 9.4576318804292e-05,
      "loss": 2.0045,
      "step": 20
    },
    {
      "epoch": 0.06744956338452274,
      "grad_norm": 9.132307052612305,
      "learning_rate": 9.384654164309083e-05,
      "loss": 1.4456,
      "step": 21
    },
    {
      "epoch": 0.07066144735521429,
      "grad_norm": 4.245421886444092,
      "learning_rate": 9.30742863589421e-05,
      "loss": 0.8106,
      "step": 22
    },
    {
      "epoch": 0.07387333132590586,
      "grad_norm": 6.020063400268555,
      "learning_rate": 9.226039740166091e-05,
      "loss": 0.5633,
      "step": 23
    },
    {
      "epoch": 0.07708521529659741,
      "grad_norm": 5.152550220489502,
      "learning_rate": 9.140576474687264e-05,
      "loss": 0.6608,
      "step": 24
    },
    {
      "epoch": 0.08029709926728897,
      "grad_norm": 4.782017707824707,
      "learning_rate": 9.051132292283771e-05,
      "loss": 0.6502,
      "step": 25
    },
    {
      "epoch": 0.08029709926728897,
      "eval_loss": 0.6939069628715515,
      "eval_runtime": 0.5032,
      "eval_samples_per_second": 99.361,
      "eval_steps_per_second": 25.834,
      "step": 25
    },
    {
      "epoch": 0.08350898323798053,
      "grad_norm": 13.219664573669434,
      "learning_rate": 8.957804998855866e-05,
      "loss": 1.18,
      "step": 26
    },
    {
      "epoch": 0.08672086720867209,
      "grad_norm": 3.907947301864624,
      "learning_rate": 8.860696646428693e-05,
      "loss": 0.8817,
      "step": 27
    },
    {
      "epoch": 0.08993275117936364,
      "grad_norm": 2.1503279209136963,
      "learning_rate": 8.759913421559902e-05,
      "loss": 0.6689,
      "step": 28
    },
    {
      "epoch": 0.09314463515005521,
      "grad_norm": 1.8644012212753296,
      "learning_rate": 8.655565529226198e-05,
      "loss": 0.5879,
      "step": 29
    },
    {
      "epoch": 0.09635651912074676,
      "grad_norm": 5.081637859344482,
      "learning_rate": 8.547767072315835e-05,
      "loss": 0.6628,
      "step": 30
    },
    {
      "epoch": 0.09956840309143832,
      "grad_norm": 1.9498679637908936,
      "learning_rate": 8.436635926858759e-05,
      "loss": 0.4971,
      "step": 31
    },
    {
      "epoch": 0.10278028706212988,
      "grad_norm": 1.7911735773086548,
      "learning_rate": 8.322293613130917e-05,
      "loss": 0.5756,
      "step": 32
    },
    {
      "epoch": 0.10599217103282144,
      "grad_norm": 5.527887344360352,
      "learning_rate": 8.204865162773613e-05,
      "loss": 1.094,
      "step": 33
    },
    {
      "epoch": 0.109204055003513,
      "grad_norm": 1.6801953315734863,
      "learning_rate": 8.084478982073247e-05,
      "loss": 0.4828,
      "step": 34
    },
    {
      "epoch": 0.11241593897420456,
      "grad_norm": 2.1924173831939697,
      "learning_rate": 7.961266711550922e-05,
      "loss": 0.3928,
      "step": 35
    },
    {
      "epoch": 0.11562782294489611,
      "grad_norm": 2.8271541595458984,
      "learning_rate": 7.835363082015468e-05,
      "loss": 0.3965,
      "step": 36
    },
    {
      "epoch": 0.11883970691558768,
      "grad_norm": 1.9707531929016113,
      "learning_rate": 7.706905767237288e-05,
      "loss": 0.3363,
      "step": 37
    },
    {
      "epoch": 0.12205159088627923,
      "grad_norm": 2.6084580421447754,
      "learning_rate": 7.576035233404096e-05,
      "loss": 0.5723,
      "step": 38
    },
    {
      "epoch": 0.12526347485697079,
      "grad_norm": 2.948139190673828,
      "learning_rate": 7.442894585523218e-05,
      "loss": 0.7208,
      "step": 39
    },
    {
      "epoch": 0.12847535882766234,
      "grad_norm": 3.0574727058410645,
      "learning_rate": 7.307629410938363e-05,
      "loss": 0.5781,
      "step": 40
    },
    {
      "epoch": 0.13168724279835392,
      "grad_norm": 2.222038984298706,
      "learning_rate": 7.170387620131993e-05,
      "loss": 0.466,
      "step": 41
    },
    {
      "epoch": 0.13489912676904547,
      "grad_norm": 1.6518263816833496,
      "learning_rate": 7.031319284987394e-05,
      "loss": 0.3927,
      "step": 42
    },
    {
      "epoch": 0.13811101073973703,
      "grad_norm": 2.6922054290771484,
      "learning_rate": 6.890576474687263e-05,
      "loss": 0.5703,
      "step": 43
    },
    {
      "epoch": 0.14132289471042858,
      "grad_norm": 2.6648433208465576,
      "learning_rate": 6.7483130894283e-05,
      "loss": 0.5503,
      "step": 44
    },
    {
      "epoch": 0.14453477868112014,
      "grad_norm": 2.1673121452331543,
      "learning_rate": 6.604684692133597e-05,
      "loss": 0.7952,
      "step": 45
    },
    {
      "epoch": 0.14774666265181172,
      "grad_norm": 3.4408926963806152,
      "learning_rate": 6.459848338346861e-05,
      "loss": 0.6431,
      "step": 46
    },
    {
      "epoch": 0.15095854662250327,
      "grad_norm": 2.1872146129608154,
      "learning_rate": 6.313962404494496e-05,
      "loss": 0.4239,
      "step": 47
    },
    {
      "epoch": 0.15417043059319482,
      "grad_norm": 1.5436232089996338,
      "learning_rate": 6.167186414703289e-05,
      "loss": 0.4114,
      "step": 48
    },
    {
      "epoch": 0.15738231456388638,
      "grad_norm": 1.8399603366851807,
      "learning_rate": 6.019680866363139e-05,
      "loss": 0.3185,
      "step": 49
    },
    {
      "epoch": 0.16059419853457793,
      "grad_norm": 2.190896987915039,
      "learning_rate": 5.8716070546254966e-05,
      "loss": 0.2297,
      "step": 50
    },
    {
      "epoch": 0.16059419853457793,
      "eval_loss": 0.5405478477478027,
      "eval_runtime": 0.5041,
      "eval_samples_per_second": 99.187,
      "eval_steps_per_second": 25.789,
      "step": 50
    },
    {
      "epoch": 0.16380608250526948,
      "grad_norm": 4.038471698760986,
      "learning_rate": 5.7231268960295e-05,
      "loss": 0.8339,
      "step": 51
    },
    {
      "epoch": 0.16701796647596107,
      "grad_norm": 2.3349921703338623,
      "learning_rate": 5.574402751448614e-05,
      "loss": 0.4534,
      "step": 52
    },
    {
      "epoch": 0.17022985044665262,
      "grad_norm": 1.8736135959625244,
      "learning_rate": 5.425597248551387e-05,
      "loss": 0.4491,
      "step": 53
    },
    {
      "epoch": 0.17344173441734417,
      "grad_norm": 2.2482573986053467,
      "learning_rate": 5.2768731039705e-05,
      "loss": 0.403,
      "step": 54
    },
    {
      "epoch": 0.17665361838803573,
      "grad_norm": 1.4259393215179443,
      "learning_rate": 5.128392945374505e-05,
      "loss": 0.3789,
      "step": 55
    },
    {
      "epoch": 0.17986550235872728,
      "grad_norm": 1.762160301208496,
      "learning_rate": 4.980319133636863e-05,
      "loss": 0.4335,
      "step": 56
    },
    {
      "epoch": 0.18307738632941886,
      "grad_norm": 1.4867217540740967,
      "learning_rate": 4.83281358529671e-05,
      "loss": 0.42,
      "step": 57
    },
    {
      "epoch": 0.18628927030011042,
      "grad_norm": 1.774482250213623,
      "learning_rate": 4.686037595505507e-05,
      "loss": 0.7633,
      "step": 58
    },
    {
      "epoch": 0.18950115427080197,
      "grad_norm": 1.6524527072906494,
      "learning_rate": 4.54015166165314e-05,
      "loss": 0.3312,
      "step": 59
    },
    {
      "epoch": 0.19271303824149352,
      "grad_norm": 1.7809213399887085,
      "learning_rate": 4.395315307866405e-05,
      "loss": 0.3048,
      "step": 60
    },
    {
      "epoch": 0.19592492221218508,
      "grad_norm": 0.9493484497070312,
      "learning_rate": 4.2516869105717004e-05,
      "loss": 0.2782,
      "step": 61
    },
    {
      "epoch": 0.19913680618287663,
      "grad_norm": 1.6046050786972046,
      "learning_rate": 4.109423525312738e-05,
      "loss": 0.2086,
      "step": 62
    },
    {
      "epoch": 0.2023486901535682,
      "grad_norm": 4.323451042175293,
      "learning_rate": 3.968680715012606e-05,
      "loss": 0.5269,
      "step": 63
    },
    {
      "epoch": 0.20556057412425977,
      "grad_norm": 3.2167468070983887,
      "learning_rate": 3.829612379868006e-05,
      "loss": 0.5914,
      "step": 64
    },
    {
      "epoch": 0.20877245809495132,
      "grad_norm": 2.5836000442504883,
      "learning_rate": 3.692370589061639e-05,
      "loss": 0.3979,
      "step": 65
    },
    {
      "epoch": 0.21198434206564287,
      "grad_norm": 1.6577327251434326,
      "learning_rate": 3.557105414476782e-05,
      "loss": 0.2982,
      "step": 66
    },
    {
      "epoch": 0.21519622603633443,
      "grad_norm": 1.7948780059814453,
      "learning_rate": 3.423964766595906e-05,
      "loss": 0.353,
      "step": 67
    },
    {
      "epoch": 0.218408110007026,
      "grad_norm": 2.2538440227508545,
      "learning_rate": 3.293094232762715e-05,
      "loss": 0.4393,
      "step": 68
    },
    {
      "epoch": 0.22161999397771756,
      "grad_norm": 1.4487870931625366,
      "learning_rate": 3.164636917984534e-05,
      "loss": 0.3644,
      "step": 69
    },
    {
      "epoch": 0.22483187794840911,
      "grad_norm": 1.4857341051101685,
      "learning_rate": 3.0387332884490805e-05,
      "loss": 0.5984,
      "step": 70
    },
    {
      "epoch": 0.22804376191910067,
      "grad_norm": 1.458071231842041,
      "learning_rate": 2.9155210179267546e-05,
      "loss": 0.4983,
      "step": 71
    },
    {
      "epoch": 0.23125564588979222,
      "grad_norm": 1.4762952327728271,
      "learning_rate": 2.7951348372263875e-05,
      "loss": 0.247,
      "step": 72
    },
    {
      "epoch": 0.2344675298604838,
      "grad_norm": 1.1881077289581299,
      "learning_rate": 2.677706386869083e-05,
      "loss": 0.2679,
      "step": 73
    },
    {
      "epoch": 0.23767941383117536,
      "grad_norm": 0.9410943984985352,
      "learning_rate": 2.5633640731412412e-05,
      "loss": 0.1891,
      "step": 74
    },
    {
      "epoch": 0.2408912978018669,
      "grad_norm": 1.873358964920044,
      "learning_rate": 2.4522329276841663e-05,
      "loss": 0.2224,
      "step": 75
    },
    {
      "epoch": 0.2408912978018669,
      "eval_loss": 0.3565179407596588,
      "eval_runtime": 0.503,
      "eval_samples_per_second": 99.395,
      "eval_steps_per_second": 25.843,
      "step": 75
    },
    {
      "epoch": 0.24410318177255846,
      "grad_norm": 2.8982174396514893,
      "learning_rate": 2.3444344707738015e-05,
      "loss": 0.6377,
      "step": 76
    },
    {
      "epoch": 0.24731506574325002,
      "grad_norm": 2.408865451812744,
      "learning_rate": 2.2400865784401e-05,
      "loss": 0.4874,
      "step": 77
    },
    {
      "epoch": 0.25052694971394157,
      "grad_norm": 2.227947473526001,
      "learning_rate": 2.1393033535713093e-05,
      "loss": 0.3836,
      "step": 78
    },
    {
      "epoch": 0.2537388336846331,
      "grad_norm": 1.9388247728347778,
      "learning_rate": 2.0421950011441354e-05,
      "loss": 0.2834,
      "step": 79
    },
    {
      "epoch": 0.2569507176553247,
      "grad_norm": 1.9769359827041626,
      "learning_rate": 1.9488677077162295e-05,
      "loss": 0.3367,
      "step": 80
    },
    {
      "epoch": 0.2601626016260163,
      "grad_norm": 2.0125112533569336,
      "learning_rate": 1.8594235253127375e-05,
      "loss": 0.3863,
      "step": 81
    },
    {
      "epoch": 0.26337448559670784,
      "grad_norm": 1.9390735626220703,
      "learning_rate": 1.77396025983391e-05,
      "loss": 0.393,
      "step": 82
    },
    {
      "epoch": 0.2665863695673994,
      "grad_norm": 1.5394026041030884,
      "learning_rate": 1.6925713641057904e-05,
      "loss": 0.6196,
      "step": 83
    },
    {
      "epoch": 0.26979825353809095,
      "grad_norm": 2.0069022178649902,
      "learning_rate": 1.6153458356909176e-05,
      "loss": 0.3049,
      "step": 84
    },
    {
      "epoch": 0.2730101375087825,
      "grad_norm": 1.0801819562911987,
      "learning_rate": 1.5423681195707997e-05,
      "loss": 0.2012,
      "step": 85
    },
    {
      "epoch": 0.27622202147947406,
      "grad_norm": 1.3949787616729736,
      "learning_rate": 1.4737180158065644e-05,
      "loss": 0.2644,
      "step": 86
    },
    {
      "epoch": 0.2794339054501656,
      "grad_norm": 1.527002215385437,
      "learning_rate": 1.4094705922787687e-05,
      "loss": 0.2278,
      "step": 87
    },
    {
      "epoch": 0.28264578942085716,
      "grad_norm": 1.6759589910507202,
      "learning_rate": 1.3496961026017687e-05,
      "loss": 0.3889,
      "step": 88
    },
    {
      "epoch": 0.2858576733915487,
      "grad_norm": 2.828400135040283,
      "learning_rate": 1.2944599093024267e-05,
      "loss": 0.5727,
      "step": 89
    },
    {
      "epoch": 0.28906955736224027,
      "grad_norm": 2.203261375427246,
      "learning_rate": 1.2438224123471442e-05,
      "loss": 0.3116,
      "step": 90
    },
    {
      "epoch": 0.2922814413329318,
      "grad_norm": 1.2678191661834717,
      "learning_rate": 1.1978389830953907e-05,
      "loss": 0.232,
      "step": 91
    },
    {
      "epoch": 0.29549332530362343,
      "grad_norm": 1.533488154411316,
      "learning_rate": 1.1565599037519316e-05,
      "loss": 0.3384,
      "step": 92
    },
    {
      "epoch": 0.298705209274315,
      "grad_norm": 1.5737160444259644,
      "learning_rate": 1.1200303123839742e-05,
      "loss": 0.3892,
      "step": 93
    },
    {
      "epoch": 0.30191709324500654,
      "grad_norm": 2.21943998336792,
      "learning_rate": 1.088290153563358e-05,
      "loss": 0.4123,
      "step": 94
    },
    {
      "epoch": 0.3051289772156981,
      "grad_norm": 1.0407116413116455,
      "learning_rate": 1.0613741346877497e-05,
      "loss": 0.4279,
      "step": 95
    },
    {
      "epoch": 0.30834086118638965,
      "grad_norm": 1.2876520156860352,
      "learning_rate": 1.0393116880286118e-05,
      "loss": 0.4044,
      "step": 96
    },
    {
      "epoch": 0.3115527451570812,
      "grad_norm": 1.4480308294296265,
      "learning_rate": 1.0221269385474488e-05,
      "loss": 0.1674,
      "step": 97
    },
    {
      "epoch": 0.31476462912777275,
      "grad_norm": 0.8228076100349426,
      "learning_rate": 1.0098386775155147e-05,
      "loss": 0.2878,
      "step": 98
    },
    {
      "epoch": 0.3179765130984643,
      "grad_norm": 1.4703351259231567,
      "learning_rate": 1.0024603419658329e-05,
      "loss": 0.2122,
      "step": 99
    },
    {
      "epoch": 0.32118839706915586,
      "grad_norm": 0.9804506897926331,
      "learning_rate": 1e-05,
      "loss": 0.1397,
      "step": 100
    },
    {
      "epoch": 0.32118839706915586,
      "eval_loss": 0.33776161074638367,
      "eval_runtime": 0.5035,
      "eval_samples_per_second": 99.308,
      "eval_steps_per_second": 25.82,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.666832784162816e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}