{
  "best_metric": 6.911687850952148,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.4125838060856111,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008251676121712223,
      "grad_norm": 0.19007240235805511,
      "learning_rate": 5e-05,
      "loss": 6.8772,
      "step": 1
    },
    {
      "epoch": 0.008251676121712223,
      "eval_loss": 6.937552452087402,
      "eval_runtime": 0.8717,
      "eval_samples_per_second": 937.293,
      "eval_steps_per_second": 118.165,
      "step": 1
    },
    {
      "epoch": 0.016503352243424446,
      "grad_norm": 0.18242356181144714,
      "learning_rate": 0.0001,
      "loss": 6.858,
      "step": 2
    },
    {
      "epoch": 0.02475502836513667,
      "grad_norm": 0.1995544582605362,
      "learning_rate": 9.989294616193017e-05,
      "loss": 6.8447,
      "step": 3
    },
    {
      "epoch": 0.03300670448684889,
      "grad_norm": 0.18593429028987885,
      "learning_rate": 9.957224306869053e-05,
      "loss": 6.8302,
      "step": 4
    },
    {
      "epoch": 0.041258380608561115,
      "grad_norm": 0.18931446969509125,
      "learning_rate": 9.903926402016153e-05,
      "loss": 6.8183,
      "step": 5
    },
    {
      "epoch": 0.04951005673027334,
      "grad_norm": 0.1950785368680954,
      "learning_rate": 9.829629131445342e-05,
      "loss": 6.8054,
      "step": 6
    },
    {
      "epoch": 0.05776173285198556,
      "grad_norm": 0.20064397156238556,
      "learning_rate": 9.73465064747553e-05,
      "loss": 6.7876,
      "step": 7
    },
    {
      "epoch": 0.06601340897369778,
      "grad_norm": 0.22192345559597015,
      "learning_rate": 9.619397662556435e-05,
      "loss": 6.769,
      "step": 8
    },
    {
      "epoch": 0.07426508509541001,
      "grad_norm": 0.22847650945186615,
      "learning_rate": 9.484363707663442e-05,
      "loss": 6.7498,
      "step": 9
    },
    {
      "epoch": 0.08251676121712223,
      "grad_norm": 0.2764003574848175,
      "learning_rate": 9.330127018922194e-05,
      "loss": 6.7123,
      "step": 10
    },
    {
      "epoch": 0.09076843733883445,
      "grad_norm": 0.25168463587760925,
      "learning_rate": 9.157348061512727e-05,
      "loss": 6.6124,
      "step": 11
    },
    {
      "epoch": 0.09902011346054668,
      "grad_norm": 0.31273701786994934,
      "learning_rate": 8.966766701456177e-05,
      "loss": 6.3779,
      "step": 12
    },
    {
      "epoch": 0.1072717895822589,
      "grad_norm": 0.18679746985435486,
      "learning_rate": 8.759199037394887e-05,
      "loss": 6.8239,
      "step": 13
    },
    {
      "epoch": 0.11552346570397112,
      "grad_norm": 0.19334200024604797,
      "learning_rate": 8.535533905932738e-05,
      "loss": 6.8569,
      "step": 14
    },
    {
      "epoch": 0.12377514182568335,
      "grad_norm": 0.21157041192054749,
      "learning_rate": 8.296729075500344e-05,
      "loss": 6.8419,
      "step": 15
    },
    {
      "epoch": 0.13202681794739557,
      "grad_norm": 0.20532171428203583,
      "learning_rate": 8.043807145043604e-05,
      "loss": 6.8266,
      "step": 16
    },
    {
      "epoch": 0.14027849406910778,
      "grad_norm": 0.1977279633283615,
      "learning_rate": 7.777851165098012e-05,
      "loss": 6.8141,
      "step": 17
    },
    {
      "epoch": 0.14853017019082002,
      "grad_norm": 0.19488519430160522,
      "learning_rate": 7.500000000000001e-05,
      "loss": 6.7972,
      "step": 18
    },
    {
      "epoch": 0.15678184631253222,
      "grad_norm": 0.21557967364788055,
      "learning_rate": 7.211443451095007e-05,
      "loss": 6.7828,
      "step": 19
    },
    {
      "epoch": 0.16503352243424446,
      "grad_norm": 0.22028861939907074,
      "learning_rate": 6.91341716182545e-05,
      "loss": 6.7694,
      "step": 20
    },
    {
      "epoch": 0.17328519855595667,
      "grad_norm": 0.23538333177566528,
      "learning_rate": 6.607197326515808e-05,
      "loss": 6.7487,
      "step": 21
    },
    {
      "epoch": 0.1815368746776689,
      "grad_norm": 0.2506059408187866,
      "learning_rate": 6.294095225512603e-05,
      "loss": 6.7218,
      "step": 22
    },
    {
      "epoch": 0.18978855079938112,
      "grad_norm": 0.25545573234558105,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 6.6615,
      "step": 23
    },
    {
      "epoch": 0.19804022692109335,
      "grad_norm": 0.28446659445762634,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 6.4915,
      "step": 24
    },
    {
      "epoch": 0.20629190304280556,
      "grad_norm": 0.3152291476726532,
      "learning_rate": 5.327015646150716e-05,
      "loss": 6.185,
      "step": 25
    },
    {
      "epoch": 0.20629190304280556,
      "eval_loss": 6.917301654815674,
      "eval_runtime": 0.8631,
      "eval_samples_per_second": 946.555,
      "eval_steps_per_second": 119.333,
      "step": 25
    },
    {
      "epoch": 0.2145435791645178,
      "grad_norm": 0.20274803042411804,
      "learning_rate": 5e-05,
      "loss": 6.8614,
      "step": 26
    },
    {
      "epoch": 0.22279525528623,
      "grad_norm": 0.21126407384872437,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 6.8386,
      "step": 27
    },
    {
      "epoch": 0.23104693140794225,
      "grad_norm": 0.2221248298883438,
      "learning_rate": 4.347369038899744e-05,
      "loss": 6.8223,
      "step": 28
    },
    {
      "epoch": 0.23929860752965446,
      "grad_norm": 0.2102772444486618,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 6.8099,
      "step": 29
    },
    {
      "epoch": 0.2475502836513667,
      "grad_norm": 0.20332932472229004,
      "learning_rate": 3.705904774487396e-05,
      "loss": 6.7978,
      "step": 30
    },
    {
      "epoch": 0.2558019597730789,
      "grad_norm": 0.22618810832500458,
      "learning_rate": 3.392802673484193e-05,
      "loss": 6.7843,
      "step": 31
    },
    {
      "epoch": 0.26405363589479114,
      "grad_norm": 0.2285047322511673,
      "learning_rate": 3.086582838174551e-05,
      "loss": 6.7745,
      "step": 32
    },
    {
      "epoch": 0.2723053120165034,
      "grad_norm": 0.23838725686073303,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 6.7552,
      "step": 33
    },
    {
      "epoch": 0.28055698813821556,
      "grad_norm": 0.22661758959293365,
      "learning_rate": 2.500000000000001e-05,
      "loss": 6.7389,
      "step": 34
    },
    {
      "epoch": 0.2888086642599278,
      "grad_norm": 0.2469901740550995,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 6.6998,
      "step": 35
    },
    {
      "epoch": 0.29706034038164003,
      "grad_norm": 0.25235992670059204,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 6.6207,
      "step": 36
    },
    {
      "epoch": 0.30531201650335227,
      "grad_norm": 0.3499048054218292,
      "learning_rate": 1.703270924499656e-05,
      "loss": 6.4126,
      "step": 37
    },
    {
      "epoch": 0.31356369262506445,
      "grad_norm": 0.19594097137451172,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 6.8115,
      "step": 38
    },
    {
      "epoch": 0.3218153687467767,
      "grad_norm": 0.21886812150478363,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 6.842,
      "step": 39
    },
    {
      "epoch": 0.3300670448684889,
      "grad_norm": 0.2264992594718933,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 6.8276,
      "step": 40
    },
    {
      "epoch": 0.33831872099020116,
      "grad_norm": 0.23042714595794678,
      "learning_rate": 8.426519384872733e-06,
      "loss": 6.8102,
      "step": 41
    },
    {
      "epoch": 0.34657039711191334,
      "grad_norm": 0.21896089613437653,
      "learning_rate": 6.698729810778065e-06,
      "loss": 6.7979,
      "step": 42
    },
    {
      "epoch": 0.3548220732336256,
      "grad_norm": 0.21895575523376465,
      "learning_rate": 5.156362923365588e-06,
      "loss": 6.7872,
      "step": 43
    },
    {
      "epoch": 0.3630737493553378,
      "grad_norm": 0.21815863251686096,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 6.7742,
      "step": 44
    },
    {
      "epoch": 0.37132542547705005,
      "grad_norm": 0.22837361693382263,
      "learning_rate": 2.653493525244721e-06,
      "loss": 6.7574,
      "step": 45
    },
    {
      "epoch": 0.37957710159876223,
      "grad_norm": 0.2377353459596634,
      "learning_rate": 1.70370868554659e-06,
      "loss": 6.7389,
      "step": 46
    },
    {
      "epoch": 0.38782877772047447,
      "grad_norm": 0.2529526650905609,
      "learning_rate": 9.607359798384785e-07,
      "loss": 6.7065,
      "step": 47
    },
    {
      "epoch": 0.3960804538421867,
      "grad_norm": 0.2696276307106018,
      "learning_rate": 4.277569313094809e-07,
      "loss": 6.658,
      "step": 48
    },
    {
      "epoch": 0.4043321299638989,
      "grad_norm": 0.2675720751285553,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 6.5319,
      "step": 49
    },
    {
      "epoch": 0.4125838060856111,
      "grad_norm": 0.28477150201797485,
      "learning_rate": 0.0,
      "loss": 6.2163,
      "step": 50
    },
    {
      "epoch": 0.4125838060856111,
      "eval_loss": 6.911687850952148,
      "eval_runtime": 0.78,
      "eval_samples_per_second": 1047.488,
      "eval_steps_per_second": 132.058,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5705957376000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}