{
  "best_metric": 1.467692255973816,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.6650041562759768,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013300083125519535,
      "grad_norm": 0.30807769298553467,
      "learning_rate": 5e-05,
      "loss": 1.5989,
      "step": 1
    },
    {
      "epoch": 0.013300083125519535,
      "eval_loss": 1.8170369863510132,
      "eval_runtime": 4.4482,
      "eval_samples_per_second": 113.979,
      "eval_steps_per_second": 14.388,
      "step": 1
    },
    {
      "epoch": 0.02660016625103907,
      "grad_norm": 0.33670011162757874,
      "learning_rate": 0.0001,
      "loss": 1.6635,
      "step": 2
    },
    {
      "epoch": 0.0399002493765586,
      "grad_norm": 0.35574978590011597,
      "learning_rate": 9.989294616193017e-05,
      "loss": 1.639,
      "step": 3
    },
    {
      "epoch": 0.05320033250207814,
      "grad_norm": 0.43438073992729187,
      "learning_rate": 9.957224306869053e-05,
      "loss": 1.7603,
      "step": 4
    },
    {
      "epoch": 0.06650041562759768,
      "grad_norm": 0.4591783583164215,
      "learning_rate": 9.903926402016153e-05,
      "loss": 1.7698,
      "step": 5
    },
    {
      "epoch": 0.0798004987531172,
      "grad_norm": 0.3775702118873596,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.6717,
      "step": 6
    },
    {
      "epoch": 0.09310058187863675,
      "grad_norm": 0.40178415179252625,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.6908,
      "step": 7
    },
    {
      "epoch": 0.10640066500415628,
      "grad_norm": 0.39939138293266296,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.6511,
      "step": 8
    },
    {
      "epoch": 0.11970074812967581,
      "grad_norm": 0.4064088761806488,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.6223,
      "step": 9
    },
    {
      "epoch": 0.13300083125519535,
      "grad_norm": 0.42422300577163696,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.6776,
      "step": 10
    },
    {
      "epoch": 0.14630091438071488,
      "grad_norm": 0.4172786474227905,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.6897,
      "step": 11
    },
    {
      "epoch": 0.1596009975062344,
      "grad_norm": 0.4498286247253418,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.7711,
      "step": 12
    },
    {
      "epoch": 0.17290108063175394,
      "grad_norm": 0.2242169827222824,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.5764,
      "step": 13
    },
    {
      "epoch": 0.1862011637572735,
      "grad_norm": 0.220079705119133,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.4311,
      "step": 14
    },
    {
      "epoch": 0.19950124688279303,
      "grad_norm": 0.2502831816673279,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.5599,
      "step": 15
    },
    {
      "epoch": 0.21280133000831256,
      "grad_norm": 0.27619630098342896,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.5565,
      "step": 16
    },
    {
      "epoch": 0.22610141313383209,
      "grad_norm": 0.2701985239982605,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.5664,
      "step": 17
    },
    {
      "epoch": 0.23940149625935161,
      "grad_norm": 0.24903272092342377,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.596,
      "step": 18
    },
    {
      "epoch": 0.25270157938487114,
      "grad_norm": 0.2547197937965393,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.4784,
      "step": 19
    },
    {
      "epoch": 0.2660016625103907,
      "grad_norm": 0.22915960848331451,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.4753,
      "step": 20
    },
    {
      "epoch": 0.2793017456359102,
      "grad_norm": 0.2612093985080719,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.5629,
      "step": 21
    },
    {
      "epoch": 0.29260182876142976,
      "grad_norm": 0.26348868012428284,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.412,
      "step": 22
    },
    {
      "epoch": 0.3059019118869493,
      "grad_norm": 0.28813788294792175,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.5054,
      "step": 23
    },
    {
      "epoch": 0.3192019950124688,
      "grad_norm": 0.31999000906944275,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.5242,
      "step": 24
    },
    {
      "epoch": 0.3325020781379884,
      "grad_norm": 0.4172128736972809,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.499,
      "step": 25
    },
    {
      "epoch": 0.3325020781379884,
      "eval_loss": 1.4860926866531372,
      "eval_runtime": 4.4333,
      "eval_samples_per_second": 114.363,
      "eval_steps_per_second": 14.436,
      "step": 25
    },
    {
      "epoch": 0.3458021612635079,
      "grad_norm": 0.18647287786006927,
      "learning_rate": 5e-05,
      "loss": 1.4277,
      "step": 26
    },
    {
      "epoch": 0.35910224438902744,
      "grad_norm": 0.20698946714401245,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 1.4413,
      "step": 27
    },
    {
      "epoch": 0.372402327514547,
      "grad_norm": 0.20511312782764435,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.4307,
      "step": 28
    },
    {
      "epoch": 0.3857024106400665,
      "grad_norm": 0.2180362045764923,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.4408,
      "step": 29
    },
    {
      "epoch": 0.39900249376558605,
      "grad_norm": 0.21642690896987915,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.4654,
      "step": 30
    },
    {
      "epoch": 0.41230257689110555,
      "grad_norm": 0.21712642908096313,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.4509,
      "step": 31
    },
    {
      "epoch": 0.4256026600166251,
      "grad_norm": 0.22278568148612976,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.4888,
      "step": 32
    },
    {
      "epoch": 0.4389027431421446,
      "grad_norm": 0.2208184450864792,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.3575,
      "step": 33
    },
    {
      "epoch": 0.45220282626766417,
      "grad_norm": 0.23661835491657257,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.4795,
      "step": 34
    },
    {
      "epoch": 0.46550290939318373,
      "grad_norm": 0.25014418363571167,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.48,
      "step": 35
    },
    {
      "epoch": 0.47880299251870323,
      "grad_norm": 0.26492103934288025,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.4518,
      "step": 36
    },
    {
      "epoch": 0.4921030756442228,
      "grad_norm": 0.33129605650901794,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.5776,
      "step": 37
    },
    {
      "epoch": 0.5054031587697423,
      "grad_norm": 0.22219394147396088,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.5443,
      "step": 38
    },
    {
      "epoch": 0.5187032418952618,
      "grad_norm": 0.21399854123592377,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.4885,
      "step": 39
    },
    {
      "epoch": 0.5320033250207814,
      "grad_norm": 0.20631512999534607,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.3427,
      "step": 40
    },
    {
      "epoch": 0.545303408146301,
      "grad_norm": 0.21566346287727356,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.459,
      "step": 41
    },
    {
      "epoch": 0.5586034912718204,
      "grad_norm": 0.20011813938617706,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.4605,
      "step": 42
    },
    {
      "epoch": 0.57190357439734,
      "grad_norm": 0.21354447305202484,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.4537,
      "step": 43
    },
    {
      "epoch": 0.5852036575228595,
      "grad_norm": 0.20864373445510864,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.3382,
      "step": 44
    },
    {
      "epoch": 0.5985037406483791,
      "grad_norm": 0.20621033012866974,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.3996,
      "step": 45
    },
    {
      "epoch": 0.6118038237738986,
      "grad_norm": 0.24815712869167328,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.4625,
      "step": 46
    },
    {
      "epoch": 0.6251039068994181,
      "grad_norm": 0.2448890209197998,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.4835,
      "step": 47
    },
    {
      "epoch": 0.6384039900249376,
      "grad_norm": 0.26246240735054016,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.4203,
      "step": 48
    },
    {
      "epoch": 0.6517040731504572,
      "grad_norm": 0.30303773283958435,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.5443,
      "step": 49
    },
    {
      "epoch": 0.6650041562759768,
      "grad_norm": 0.4216666519641876,
      "learning_rate": 0.0,
      "loss": 1.5894,
      "step": 50
    },
    {
      "epoch": 0.6650041562759768,
      "eval_loss": 1.467692255973816,
      "eval_runtime": 4.4285,
      "eval_samples_per_second": 114.486,
      "eval_steps_per_second": 14.452,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.33416392081408e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}