{
  "best_metric": 0.2677198052406311,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.031578947368421,
  "eval_steps": 25,
  "global_step": 36,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08421052631578947,
      "grad_norm": 5.290541648864746,
      "learning_rate": 5e-05,
      "loss": 6.1391,
      "step": 1
    },
    {
      "epoch": 0.08421052631578947,
      "eval_loss": 6.062305450439453,
      "eval_runtime": 4.7353,
      "eval_samples_per_second": 16.895,
      "eval_steps_per_second": 2.112,
      "step": 1
    },
    {
      "epoch": 0.16842105263157894,
      "grad_norm": 10.83039379119873,
      "learning_rate": 0.0001,
      "loss": 6.2114,
      "step": 2
    },
    {
      "epoch": 0.25263157894736843,
      "grad_norm": 9.01547908782959,
      "learning_rate": 9.978670881475172e-05,
      "loss": 4.4483,
      "step": 3
    },
    {
      "epoch": 0.3368421052631579,
      "grad_norm": 4.820804119110107,
      "learning_rate": 9.91486549841951e-05,
      "loss": 5.8774,
      "step": 4
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 5.7229180335998535,
      "learning_rate": 9.809128215864097e-05,
      "loss": 2.4323,
      "step": 5
    },
    {
      "epoch": 0.5052631578947369,
      "grad_norm": 4.481346607208252,
      "learning_rate": 9.662361147021779e-05,
      "loss": 3.1135,
      "step": 6
    },
    {
      "epoch": 0.5894736842105263,
      "grad_norm": 5.059967041015625,
      "learning_rate": 9.475816456775313e-05,
      "loss": 5.1153,
      "step": 7
    },
    {
      "epoch": 0.6736842105263158,
      "grad_norm": 3.4522106647491455,
      "learning_rate": 9.251085678648072e-05,
      "loss": 0.9424,
      "step": 8
    },
    {
      "epoch": 0.7578947368421053,
      "grad_norm": 2.8100931644439697,
      "learning_rate": 8.9900861364012e-05,
      "loss": 2.7768,
      "step": 9
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 2.5981743335723877,
      "learning_rate": 8.695044586103296e-05,
      "loss": 2.3917,
      "step": 10
    },
    {
      "epoch": 0.9263157894736842,
      "grad_norm": 1.2653751373291016,
      "learning_rate": 8.368478218232787e-05,
      "loss": 0.2868,
      "step": 11
    },
    {
      "epoch": 1.0105263157894737,
      "grad_norm": 2.118408441543579,
      "learning_rate": 8.013173181896283e-05,
      "loss": 1.4738,
      "step": 12
    },
    {
      "epoch": 1.0947368421052632,
      "grad_norm": 3.251176357269287,
      "learning_rate": 7.63216081438678e-05,
      "loss": 2.1097,
      "step": 13
    },
    {
      "epoch": 1.1789473684210527,
      "grad_norm": 1.2804324626922607,
      "learning_rate": 7.228691778882693e-05,
      "loss": 0.4403,
      "step": 14
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 1.913195252418518,
      "learning_rate": 6.806208330935766e-05,
      "loss": 0.4639,
      "step": 15
    },
    {
      "epoch": 1.3473684210526315,
      "grad_norm": 3.9906165599823,
      "learning_rate": 6.368314950360415e-05,
      "loss": 1.0192,
      "step": 16
    },
    {
      "epoch": 1.431578947368421,
      "grad_norm": 0.4292667508125305,
      "learning_rate": 5.918747589082853e-05,
      "loss": 0.0192,
      "step": 17
    },
    {
      "epoch": 1.5157894736842106,
      "grad_norm": 3.494691848754883,
      "learning_rate": 5.4613417973165106e-05,
      "loss": 0.2441,
      "step": 18
    },
    {
      "epoch": 1.6,
      "grad_norm": 4.833470821380615,
      "learning_rate": 5e-05,
      "loss": 0.6926,
      "step": 19
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 0.3874737024307251,
      "learning_rate": 4.5386582026834906e-05,
      "loss": 0.0087,
      "step": 20
    },
    {
      "epoch": 1.768421052631579,
      "grad_norm": 1.789414405822754,
      "learning_rate": 4.0812524109171476e-05,
      "loss": 0.2853,
      "step": 21
    },
    {
      "epoch": 1.8526315789473684,
      "grad_norm": 3.8535306453704834,
      "learning_rate": 3.631685049639586e-05,
      "loss": 0.3714,
      "step": 22
    },
    {
      "epoch": 1.936842105263158,
      "grad_norm": 0.4089250862598419,
      "learning_rate": 3.1937916690642356e-05,
      "loss": 0.0266,
      "step": 23
    },
    {
      "epoch": 2.0210526315789474,
      "grad_norm": 0.921528160572052,
      "learning_rate": 2.771308221117309e-05,
      "loss": 0.2166,
      "step": 24
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 3.09906268119812,
      "learning_rate": 2.3678391856132204e-05,
      "loss": 0.4467,
      "step": 25
    },
    {
      "epoch": 2.1052631578947367,
      "eval_loss": 0.2677198052406311,
      "eval_runtime": 4.2004,
      "eval_samples_per_second": 19.046,
      "eval_steps_per_second": 2.381,
      "step": 25
    },
    {
      "epoch": 2.1894736842105265,
      "grad_norm": 0.04200873523950577,
      "learning_rate": 1.9868268181037185e-05,
      "loss": 0.0011,
      "step": 26
    },
    {
      "epoch": 2.2736842105263158,
      "grad_norm": 2.74108624458313,
      "learning_rate": 1.631521781767214e-05,
      "loss": 0.0985,
      "step": 27
    },
    {
      "epoch": 2.3578947368421055,
      "grad_norm": 2.8675966262817383,
      "learning_rate": 1.3049554138967051e-05,
      "loss": 0.4339,
      "step": 28
    },
    {
      "epoch": 2.442105263157895,
      "grad_norm": 0.026142483577132225,
      "learning_rate": 1.0099138635988026e-05,
      "loss": 0.0007,
      "step": 29
    },
    {
      "epoch": 2.526315789473684,
      "grad_norm": 4.404775619506836,
      "learning_rate": 7.489143213519301e-06,
      "loss": 0.1664,
      "step": 30
    },
    {
      "epoch": 2.610526315789474,
      "grad_norm": 2.646777629852295,
      "learning_rate": 5.241835432246889e-06,
      "loss": 0.25,
      "step": 31
    },
    {
      "epoch": 2.694736842105263,
      "grad_norm": 0.018841933459043503,
      "learning_rate": 3.376388529782215e-06,
      "loss": 0.0006,
      "step": 32
    },
    {
      "epoch": 2.7789473684210524,
      "grad_norm": 4.183905601501465,
      "learning_rate": 1.908717841359048e-06,
      "loss": 0.2952,
      "step": 33
    },
    {
      "epoch": 2.863157894736842,
      "grad_norm": 2.0903871059417725,
      "learning_rate": 8.513450158049108e-07,
      "loss": 0.2034,
      "step": 34
    },
    {
      "epoch": 2.9473684210526314,
      "grad_norm": 2.281219959259033,
      "learning_rate": 2.1329118524827662e-07,
      "loss": 0.0681,
      "step": 35
    },
    {
      "epoch": 3.031578947368421,
      "grad_norm": 1.8667627573013306,
      "learning_rate": 0.0,
      "loss": 0.1752,
      "step": 36
    }
  ],
  "logging_steps": 1,
  "max_steps": 36,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.610862382625587e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|