{
  "best_metric": 2.0614235401153564,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.18181818181818182,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0036363636363636364,
      "grad_norm": 5.301558971405029,
      "learning_rate": 2e-05,
      "loss": 4.6837,
      "step": 1
    },
    {
      "epoch": 0.0036363636363636364,
      "eval_loss": 4.996428966522217,
      "eval_runtime": 16.332,
      "eval_samples_per_second": 28.349,
      "eval_steps_per_second": 3.551,
      "step": 1
    },
    {
      "epoch": 0.007272727272727273,
      "grad_norm": 4.122321128845215,
      "learning_rate": 4e-05,
      "loss": 4.0658,
      "step": 2
    },
    {
      "epoch": 0.01090909090909091,
      "grad_norm": 4.729526996612549,
      "learning_rate": 6e-05,
      "loss": 4.4412,
      "step": 3
    },
    {
      "epoch": 0.014545454545454545,
      "grad_norm": 5.44406795501709,
      "learning_rate": 8e-05,
      "loss": 4.9525,
      "step": 4
    },
    {
      "epoch": 0.01818181818181818,
      "grad_norm": 4.86469841003418,
      "learning_rate": 0.0001,
      "loss": 4.9367,
      "step": 5
    },
    {
      "epoch": 0.02181818181818182,
      "grad_norm": 5.874657154083252,
      "learning_rate": 9.987820251299122e-05,
      "loss": 4.7384,
      "step": 6
    },
    {
      "epoch": 0.025454545454545455,
      "grad_norm": 5.480888366699219,
      "learning_rate": 9.951340343707852e-05,
      "loss": 4.2604,
      "step": 7
    },
    {
      "epoch": 0.02909090909090909,
      "grad_norm": 2.321042537689209,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.3919,
      "step": 8
    },
    {
      "epoch": 0.03272727272727273,
      "grad_norm": 2.5301353931427,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.6209,
      "step": 9
    },
    {
      "epoch": 0.03636363636363636,
      "grad_norm": 2.511705160140991,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.5702,
      "step": 10
    },
    {
      "epoch": 0.04,
      "grad_norm": 2.504190444946289,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.6799,
      "step": 11
    },
    {
      "epoch": 0.04363636363636364,
      "grad_norm": 2.3854761123657227,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.496,
      "step": 12
    },
    {
      "epoch": 0.04727272727272727,
      "grad_norm": 2.252695083618164,
      "learning_rate": 9.24024048078213e-05,
      "loss": 2.5682,
      "step": 13
    },
    {
      "epoch": 0.05090909090909091,
      "grad_norm": 2.9842593669891357,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.5234,
      "step": 14
    },
    {
      "epoch": 0.05454545454545454,
      "grad_norm": 2.452545404434204,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.5165,
      "step": 15
    },
    {
      "epoch": 0.05818181818181818,
      "grad_norm": 1.9901292324066162,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.2514,
      "step": 16
    },
    {
      "epoch": 0.06181818181818182,
      "grad_norm": 1.9319262504577637,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.115,
      "step": 17
    },
    {
      "epoch": 0.06545454545454546,
      "grad_norm": 2.623239755630493,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.3468,
      "step": 18
    },
    {
      "epoch": 0.06909090909090909,
      "grad_norm": 2.582571506500244,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.3558,
      "step": 19
    },
    {
      "epoch": 0.07272727272727272,
      "grad_norm": 2.57682466506958,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.4353,
      "step": 20
    },
    {
      "epoch": 0.07636363636363637,
      "grad_norm": 2.5038254261016846,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.5775,
      "step": 21
    },
    {
      "epoch": 0.08,
      "grad_norm": 1.9422601461410522,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.9987,
      "step": 22
    },
    {
      "epoch": 0.08363636363636363,
      "grad_norm": 2.397315740585327,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.1462,
      "step": 23
    },
    {
      "epoch": 0.08727272727272728,
      "grad_norm": 2.152684450149536,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.3077,
      "step": 24
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 2.3351070880889893,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.4673,
      "step": 25
    },
    {
      "epoch": 0.09090909090909091,
      "eval_loss": 2.2046091556549072,
      "eval_runtime": 16.6106,
      "eval_samples_per_second": 27.874,
      "eval_steps_per_second": 3.492,
      "step": 25
    },
    {
      "epoch": 0.09454545454545454,
      "grad_norm": 2.232198476791382,
      "learning_rate": 5.522642316338268e-05,
      "loss": 2.3027,
      "step": 26
    },
    {
      "epoch": 0.09818181818181818,
      "grad_norm": 2.7883410453796387,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.443,
      "step": 27
    },
    {
      "epoch": 0.10181818181818182,
      "grad_norm": 2.533133029937744,
      "learning_rate": 4.825502516487497e-05,
      "loss": 2.4495,
      "step": 28
    },
    {
      "epoch": 0.10545454545454545,
      "grad_norm": 2.0543668270111084,
      "learning_rate": 4.477357683661734e-05,
      "loss": 1.9154,
      "step": 29
    },
    {
      "epoch": 0.10909090909090909,
      "grad_norm": 2.120340585708618,
      "learning_rate": 4.131759111665349e-05,
      "loss": 1.9321,
      "step": 30
    },
    {
      "epoch": 0.11272727272727273,
      "grad_norm": 2.3573496341705322,
      "learning_rate": 3.790390522001662e-05,
      "loss": 2.2358,
      "step": 31
    },
    {
      "epoch": 0.11636363636363636,
      "grad_norm": 2.295992851257324,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 2.1238,
      "step": 32
    },
    {
      "epoch": 0.12,
      "grad_norm": 2.4279768466949463,
      "learning_rate": 3.12696703292044e-05,
      "loss": 2.3682,
      "step": 33
    },
    {
      "epoch": 0.12363636363636364,
      "grad_norm": 2.529550313949585,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 2.209,
      "step": 34
    },
    {
      "epoch": 0.12727272727272726,
      "grad_norm": 2.266796350479126,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.1055,
      "step": 35
    },
    {
      "epoch": 0.13090909090909092,
      "grad_norm": 2.4525721073150635,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.1957,
      "step": 36
    },
    {
      "epoch": 0.13454545454545455,
      "grad_norm": 1.8528436422348022,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 1.8631,
      "step": 37
    },
    {
      "epoch": 0.13818181818181818,
      "grad_norm": 1.8262501955032349,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 2.0233,
      "step": 38
    },
    {
      "epoch": 0.14181818181818182,
      "grad_norm": 2.080160617828369,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 1.884,
      "step": 39
    },
    {
      "epoch": 0.14545454545454545,
      "grad_norm": 2.064708709716797,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 2.0776,
      "step": 40
    },
    {
      "epoch": 0.14909090909090908,
      "grad_norm": 2.0797815322875977,
      "learning_rate": 9.549150281252633e-06,
      "loss": 2.0999,
      "step": 41
    },
    {
      "epoch": 0.15272727272727274,
      "grad_norm": 2.636698007583618,
      "learning_rate": 7.597595192178702e-06,
      "loss": 2.0736,
      "step": 42
    },
    {
      "epoch": 0.15636363636363637,
      "grad_norm": 3.001413345336914,
      "learning_rate": 5.852620357053651e-06,
      "loss": 2.1067,
      "step": 43
    },
    {
      "epoch": 0.16,
      "grad_norm": 2.8234450817108154,
      "learning_rate": 4.322727117869951e-06,
      "loss": 2.7408,
      "step": 44
    },
    {
      "epoch": 0.16363636363636364,
      "grad_norm": 2.3741395473480225,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 2.0327,
      "step": 45
    },
    {
      "epoch": 0.16727272727272727,
      "grad_norm": 2.951899290084839,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 2.312,
      "step": 46
    },
    {
      "epoch": 0.1709090909090909,
      "grad_norm": 3.7195920944213867,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 1.9652,
      "step": 47
    },
    {
      "epoch": 0.17454545454545456,
      "grad_norm": 3.5826423168182373,
      "learning_rate": 4.865965629214819e-07,
      "loss": 2.4803,
      "step": 48
    },
    {
      "epoch": 0.1781818181818182,
      "grad_norm": 3.770677328109741,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 2.265,
      "step": 49
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 5.106058120727539,
      "learning_rate": 0.0,
      "loss": 2.3384,
      "step": 50
    },
    {
      "epoch": 0.18181818181818182,
      "eval_loss": 2.0614235401153564,
      "eval_runtime": 16.6414,
      "eval_samples_per_second": 27.822,
      "eval_steps_per_second": 3.485,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.5547217133568e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}