{
  "best_metric": 1.3664671182632446,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.0101010101010102,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020202020202020204,
      "grad_norm": 5.289388179779053,
      "learning_rate": 5e-06,
      "loss": 3.9043,
      "step": 1
    },
    {
      "epoch": 0.020202020202020204,
      "eval_loss": 5.048096656799316,
      "eval_runtime": 4.2841,
      "eval_samples_per_second": 19.607,
      "eval_steps_per_second": 9.804,
      "step": 1
    },
    {
      "epoch": 0.04040404040404041,
      "grad_norm": 6.221162796020508,
      "learning_rate": 1e-05,
      "loss": 4.4545,
      "step": 2
    },
    {
      "epoch": 0.06060606060606061,
      "grad_norm": 6.830175876617432,
      "learning_rate": 1.5e-05,
      "loss": 4.4955,
      "step": 3
    },
    {
      "epoch": 0.08080808080808081,
      "grad_norm": 7.195596694946289,
      "learning_rate": 2e-05,
      "loss": 4.5755,
      "step": 4
    },
    {
      "epoch": 0.10101010101010101,
      "grad_norm": 6.808708667755127,
      "learning_rate": 2.5e-05,
      "loss": 4.6561,
      "step": 5
    },
    {
      "epoch": 0.12121212121212122,
      "grad_norm": 7.063287258148193,
      "learning_rate": 3e-05,
      "loss": 4.7259,
      "step": 6
    },
    {
      "epoch": 0.1414141414141414,
      "grad_norm": 7.591980934143066,
      "learning_rate": 3.5e-05,
      "loss": 4.7279,
      "step": 7
    },
    {
      "epoch": 0.16161616161616163,
      "grad_norm": 6.363076686859131,
      "learning_rate": 4e-05,
      "loss": 4.4684,
      "step": 8
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 4.987633228302002,
      "learning_rate": 4.5e-05,
      "loss": 4.4504,
      "step": 9
    },
    {
      "epoch": 0.20202020202020202,
      "grad_norm": 4.296280860900879,
      "learning_rate": 5e-05,
      "loss": 4.0533,
      "step": 10
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 3.862339735031128,
      "learning_rate": 5.500000000000001e-05,
      "loss": 4.0011,
      "step": 11
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 3.470243453979492,
      "learning_rate": 6e-05,
      "loss": 4.0229,
      "step": 12
    },
    {
      "epoch": 0.26262626262626265,
      "grad_norm": 2.2312989234924316,
      "learning_rate": 6.500000000000001e-05,
      "loss": 2.9522,
      "step": 13
    },
    {
      "epoch": 0.2828282828282828,
      "grad_norm": 2.9605283737182617,
      "learning_rate": 7e-05,
      "loss": 3.0682,
      "step": 14
    },
    {
      "epoch": 0.30303030303030304,
      "grad_norm": 2.9798710346221924,
      "learning_rate": 7.500000000000001e-05,
      "loss": 3.2887,
      "step": 15
    },
    {
      "epoch": 0.32323232323232326,
      "grad_norm": 2.613205671310425,
      "learning_rate": 8e-05,
      "loss": 3.043,
      "step": 16
    },
    {
      "epoch": 0.3434343434343434,
      "grad_norm": 2.627354860305786,
      "learning_rate": 8.5e-05,
      "loss": 3.0214,
      "step": 17
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 2.2543275356292725,
      "learning_rate": 9e-05,
      "loss": 2.7168,
      "step": 18
    },
    {
      "epoch": 0.3838383838383838,
      "grad_norm": 1.868975281715393,
      "learning_rate": 9.5e-05,
      "loss": 2.8404,
      "step": 19
    },
    {
      "epoch": 0.40404040404040403,
      "grad_norm": 1.8961381912231445,
      "learning_rate": 0.0001,
      "loss": 2.6021,
      "step": 20
    },
    {
      "epoch": 0.42424242424242425,
      "grad_norm": 1.7066221237182617,
      "learning_rate": 9.998517349225698e-05,
      "loss": 2.5649,
      "step": 21
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 1.9032788276672363,
      "learning_rate": 9.994070276204116e-05,
      "loss": 2.3567,
      "step": 22
    },
    {
      "epoch": 0.46464646464646464,
      "grad_norm": 1.6003658771514893,
      "learning_rate": 9.986661418317759e-05,
      "loss": 2.2917,
      "step": 23
    },
    {
      "epoch": 0.48484848484848486,
      "grad_norm": 1.7048600912094116,
      "learning_rate": 9.976295169466178e-05,
      "loss": 2.5865,
      "step": 24
    },
    {
      "epoch": 0.5050505050505051,
      "grad_norm": 1.562213659286499,
      "learning_rate": 9.962977677460132e-05,
      "loss": 2.0904,
      "step": 25
    },
    {
      "epoch": 0.5252525252525253,
      "grad_norm": 1.4012371301651,
      "learning_rate": 9.946716840375551e-05,
      "loss": 1.9331,
      "step": 26
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 1.4172520637512207,
      "learning_rate": 9.927522301869515e-05,
      "loss": 2.04,
      "step": 27
    },
    {
      "epoch": 0.5656565656565656,
      "grad_norm": 1.2087681293487549,
      "learning_rate": 9.905405445460972e-05,
      "loss": 2.0439,
      "step": 28
    },
    {
      "epoch": 0.5858585858585859,
      "grad_norm": 1.3168634176254272,
      "learning_rate": 9.880379387779637e-05,
      "loss": 2.0186,
      "step": 29
    },
    {
      "epoch": 0.6060606060606061,
      "grad_norm": 1.3884882926940918,
      "learning_rate": 9.852458970787026e-05,
      "loss": 1.7423,
      "step": 30
    },
    {
      "epoch": 0.6262626262626263,
      "grad_norm": 1.2662495374679565,
      "learning_rate": 9.821660752974293e-05,
      "loss": 1.8134,
      "step": 31
    },
    {
      "epoch": 0.6464646464646465,
      "grad_norm": 1.2647764682769775,
      "learning_rate": 9.78800299954203e-05,
      "loss": 1.9441,
      "step": 32
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 1.4162395000457764,
      "learning_rate": 9.751505671567913e-05,
      "loss": 1.7532,
      "step": 33
    },
    {
      "epoch": 0.6868686868686869,
      "grad_norm": 1.3138062953948975,
      "learning_rate": 9.712190414168572e-05,
      "loss": 1.8705,
      "step": 34
    },
    {
      "epoch": 0.7070707070707071,
      "grad_norm": 1.2262977361679077,
      "learning_rate": 9.67008054366274e-05,
      "loss": 1.7738,
      "step": 35
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 1.5342642068862915,
      "learning_rate": 9.625201033743261e-05,
      "loss": 1.9003,
      "step": 36
    },
    {
      "epoch": 0.7474747474747475,
      "grad_norm": 1.0173803567886353,
      "learning_rate": 9.577578500666187e-05,
      "loss": 1.4541,
      "step": 37
    },
    {
      "epoch": 0.7676767676767676,
      "grad_norm": 0.9901583194732666,
      "learning_rate": 9.527241187465734e-05,
      "loss": 1.577,
      "step": 38
    },
    {
      "epoch": 0.7878787878787878,
      "grad_norm": 0.9777953028678894,
      "learning_rate": 9.474218947204459e-05,
      "loss": 1.396,
      "step": 39
    },
    {
      "epoch": 0.8080808080808081,
      "grad_norm": 1.240479588508606,
      "learning_rate": 9.418543225268596e-05,
      "loss": 1.6402,
      "step": 40
    },
    {
      "epoch": 0.8282828282828283,
      "grad_norm": 1.1354819536209106,
      "learning_rate": 9.360247040719039e-05,
      "loss": 1.5014,
      "step": 41
    },
    {
      "epoch": 0.8484848484848485,
      "grad_norm": 1.0067344903945923,
      "learning_rate": 9.29936496670905e-05,
      "loss": 1.4353,
      "step": 42
    },
    {
      "epoch": 0.8686868686868687,
      "grad_norm": 1.0829236507415771,
      "learning_rate": 9.235933109980301e-05,
      "loss": 1.4657,
      "step": 43
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 1.0891422033309937,
      "learning_rate": 9.16998908944939e-05,
      "loss": 1.5081,
      "step": 44
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 1.0870673656463623,
      "learning_rate": 9.101572013897555e-05,
      "loss": 1.4492,
      "step": 45
    },
    {
      "epoch": 0.9292929292929293,
      "grad_norm": 1.154875636100769,
      "learning_rate": 9.030722458776814e-05,
      "loss": 1.4784,
      "step": 46
    },
    {
      "epoch": 0.9494949494949495,
      "grad_norm": 1.2022784948349,
      "learning_rate": 8.957482442146272e-05,
      "loss": 1.4172,
      "step": 47
    },
    {
      "epoch": 0.9696969696969697,
      "grad_norm": 1.1286842823028564,
      "learning_rate": 8.881895399752874e-05,
      "loss": 1.3976,
      "step": 48
    },
    {
      "epoch": 0.98989898989899,
      "grad_norm": 1.2492576837539673,
      "learning_rate": 8.80400615927139e-05,
      "loss": 1.3521,
      "step": 49
    },
    {
      "epoch": 1.0101010101010102,
      "grad_norm": 1.5818628072738647,
      "learning_rate": 8.72386091371891e-05,
      "loss": 1.8895,
      "step": 50
    },
    {
      "epoch": 1.0101010101010102,
      "eval_loss": 1.3664671182632446,
      "eval_runtime": 4.2876,
      "eval_samples_per_second": 19.591,
      "eval_steps_per_second": 9.796,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 149,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.359873201143808e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}