{
  "best_metric": 1.3878036737442017,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.02178886588953045,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000435777317790609,
      "grad_norm": 2.0851054191589355,
      "learning_rate": 5e-06,
      "loss": 1.6817,
      "step": 1
    },
    {
      "epoch": 0.000435777317790609,
      "eval_loss": 2.0230047702789307,
      "eval_runtime": 315.559,
      "eval_samples_per_second": 12.248,
      "eval_steps_per_second": 6.126,
      "step": 1
    },
    {
      "epoch": 0.000871554635581218,
      "grad_norm": 2.216548204421997,
      "learning_rate": 1e-05,
      "loss": 1.8021,
      "step": 2
    },
    {
      "epoch": 0.001307331953371827,
      "grad_norm": 2.3968324661254883,
      "learning_rate": 1.5e-05,
      "loss": 1.9165,
      "step": 3
    },
    {
      "epoch": 0.001743109271162436,
      "grad_norm": 2.045064926147461,
      "learning_rate": 2e-05,
      "loss": 1.8139,
      "step": 4
    },
    {
      "epoch": 0.002178886588953045,
      "grad_norm": 2.1230273246765137,
      "learning_rate": 2.5e-05,
      "loss": 1.8931,
      "step": 5
    },
    {
      "epoch": 0.002614663906743654,
      "grad_norm": 1.675313949584961,
      "learning_rate": 3e-05,
      "loss": 1.762,
      "step": 6
    },
    {
      "epoch": 0.003050441224534263,
      "grad_norm": 1.3971757888793945,
      "learning_rate": 3.5e-05,
      "loss": 1.5999,
      "step": 7
    },
    {
      "epoch": 0.003486218542324872,
      "grad_norm": 1.214509129524231,
      "learning_rate": 4e-05,
      "loss": 1.663,
      "step": 8
    },
    {
      "epoch": 0.003921995860115481,
      "grad_norm": 1.1938960552215576,
      "learning_rate": 4.5e-05,
      "loss": 1.6785,
      "step": 9
    },
    {
      "epoch": 0.00435777317790609,
      "grad_norm": 1.2489292621612549,
      "learning_rate": 5e-05,
      "loss": 1.5493,
      "step": 10
    },
    {
      "epoch": 0.004793550495696699,
      "grad_norm": 1.2138938903808594,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.622,
      "step": 11
    },
    {
      "epoch": 0.005229327813487308,
      "grad_norm": 1.2661142349243164,
      "learning_rate": 6e-05,
      "loss": 1.5328,
      "step": 12
    },
    {
      "epoch": 0.005665105131277917,
      "grad_norm": 1.047982931137085,
      "learning_rate": 6.500000000000001e-05,
      "loss": 1.5021,
      "step": 13
    },
    {
      "epoch": 0.006100882449068526,
      "grad_norm": 0.9978450536727905,
      "learning_rate": 7e-05,
      "loss": 1.6194,
      "step": 14
    },
    {
      "epoch": 0.006536659766859135,
      "grad_norm": 0.8680281043052673,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.3816,
      "step": 15
    },
    {
      "epoch": 0.006972437084649744,
      "grad_norm": 0.9196011424064636,
      "learning_rate": 8e-05,
      "loss": 1.5334,
      "step": 16
    },
    {
      "epoch": 0.007408214402440353,
      "grad_norm": 0.9667453765869141,
      "learning_rate": 8.5e-05,
      "loss": 1.4741,
      "step": 17
    },
    {
      "epoch": 0.007843991720230961,
      "grad_norm": 0.8727541565895081,
      "learning_rate": 9e-05,
      "loss": 1.4547,
      "step": 18
    },
    {
      "epoch": 0.008279769038021571,
      "grad_norm": 0.9195423722267151,
      "learning_rate": 9.5e-05,
      "loss": 1.5097,
      "step": 19
    },
    {
      "epoch": 0.00871554635581218,
      "grad_norm": 1.05409574508667,
      "learning_rate": 0.0001,
      "loss": 1.4515,
      "step": 20
    },
    {
      "epoch": 0.00915132367360279,
      "grad_norm": 0.8618441820144653,
      "learning_rate": 9.999238475781957e-05,
      "loss": 1.3947,
      "step": 21
    },
    {
      "epoch": 0.009587100991393397,
      "grad_norm": 0.8809842467308044,
      "learning_rate": 9.99695413509548e-05,
      "loss": 1.4563,
      "step": 22
    },
    {
      "epoch": 0.010022878309184007,
      "grad_norm": 0.8538223505020142,
      "learning_rate": 9.99314767377287e-05,
      "loss": 1.4483,
      "step": 23
    },
    {
      "epoch": 0.010458655626974615,
      "grad_norm": 0.8714299201965332,
      "learning_rate": 9.987820251299122e-05,
      "loss": 1.438,
      "step": 24
    },
    {
      "epoch": 0.010894432944765225,
      "grad_norm": 0.8516822457313538,
      "learning_rate": 9.980973490458728e-05,
      "loss": 1.382,
      "step": 25
    },
    {
      "epoch": 0.011330210262555833,
      "grad_norm": 0.9048829078674316,
      "learning_rate": 9.972609476841367e-05,
      "loss": 1.469,
      "step": 26
    },
    {
      "epoch": 0.011765987580346443,
      "grad_norm": 0.8362917900085449,
      "learning_rate": 9.962730758206611e-05,
      "loss": 1.4376,
      "step": 27
    },
    {
      "epoch": 0.012201764898137051,
      "grad_norm": 0.8152568936347961,
      "learning_rate": 9.951340343707852e-05,
      "loss": 1.4729,
      "step": 28
    },
    {
      "epoch": 0.012637542215927661,
      "grad_norm": 0.8475168943405151,
      "learning_rate": 9.938441702975689e-05,
      "loss": 1.4389,
      "step": 29
    },
    {
      "epoch": 0.01307331953371827,
      "grad_norm": 0.8305416703224182,
      "learning_rate": 9.924038765061042e-05,
      "loss": 1.444,
      "step": 30
    },
    {
      "epoch": 0.013509096851508879,
      "grad_norm": 0.8335140943527222,
      "learning_rate": 9.908135917238321e-05,
      "loss": 1.3876,
      "step": 31
    },
    {
      "epoch": 0.013944874169299489,
      "grad_norm": 0.8393346667289734,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.4717,
      "step": 32
    },
    {
      "epoch": 0.014380651487090097,
      "grad_norm": 0.8358327150344849,
      "learning_rate": 9.871850323926177e-05,
      "loss": 1.3911,
      "step": 33
    },
    {
      "epoch": 0.014816428804880707,
      "grad_norm": 0.8287831544876099,
      "learning_rate": 9.851478631379982e-05,
      "loss": 1.3734,
      "step": 34
    },
    {
      "epoch": 0.015252206122671315,
      "grad_norm": 0.7427653670310974,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.3105,
      "step": 35
    },
    {
      "epoch": 0.015687983440461923,
      "grad_norm": 0.8388314843177795,
      "learning_rate": 9.806308479691595e-05,
      "loss": 1.4373,
      "step": 36
    },
    {
      "epoch": 0.016123760758252535,
      "grad_norm": 0.7671462297439575,
      "learning_rate": 9.781523779815179e-05,
      "loss": 1.3883,
      "step": 37
    },
    {
      "epoch": 0.016559538076043143,
      "grad_norm": 0.8673655986785889,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.3485,
      "step": 38
    },
    {
      "epoch": 0.01699531539383375,
      "grad_norm": 0.8480393290519714,
      "learning_rate": 9.727592877996585e-05,
      "loss": 1.4058,
      "step": 39
    },
    {
      "epoch": 0.01743109271162436,
      "grad_norm": 0.8280643820762634,
      "learning_rate": 9.698463103929542e-05,
      "loss": 1.47,
      "step": 40
    },
    {
      "epoch": 0.01786687002941497,
      "grad_norm": 0.7347378730773926,
      "learning_rate": 9.667902132486009e-05,
      "loss": 1.2069,
      "step": 41
    },
    {
      "epoch": 0.01830264734720558,
      "grad_norm": 0.8081456422805786,
      "learning_rate": 9.635919272833938e-05,
      "loss": 1.4003,
      "step": 42
    },
    {
      "epoch": 0.018738424664996187,
      "grad_norm": 0.7185221314430237,
      "learning_rate": 9.602524267262203e-05,
      "loss": 1.2813,
      "step": 43
    },
    {
      "epoch": 0.019174201982786795,
      "grad_norm": 0.7656445503234863,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.4089,
      "step": 44
    },
    {
      "epoch": 0.019609979300577406,
      "grad_norm": 0.936586320400238,
      "learning_rate": 9.53153893518325e-05,
      "loss": 1.3603,
      "step": 45
    },
    {
      "epoch": 0.020045756618368014,
      "grad_norm": 0.7969225645065308,
      "learning_rate": 9.493970231495835e-05,
      "loss": 1.3352,
      "step": 46
    },
    {
      "epoch": 0.020481533936158623,
      "grad_norm": 0.8012515306472778,
      "learning_rate": 9.45503262094184e-05,
      "loss": 1.3508,
      "step": 47
    },
    {
      "epoch": 0.02091731125394923,
      "grad_norm": 0.7480721473693848,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.3603,
      "step": 48
    },
    {
      "epoch": 0.021353088571739842,
      "grad_norm": 0.814703643321991,
      "learning_rate": 9.373098535696979e-05,
      "loss": 1.3205,
      "step": 49
    },
    {
      "epoch": 0.02178886588953045,
      "grad_norm": 0.9260087609291077,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.3108,
      "step": 50
    },
    {
      "epoch": 0.02178886588953045,
      "eval_loss": 1.3878036737442017,
      "eval_runtime": 317.8565,
      "eval_samples_per_second": 12.16,
      "eval_steps_per_second": 6.081,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.171470498830746e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}