{
  "best_metric": 1.636163353919983,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.03311806590495115,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000662361318099023,
      "grad_norm": 39.855995178222656,
      "learning_rate": 5e-05,
      "loss": 27.5824,
      "step": 1
    },
    {
      "epoch": 0.000662361318099023,
      "eval_loss": 2.2696444988250732,
      "eval_runtime": 434.2712,
      "eval_samples_per_second": 23.421,
      "eval_steps_per_second": 2.929,
      "step": 1
    },
    {
      "epoch": 0.001324722636198046,
      "grad_norm": 35.34762191772461,
      "learning_rate": 0.0001,
      "loss": 27.0301,
      "step": 2
    },
    {
      "epoch": 0.001987083954297069,
      "grad_norm": 40.128257751464844,
      "learning_rate": 9.989294616193017e-05,
      "loss": 30.0122,
      "step": 3
    },
    {
      "epoch": 0.002649445272396092,
      "grad_norm": 35.508766174316406,
      "learning_rate": 9.957224306869053e-05,
      "loss": 27.1283,
      "step": 4
    },
    {
      "epoch": 0.003311806590495115,
      "grad_norm": 35.003509521484375,
      "learning_rate": 9.903926402016153e-05,
      "loss": 30.1937,
      "step": 5
    },
    {
      "epoch": 0.003974167908594138,
      "grad_norm": 33.496620178222656,
      "learning_rate": 9.829629131445342e-05,
      "loss": 32.1736,
      "step": 6
    },
    {
      "epoch": 0.004636529226693161,
      "grad_norm": 28.025121688842773,
      "learning_rate": 9.73465064747553e-05,
      "loss": 28.1705,
      "step": 7
    },
    {
      "epoch": 0.005298890544792184,
      "grad_norm": 38.546974182128906,
      "learning_rate": 9.619397662556435e-05,
      "loss": 31.4793,
      "step": 8
    },
    {
      "epoch": 0.005961251862891207,
      "grad_norm": 31.470550537109375,
      "learning_rate": 9.484363707663442e-05,
      "loss": 29.978,
      "step": 9
    },
    {
      "epoch": 0.00662361318099023,
      "grad_norm": 33.33460235595703,
      "learning_rate": 9.330127018922194e-05,
      "loss": 29.724,
      "step": 10
    },
    {
      "epoch": 0.007285974499089253,
      "grad_norm": 38.43280792236328,
      "learning_rate": 9.157348061512727e-05,
      "loss": 33.3028,
      "step": 11
    },
    {
      "epoch": 0.007948335817188276,
      "grad_norm": 35.78132247924805,
      "learning_rate": 8.966766701456177e-05,
      "loss": 31.1296,
      "step": 12
    },
    {
      "epoch": 0.0086106971352873,
      "grad_norm": 36.88593673706055,
      "learning_rate": 8.759199037394887e-05,
      "loss": 30.9662,
      "step": 13
    },
    {
      "epoch": 0.009273058453386322,
      "grad_norm": 35.441650390625,
      "learning_rate": 8.535533905932738e-05,
      "loss": 26.3844,
      "step": 14
    },
    {
      "epoch": 0.009935419771485345,
      "grad_norm": 25.30802345275879,
      "learning_rate": 8.296729075500344e-05,
      "loss": 26.8914,
      "step": 15
    },
    {
      "epoch": 0.010597781089584368,
      "grad_norm": 20.81322479248047,
      "learning_rate": 8.043807145043604e-05,
      "loss": 24.8694,
      "step": 16
    },
    {
      "epoch": 0.011260142407683391,
      "grad_norm": 20.53407859802246,
      "learning_rate": 7.777851165098012e-05,
      "loss": 24.7313,
      "step": 17
    },
    {
      "epoch": 0.011922503725782414,
      "grad_norm": 21.87583351135254,
      "learning_rate": 7.500000000000001e-05,
      "loss": 26.0212,
      "step": 18
    },
    {
      "epoch": 0.012584865043881437,
      "grad_norm": 22.73112678527832,
      "learning_rate": 7.211443451095007e-05,
      "loss": 25.5372,
      "step": 19
    },
    {
      "epoch": 0.01324722636198046,
      "grad_norm": 22.921876907348633,
      "learning_rate": 6.91341716182545e-05,
      "loss": 26.2909,
      "step": 20
    },
    {
      "epoch": 0.013909587680079483,
      "grad_norm": 24.30012321472168,
      "learning_rate": 6.607197326515808e-05,
      "loss": 27.5489,
      "step": 21
    },
    {
      "epoch": 0.014571948998178506,
      "grad_norm": 24.02613639831543,
      "learning_rate": 6.294095225512603e-05,
      "loss": 27.3856,
      "step": 22
    },
    {
      "epoch": 0.01523431031627753,
      "grad_norm": 28.35286521911621,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 27.7234,
      "step": 23
    },
    {
      "epoch": 0.015896671634376552,
      "grad_norm": 33.62429428100586,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 29.4387,
      "step": 24
    },
    {
      "epoch": 0.016559032952475575,
      "grad_norm": 34.30913162231445,
      "learning_rate": 5.327015646150716e-05,
      "loss": 29.8534,
      "step": 25
    },
    {
      "epoch": 0.016559032952475575,
      "eval_loss": 1.6895121335983276,
      "eval_runtime": 437.1193,
      "eval_samples_per_second": 23.268,
      "eval_steps_per_second": 2.91,
      "step": 25
    },
    {
      "epoch": 0.0172213942705746,
      "grad_norm": 21.834144592285156,
      "learning_rate": 5e-05,
      "loss": 22.4394,
      "step": 26
    },
    {
      "epoch": 0.01788375558867362,
      "grad_norm": 19.145488739013672,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 23.0711,
      "step": 27
    },
    {
      "epoch": 0.018546116906772644,
      "grad_norm": 18.362302780151367,
      "learning_rate": 4.347369038899744e-05,
      "loss": 23.7712,
      "step": 28
    },
    {
      "epoch": 0.019208478224871667,
      "grad_norm": 26.06848907470703,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 27.9575,
      "step": 29
    },
    {
      "epoch": 0.01987083954297069,
      "grad_norm": 20.97596549987793,
      "learning_rate": 3.705904774487396e-05,
      "loss": 25.7123,
      "step": 30
    },
    {
      "epoch": 0.020533200861069714,
      "grad_norm": 19.997718811035156,
      "learning_rate": 3.392802673484193e-05,
      "loss": 24.3034,
      "step": 31
    },
    {
      "epoch": 0.021195562179168737,
      "grad_norm": 22.800750732421875,
      "learning_rate": 3.086582838174551e-05,
      "loss": 25.7513,
      "step": 32
    },
    {
      "epoch": 0.02185792349726776,
      "grad_norm": 22.728124618530273,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 26.8797,
      "step": 33
    },
    {
      "epoch": 0.022520284815366783,
      "grad_norm": 22.23642349243164,
      "learning_rate": 2.500000000000001e-05,
      "loss": 26.9258,
      "step": 34
    },
    {
      "epoch": 0.023182646133465806,
      "grad_norm": 22.032678604125977,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 25.6688,
      "step": 35
    },
    {
      "epoch": 0.02384500745156483,
      "grad_norm": 26.3785400390625,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 28.5324,
      "step": 36
    },
    {
      "epoch": 0.02450736876966385,
      "grad_norm": 26.36612892150879,
      "learning_rate": 1.703270924499656e-05,
      "loss": 31.2677,
      "step": 37
    },
    {
      "epoch": 0.025169730087762875,
      "grad_norm": 30.574783325195312,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 27.0322,
      "step": 38
    },
    {
      "epoch": 0.025832091405861898,
      "grad_norm": 18.087467193603516,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 23.8036,
      "step": 39
    },
    {
      "epoch": 0.02649445272396092,
      "grad_norm": 23.013254165649414,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 26.3198,
      "step": 40
    },
    {
      "epoch": 0.027156814042059944,
      "grad_norm": 16.78318977355957,
      "learning_rate": 8.426519384872733e-06,
      "loss": 22.9086,
      "step": 41
    },
    {
      "epoch": 0.027819175360158967,
      "grad_norm": 17.115629196166992,
      "learning_rate": 6.698729810778065e-06,
      "loss": 25.436,
      "step": 42
    },
    {
      "epoch": 0.02848153667825799,
      "grad_norm": 17.63496208190918,
      "learning_rate": 5.156362923365588e-06,
      "loss": 25.0821,
      "step": 43
    },
    {
      "epoch": 0.029143897996357013,
      "grad_norm": 21.715238571166992,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 28.0717,
      "step": 44
    },
    {
      "epoch": 0.029806259314456036,
      "grad_norm": 17.863527297973633,
      "learning_rate": 2.653493525244721e-06,
      "loss": 26.0547,
      "step": 45
    },
    {
      "epoch": 0.03046862063255506,
      "grad_norm": 20.718631744384766,
      "learning_rate": 1.70370868554659e-06,
      "loss": 25.5254,
      "step": 46
    },
    {
      "epoch": 0.031130981950654082,
      "grad_norm": 21.940893173217773,
      "learning_rate": 9.607359798384785e-07,
      "loss": 25.1054,
      "step": 47
    },
    {
      "epoch": 0.031793343268753105,
      "grad_norm": 23.446805953979492,
      "learning_rate": 4.277569313094809e-07,
      "loss": 26.1938,
      "step": 48
    },
    {
      "epoch": 0.03245570458685213,
      "grad_norm": 23.616628646850586,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 27.9034,
      "step": 49
    },
    {
      "epoch": 0.03311806590495115,
      "grad_norm": 28.035184860229492,
      "learning_rate": 0.0,
      "loss": 28.7963,
      "step": 50
    },
    {
      "epoch": 0.03311806590495115,
      "eval_loss": 1.636163353919983,
      "eval_runtime": 435.9234,
      "eval_samples_per_second": 23.332,
      "eval_steps_per_second": 2.918,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.435490789261312e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}