{
  "best_metric": 5.311061859130859,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.5076142131979695,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01015228426395939,
      "grad_norm": 0.46062880754470825,
      "learning_rate": 5e-06,
      "loss": 5.3841,
      "step": 1
    },
    {
      "epoch": 0.01015228426395939,
      "eval_loss": 5.5305962562561035,
      "eval_runtime": 8.7087,
      "eval_samples_per_second": 19.061,
      "eval_steps_per_second": 9.531,
      "step": 1
    },
    {
      "epoch": 0.02030456852791878,
      "grad_norm": 0.5978866815567017,
      "learning_rate": 1e-05,
      "loss": 5.4053,
      "step": 2
    },
    {
      "epoch": 0.030456852791878174,
      "grad_norm": 0.4448310434818268,
      "learning_rate": 1.5e-05,
      "loss": 5.3689,
      "step": 3
    },
    {
      "epoch": 0.04060913705583756,
      "grad_norm": 0.507191002368927,
      "learning_rate": 2e-05,
      "loss": 5.4424,
      "step": 4
    },
    {
      "epoch": 0.050761421319796954,
      "grad_norm": 0.4688023626804352,
      "learning_rate": 2.5e-05,
      "loss": 5.4252,
      "step": 5
    },
    {
      "epoch": 0.06091370558375635,
      "grad_norm": 0.4471573531627655,
      "learning_rate": 3e-05,
      "loss": 5.3927,
      "step": 6
    },
    {
      "epoch": 0.07106598984771574,
      "grad_norm": 0.45171481370925903,
      "learning_rate": 3.5e-05,
      "loss": 5.3751,
      "step": 7
    },
    {
      "epoch": 0.08121827411167512,
      "grad_norm": 0.6176170706748962,
      "learning_rate": 4e-05,
      "loss": 5.4215,
      "step": 8
    },
    {
      "epoch": 0.09137055837563451,
      "grad_norm": 0.5099933743476868,
      "learning_rate": 4.5e-05,
      "loss": 5.3422,
      "step": 9
    },
    {
      "epoch": 0.10152284263959391,
      "grad_norm": 0.48276761174201965,
      "learning_rate": 5e-05,
      "loss": 5.4263,
      "step": 10
    },
    {
      "epoch": 0.1116751269035533,
      "grad_norm": 0.5048362612724304,
      "learning_rate": 5.500000000000001e-05,
      "loss": 5.3427,
      "step": 11
    },
    {
      "epoch": 0.1218274111675127,
      "grad_norm": 0.5130631923675537,
      "learning_rate": 6e-05,
      "loss": 5.3817,
      "step": 12
    },
    {
      "epoch": 0.1319796954314721,
      "grad_norm": 0.5273154377937317,
      "learning_rate": 6.500000000000001e-05,
      "loss": 5.3019,
      "step": 13
    },
    {
      "epoch": 0.14213197969543148,
      "grad_norm": 0.523825466632843,
      "learning_rate": 7e-05,
      "loss": 5.2535,
      "step": 14
    },
    {
      "epoch": 0.15228426395939088,
      "grad_norm": 0.6113125681877136,
      "learning_rate": 7.500000000000001e-05,
      "loss": 5.3172,
      "step": 15
    },
    {
      "epoch": 0.16243654822335024,
      "grad_norm": 0.5640417337417603,
      "learning_rate": 8e-05,
      "loss": 5.2758,
      "step": 16
    },
    {
      "epoch": 0.17258883248730963,
      "grad_norm": 0.4186924397945404,
      "learning_rate": 8.5e-05,
      "loss": 5.2586,
      "step": 17
    },
    {
      "epoch": 0.18274111675126903,
      "grad_norm": 0.5193726420402527,
      "learning_rate": 9e-05,
      "loss": 5.2615,
      "step": 18
    },
    {
      "epoch": 0.19289340101522842,
      "grad_norm": 0.5647014379501343,
      "learning_rate": 9.5e-05,
      "loss": 5.2652,
      "step": 19
    },
    {
      "epoch": 0.20304568527918782,
      "grad_norm": 0.5719868540763855,
      "learning_rate": 0.0001,
      "loss": 5.1955,
      "step": 20
    },
    {
      "epoch": 0.2131979695431472,
      "grad_norm": 0.6991127133369446,
      "learning_rate": 9.999238475781957e-05,
      "loss": 5.1646,
      "step": 21
    },
    {
      "epoch": 0.2233502538071066,
      "grad_norm": 0.7082369327545166,
      "learning_rate": 9.99695413509548e-05,
      "loss": 5.1052,
      "step": 22
    },
    {
      "epoch": 0.233502538071066,
      "grad_norm": 0.7457136511802673,
      "learning_rate": 9.99314767377287e-05,
      "loss": 5.1863,
      "step": 23
    },
    {
      "epoch": 0.2436548223350254,
      "grad_norm": 1.6564850807189941,
      "learning_rate": 9.987820251299122e-05,
      "loss": 5.0987,
      "step": 24
    },
    {
      "epoch": 0.25380710659898476,
      "grad_norm": 5.510101318359375,
      "learning_rate": 9.980973490458728e-05,
      "loss": 5.5984,
      "step": 25
    },
    {
      "epoch": 0.2639593908629442,
      "grad_norm": 4.215838432312012,
      "learning_rate": 9.972609476841367e-05,
      "loss": 5.5124,
      "step": 26
    },
    {
      "epoch": 0.27411167512690354,
      "grad_norm": 1.8195075988769531,
      "learning_rate": 9.962730758206611e-05,
      "loss": 5.4004,
      "step": 27
    },
    {
      "epoch": 0.28426395939086296,
      "grad_norm": 1.5931973457336426,
      "learning_rate": 9.951340343707852e-05,
      "loss": 5.3794,
      "step": 28
    },
    {
      "epoch": 0.29441624365482233,
      "grad_norm": 0.6483675241470337,
      "learning_rate": 9.938441702975689e-05,
      "loss": 5.3566,
      "step": 29
    },
    {
      "epoch": 0.30456852791878175,
      "grad_norm": 0.3973766267299652,
      "learning_rate": 9.924038765061042e-05,
      "loss": 5.2972,
      "step": 30
    },
    {
      "epoch": 0.3147208121827411,
      "grad_norm": 0.3534790277481079,
      "learning_rate": 9.908135917238321e-05,
      "loss": 5.2902,
      "step": 31
    },
    {
      "epoch": 0.3248730964467005,
      "grad_norm": 0.3970736861228943,
      "learning_rate": 9.890738003669029e-05,
      "loss": 5.2949,
      "step": 32
    },
    {
      "epoch": 0.3350253807106599,
      "grad_norm": 0.37264251708984375,
      "learning_rate": 9.871850323926177e-05,
      "loss": 5.2944,
      "step": 33
    },
    {
      "epoch": 0.34517766497461927,
      "grad_norm": 0.3872509002685547,
      "learning_rate": 9.851478631379982e-05,
      "loss": 5.326,
      "step": 34
    },
    {
      "epoch": 0.3553299492385787,
      "grad_norm": 0.3853321969509125,
      "learning_rate": 9.829629131445342e-05,
      "loss": 5.2708,
      "step": 35
    },
    {
      "epoch": 0.36548223350253806,
      "grad_norm": 0.39588385820388794,
      "learning_rate": 9.806308479691595e-05,
      "loss": 5.3228,
      "step": 36
    },
    {
      "epoch": 0.3756345177664975,
      "grad_norm": 0.4093726575374603,
      "learning_rate": 9.781523779815179e-05,
      "loss": 5.2831,
      "step": 37
    },
    {
      "epoch": 0.38578680203045684,
      "grad_norm": 0.4420938491821289,
      "learning_rate": 9.755282581475769e-05,
      "loss": 5.2103,
      "step": 38
    },
    {
      "epoch": 0.39593908629441626,
      "grad_norm": 0.5512685775756836,
      "learning_rate": 9.727592877996585e-05,
      "loss": 5.1775,
      "step": 39
    },
    {
      "epoch": 0.40609137055837563,
      "grad_norm": 0.3994300067424774,
      "learning_rate": 9.698463103929542e-05,
      "loss": 5.2478,
      "step": 40
    },
    {
      "epoch": 0.41624365482233505,
      "grad_norm": 0.5147640705108643,
      "learning_rate": 9.667902132486009e-05,
      "loss": 5.1743,
      "step": 41
    },
    {
      "epoch": 0.4263959390862944,
      "grad_norm": 0.6237267851829529,
      "learning_rate": 9.635919272833938e-05,
      "loss": 5.2249,
      "step": 42
    },
    {
      "epoch": 0.4365482233502538,
      "grad_norm": 0.5305359363555908,
      "learning_rate": 9.602524267262203e-05,
      "loss": 5.2915,
      "step": 43
    },
    {
      "epoch": 0.4467005076142132,
      "grad_norm": 0.4987878203392029,
      "learning_rate": 9.567727288213005e-05,
      "loss": 5.253,
      "step": 44
    },
    {
      "epoch": 0.45685279187817257,
      "grad_norm": 0.6333546042442322,
      "learning_rate": 9.53153893518325e-05,
      "loss": 5.1612,
      "step": 45
    },
    {
      "epoch": 0.467005076142132,
      "grad_norm": 0.7043673396110535,
      "learning_rate": 9.493970231495835e-05,
      "loss": 5.1424,
      "step": 46
    },
    {
      "epoch": 0.47715736040609136,
      "grad_norm": 0.6457162499427795,
      "learning_rate": 9.45503262094184e-05,
      "loss": 5.1876,
      "step": 47
    },
    {
      "epoch": 0.4873096446700508,
      "grad_norm": 0.9820389151573181,
      "learning_rate": 9.414737964294636e-05,
      "loss": 5.0792,
      "step": 48
    },
    {
      "epoch": 0.49746192893401014,
      "grad_norm": 2.78731369972229,
      "learning_rate": 9.373098535696979e-05,
      "loss": 5.5062,
      "step": 49
    },
    {
      "epoch": 0.5076142131979695,
      "grad_norm": 2.0711777210235596,
      "learning_rate": 9.330127018922194e-05,
      "loss": 5.4469,
      "step": 50
    },
    {
      "epoch": 0.5076142131979695,
      "eval_loss": 5.311061859130859,
      "eval_runtime": 8.7447,
      "eval_samples_per_second": 18.983,
      "eval_steps_per_second": 9.491,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.84541717577728e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}