{
  "best_metric": 10.371206283569336,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.4545454545454546,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02909090909090909,
      "grad_norm": 0.04542732611298561,
      "learning_rate": 5e-05,
      "loss": 10.3755,
      "step": 1
    },
    {
      "epoch": 0.02909090909090909,
      "eval_loss": 10.378427505493164,
      "eval_runtime": 0.2157,
      "eval_samples_per_second": 1075.713,
      "eval_steps_per_second": 134.464,
      "step": 1
    },
    {
      "epoch": 0.05818181818181818,
      "grad_norm": 0.044699184596538544,
      "learning_rate": 0.0001,
      "loss": 10.3757,
      "step": 2
    },
    {
      "epoch": 0.08727272727272728,
      "grad_norm": 0.04836718365550041,
      "learning_rate": 9.989294616193017e-05,
      "loss": 10.3797,
      "step": 3
    },
    {
      "epoch": 0.11636363636363636,
      "grad_norm": 0.04853840544819832,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.3785,
      "step": 4
    },
    {
      "epoch": 0.14545454545454545,
      "grad_norm": 0.04985123127698898,
      "learning_rate": 9.903926402016153e-05,
      "loss": 10.3808,
      "step": 5
    },
    {
      "epoch": 0.17454545454545456,
      "grad_norm": 0.05220064893364906,
      "learning_rate": 9.829629131445342e-05,
      "loss": 10.3776,
      "step": 6
    },
    {
      "epoch": 0.20363636363636364,
      "grad_norm": 0.054472118616104126,
      "learning_rate": 9.73465064747553e-05,
      "loss": 10.3783,
      "step": 7
    },
    {
      "epoch": 0.23272727272727273,
      "grad_norm": 0.06365810334682465,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.3777,
      "step": 8
    },
    {
      "epoch": 0.26181818181818184,
      "grad_norm": 0.0501365028321743,
      "learning_rate": 9.484363707663442e-05,
      "loss": 10.3754,
      "step": 9
    },
    {
      "epoch": 0.2909090909090909,
      "grad_norm": 0.048848334699869156,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.3747,
      "step": 10
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.04923773556947708,
      "learning_rate": 9.157348061512727e-05,
      "loss": 10.3744,
      "step": 11
    },
    {
      "epoch": 0.3490909090909091,
      "grad_norm": 0.04974054545164108,
      "learning_rate": 8.966766701456177e-05,
      "loss": 10.3749,
      "step": 12
    },
    {
      "epoch": 0.3781818181818182,
      "grad_norm": 0.05604294314980507,
      "learning_rate": 8.759199037394887e-05,
      "loss": 10.3741,
      "step": 13
    },
    {
      "epoch": 0.4072727272727273,
      "grad_norm": 0.05577346310019493,
      "learning_rate": 8.535533905932738e-05,
      "loss": 10.3788,
      "step": 14
    },
    {
      "epoch": 0.43636363636363634,
      "grad_norm": 0.06580042093992233,
      "learning_rate": 8.296729075500344e-05,
      "loss": 10.3744,
      "step": 15
    },
    {
      "epoch": 0.46545454545454545,
      "grad_norm": 0.06522443145513535,
      "learning_rate": 8.043807145043604e-05,
      "loss": 10.3795,
      "step": 16
    },
    {
      "epoch": 0.49454545454545457,
      "grad_norm": 0.08119519799947739,
      "learning_rate": 7.777851165098012e-05,
      "loss": 10.3759,
      "step": 17
    },
    {
      "epoch": 0.5236363636363637,
      "grad_norm": 0.056983832269907,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3713,
      "step": 18
    },
    {
      "epoch": 0.5527272727272727,
      "grad_norm": 0.052240677177906036,
      "learning_rate": 7.211443451095007e-05,
      "loss": 10.3741,
      "step": 19
    },
    {
      "epoch": 0.5818181818181818,
      "grad_norm": 0.06199200823903084,
      "learning_rate": 6.91341716182545e-05,
      "loss": 10.3755,
      "step": 20
    },
    {
      "epoch": 0.610909090909091,
      "grad_norm": 0.06343241035938263,
      "learning_rate": 6.607197326515808e-05,
      "loss": 10.3748,
      "step": 21
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.07450269907712936,
      "learning_rate": 6.294095225512603e-05,
      "loss": 10.374,
      "step": 22
    },
    {
      "epoch": 0.6690909090909091,
      "grad_norm": 0.0749194473028183,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 10.3728,
      "step": 23
    },
    {
      "epoch": 0.6981818181818182,
      "grad_norm": 0.08099077641963959,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 10.373,
      "step": 24
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.08432656526565552,
      "learning_rate": 5.327015646150716e-05,
      "loss": 10.3731,
      "step": 25
    },
    {
      "epoch": 0.7272727272727273,
      "eval_loss": 10.373350143432617,
      "eval_runtime": 0.212,
      "eval_samples_per_second": 1094.239,
      "eval_steps_per_second": 136.78,
      "step": 25
    },
    {
      "epoch": 0.7563636363636363,
      "grad_norm": 0.07291626930236816,
      "learning_rate": 5e-05,
      "loss": 10.3713,
      "step": 26
    },
    {
      "epoch": 0.7854545454545454,
      "grad_norm": 0.06883475184440613,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 10.37,
      "step": 27
    },
    {
      "epoch": 0.8145454545454546,
      "grad_norm": 0.07164252549409866,
      "learning_rate": 4.347369038899744e-05,
      "loss": 10.3727,
      "step": 28
    },
    {
      "epoch": 0.8436363636363636,
      "grad_norm": 0.07420614361763,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 10.3716,
      "step": 29
    },
    {
      "epoch": 0.8727272727272727,
      "grad_norm": 0.07898180931806564,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.3718,
      "step": 30
    },
    {
      "epoch": 0.9018181818181819,
      "grad_norm": 0.08564016222953796,
      "learning_rate": 3.392802673484193e-05,
      "loss": 10.3744,
      "step": 31
    },
    {
      "epoch": 0.9309090909090909,
      "grad_norm": 0.08289813995361328,
      "learning_rate": 3.086582838174551e-05,
      "loss": 10.3717,
      "step": 32
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.10542256385087967,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 10.37,
      "step": 33
    },
    {
      "epoch": 0.9890909090909091,
      "grad_norm": 0.11940684914588928,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.3709,
      "step": 34
    },
    {
      "epoch": 1.018181818181818,
      "grad_norm": 0.14024017751216888,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 17.4511,
      "step": 35
    },
    {
      "epoch": 1.0472727272727274,
      "grad_norm": 0.06805068999528885,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 9.9872,
      "step": 36
    },
    {
      "epoch": 1.0763636363636364,
      "grad_norm": 0.08220560103654861,
      "learning_rate": 1.703270924499656e-05,
      "loss": 10.3888,
      "step": 37
    },
    {
      "epoch": 1.1054545454545455,
      "grad_norm": 0.10088109970092773,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 10.3326,
      "step": 38
    },
    {
      "epoch": 1.1345454545454545,
      "grad_norm": 0.09310103207826614,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 10.332,
      "step": 39
    },
    {
      "epoch": 1.1636363636363636,
      "grad_norm": 0.09103864431381226,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 10.4093,
      "step": 40
    },
    {
      "epoch": 1.1927272727272726,
      "grad_norm": 0.09932649880647659,
      "learning_rate": 8.426519384872733e-06,
      "loss": 10.4759,
      "step": 41
    },
    {
      "epoch": 1.221818181818182,
      "grad_norm": 0.12177771329879761,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.4992,
      "step": 42
    },
    {
      "epoch": 1.250909090909091,
      "grad_norm": 0.06674141436815262,
      "learning_rate": 5.156362923365588e-06,
      "loss": 7.3741,
      "step": 43
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.104917012155056,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 13.2692,
      "step": 44
    },
    {
      "epoch": 1.309090909090909,
      "grad_norm": 0.07617160677909851,
      "learning_rate": 2.653493525244721e-06,
      "loss": 10.2841,
      "step": 45
    },
    {
      "epoch": 1.3381818181818181,
      "grad_norm": 0.08638501167297363,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.3278,
      "step": 46
    },
    {
      "epoch": 1.3672727272727272,
      "grad_norm": 0.09070190787315369,
      "learning_rate": 9.607359798384785e-07,
      "loss": 10.3126,
      "step": 47
    },
    {
      "epoch": 1.3963636363636365,
      "grad_norm": 0.08757533878087997,
      "learning_rate": 4.277569313094809e-07,
      "loss": 10.4069,
      "step": 48
    },
    {
      "epoch": 1.4254545454545455,
      "grad_norm": 0.10536868870258331,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 10.4002,
      "step": 49
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 0.08869392424821854,
      "learning_rate": 0.0,
      "loss": 10.4616,
      "step": 50
    },
    {
      "epoch": 1.4545454545454546,
      "eval_loss": 10.371206283569336,
      "eval_runtime": 0.2127,
      "eval_samples_per_second": 1090.833,
      "eval_steps_per_second": 136.354,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 42768059596800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}