{
  "best_metric": 0.3135169446468353,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.22148394241417496,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004429678848283499,
      "grad_norm": 90.43601989746094,
      "learning_rate": 5e-05,
      "loss": 186.7866,
      "step": 1
    },
    {
      "epoch": 0.004429678848283499,
      "eval_loss": 11.860960006713867,
      "eval_runtime": 59.9491,
      "eval_samples_per_second": 25.372,
      "eval_steps_per_second": 3.186,
      "step": 1
    },
    {
      "epoch": 0.008859357696566999,
      "grad_norm": 92.87113952636719,
      "learning_rate": 0.0001,
      "loss": 191.192,
      "step": 2
    },
    {
      "epoch": 0.013289036544850499,
      "grad_norm": 95.75019836425781,
      "learning_rate": 9.989294616193017e-05,
      "loss": 186.3358,
      "step": 3
    },
    {
      "epoch": 0.017718715393133997,
      "grad_norm": 131.21568298339844,
      "learning_rate": 9.957224306869053e-05,
      "loss": 169.4684,
      "step": 4
    },
    {
      "epoch": 0.0221483942414175,
      "grad_norm": 133.9580535888672,
      "learning_rate": 9.903926402016153e-05,
      "loss": 140.7173,
      "step": 5
    },
    {
      "epoch": 0.026578073089700997,
      "grad_norm": 115.58403015136719,
      "learning_rate": 9.829629131445342e-05,
      "loss": 116.9708,
      "step": 6
    },
    {
      "epoch": 0.031007751937984496,
      "grad_norm": 114.49787139892578,
      "learning_rate": 9.73465064747553e-05,
      "loss": 95.8174,
      "step": 7
    },
    {
      "epoch": 0.035437430786267994,
      "grad_norm": 112.3747329711914,
      "learning_rate": 9.619397662556435e-05,
      "loss": 76.4804,
      "step": 8
    },
    {
      "epoch": 0.03986710963455149,
      "grad_norm": 134.6016845703125,
      "learning_rate": 9.484363707663442e-05,
      "loss": 52.7218,
      "step": 9
    },
    {
      "epoch": 0.044296788482835,
      "grad_norm": 140.690673828125,
      "learning_rate": 9.330127018922194e-05,
      "loss": 28.0401,
      "step": 10
    },
    {
      "epoch": 0.048726467331118496,
      "grad_norm": 73.04218292236328,
      "learning_rate": 9.157348061512727e-05,
      "loss": 13.6756,
      "step": 11
    },
    {
      "epoch": 0.053156146179401995,
      "grad_norm": 81.19536590576172,
      "learning_rate": 8.966766701456177e-05,
      "loss": 11.1729,
      "step": 12
    },
    {
      "epoch": 0.05758582502768549,
      "grad_norm": 56.66990661621094,
      "learning_rate": 8.759199037394887e-05,
      "loss": 9.8869,
      "step": 13
    },
    {
      "epoch": 0.06201550387596899,
      "grad_norm": 28.945405960083008,
      "learning_rate": 8.535533905932738e-05,
      "loss": 9.031,
      "step": 14
    },
    {
      "epoch": 0.0664451827242525,
      "grad_norm": 36.69380569458008,
      "learning_rate": 8.296729075500344e-05,
      "loss": 8.7398,
      "step": 15
    },
    {
      "epoch": 0.07087486157253599,
      "grad_norm": 22.542613983154297,
      "learning_rate": 8.043807145043604e-05,
      "loss": 8.2923,
      "step": 16
    },
    {
      "epoch": 0.0753045404208195,
      "grad_norm": 76.44745635986328,
      "learning_rate": 7.777851165098012e-05,
      "loss": 9.5592,
      "step": 17
    },
    {
      "epoch": 0.07973421926910298,
      "grad_norm": 60.888092041015625,
      "learning_rate": 7.500000000000001e-05,
      "loss": 7.8758,
      "step": 18
    },
    {
      "epoch": 0.08416389811738649,
      "grad_norm": 66.46916198730469,
      "learning_rate": 7.211443451095007e-05,
      "loss": 6.9583,
      "step": 19
    },
    {
      "epoch": 0.08859357696567,
      "grad_norm": 67.2452163696289,
      "learning_rate": 6.91341716182545e-05,
      "loss": 8.2272,
      "step": 20
    },
    {
      "epoch": 0.09302325581395349,
      "grad_norm": 53.93881607055664,
      "learning_rate": 6.607197326515808e-05,
      "loss": 7.0548,
      "step": 21
    },
    {
      "epoch": 0.09745293466223699,
      "grad_norm": 80.97118377685547,
      "learning_rate": 6.294095225512603e-05,
      "loss": 7.2771,
      "step": 22
    },
    {
      "epoch": 0.10188261351052048,
      "grad_norm": 37.07639694213867,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 6.2439,
      "step": 23
    },
    {
      "epoch": 0.10631229235880399,
      "grad_norm": 53.39252471923828,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 6.0297,
      "step": 24
    },
    {
      "epoch": 0.11074197120708748,
      "grad_norm": 50.96709442138672,
      "learning_rate": 5.327015646150716e-05,
      "loss": 5.7838,
      "step": 25
    },
    {
      "epoch": 0.11074197120708748,
      "eval_loss": 0.4496269226074219,
      "eval_runtime": 59.8952,
      "eval_samples_per_second": 25.394,
      "eval_steps_per_second": 3.189,
      "step": 25
    },
    {
      "epoch": 0.11517165005537099,
      "grad_norm": 43.56239318847656,
      "learning_rate": 5e-05,
      "loss": 8.002,
      "step": 26
    },
    {
      "epoch": 0.11960132890365449,
      "grad_norm": 42.219730377197266,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 7.7278,
      "step": 27
    },
    {
      "epoch": 0.12403100775193798,
      "grad_norm": 51.25581741333008,
      "learning_rate": 4.347369038899744e-05,
      "loss": 7.8858,
      "step": 28
    },
    {
      "epoch": 0.12846068660022147,
      "grad_norm": 28.833730697631836,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 7.4616,
      "step": 29
    },
    {
      "epoch": 0.132890365448505,
      "grad_norm": 36.36983108520508,
      "learning_rate": 3.705904774487396e-05,
      "loss": 5.8613,
      "step": 30
    },
    {
      "epoch": 0.13732004429678848,
      "grad_norm": 24.915283203125,
      "learning_rate": 3.392802673484193e-05,
      "loss": 6.0989,
      "step": 31
    },
    {
      "epoch": 0.14174972314507198,
      "grad_norm": 26.047985076904297,
      "learning_rate": 3.086582838174551e-05,
      "loss": 5.5893,
      "step": 32
    },
    {
      "epoch": 0.1461794019933555,
      "grad_norm": 17.464637756347656,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 5.6094,
      "step": 33
    },
    {
      "epoch": 0.150609080841639,
      "grad_norm": 19.809972763061523,
      "learning_rate": 2.500000000000001e-05,
      "loss": 5.3021,
      "step": 34
    },
    {
      "epoch": 0.15503875968992248,
      "grad_norm": 29.769086837768555,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 5.5138,
      "step": 35
    },
    {
      "epoch": 0.15946843853820597,
      "grad_norm": 25.799795150756836,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 4.979,
      "step": 36
    },
    {
      "epoch": 0.1638981173864895,
      "grad_norm": 25.358388900756836,
      "learning_rate": 1.703270924499656e-05,
      "loss": 3.709,
      "step": 37
    },
    {
      "epoch": 0.16832779623477298,
      "grad_norm": 26.125646591186523,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 4.5807,
      "step": 38
    },
    {
      "epoch": 0.17275747508305647,
      "grad_norm": 20.812122344970703,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 6.1171,
      "step": 39
    },
    {
      "epoch": 0.17718715393134,
      "grad_norm": 20.470781326293945,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 6.5875,
      "step": 40
    },
    {
      "epoch": 0.18161683277962348,
      "grad_norm": 17.11683464050293,
      "learning_rate": 8.426519384872733e-06,
      "loss": 6.203,
      "step": 41
    },
    {
      "epoch": 0.18604651162790697,
      "grad_norm": 15.608457565307617,
      "learning_rate": 6.698729810778065e-06,
      "loss": 4.9995,
      "step": 42
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 14.48198127746582,
      "learning_rate": 5.156362923365588e-06,
      "loss": 5.2074,
      "step": 43
    },
    {
      "epoch": 0.19490586932447398,
      "grad_norm": 17.412363052368164,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 5.2764,
      "step": 44
    },
    {
      "epoch": 0.19933554817275748,
      "grad_norm": 26.537670135498047,
      "learning_rate": 2.653493525244721e-06,
      "loss": 5.5755,
      "step": 45
    },
    {
      "epoch": 0.20376522702104097,
      "grad_norm": 25.966537475585938,
      "learning_rate": 1.70370868554659e-06,
      "loss": 5.1873,
      "step": 46
    },
    {
      "epoch": 0.2081949058693245,
      "grad_norm": 19.112852096557617,
      "learning_rate": 9.607359798384785e-07,
      "loss": 5.2322,
      "step": 47
    },
    {
      "epoch": 0.21262458471760798,
      "grad_norm": 19.956279754638672,
      "learning_rate": 4.277569313094809e-07,
      "loss": 5.3597,
      "step": 48
    },
    {
      "epoch": 0.21705426356589147,
      "grad_norm": 26.704853057861328,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 4.1194,
      "step": 49
    },
    {
      "epoch": 0.22148394241417496,
      "grad_norm": 22.032495498657227,
      "learning_rate": 0.0,
      "loss": 3.1469,
      "step": 50
    },
    {
      "epoch": 0.22148394241417496,
      "eval_loss": 0.3135169446468353,
      "eval_runtime": 59.89,
      "eval_samples_per_second": 25.397,
      "eval_steps_per_second": 3.189,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0205684108886016e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}