{
  "best_metric": 3.659489154815674,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.08991794987074295,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001798358997414859,
      "grad_norm": 1.892904281616211,
      "learning_rate": 5e-05,
      "loss": 7.5914,
      "step": 1
    },
    {
      "epoch": 0.001798358997414859,
      "eval_loss": 7.87414026260376,
      "eval_runtime": 99.3294,
      "eval_samples_per_second": 37.723,
      "eval_steps_per_second": 4.722,
      "step": 1
    },
    {
      "epoch": 0.003596717994829718,
      "grad_norm": 1.8523627519607544,
      "learning_rate": 0.0001,
      "loss": 7.2369,
      "step": 2
    },
    {
      "epoch": 0.005395076992244577,
      "grad_norm": 1.8711965084075928,
      "learning_rate": 9.989294616193017e-05,
      "loss": 7.1987,
      "step": 3
    },
    {
      "epoch": 0.007193435989659436,
      "grad_norm": 2.076180934906006,
      "learning_rate": 9.957224306869053e-05,
      "loss": 7.2361,
      "step": 4
    },
    {
      "epoch": 0.008991794987074295,
      "grad_norm": 2.2019765377044678,
      "learning_rate": 9.903926402016153e-05,
      "loss": 6.6906,
      "step": 5
    },
    {
      "epoch": 0.010790153984489154,
      "grad_norm": 2.547671318054199,
      "learning_rate": 9.829629131445342e-05,
      "loss": 6.5445,
      "step": 6
    },
    {
      "epoch": 0.012588512981904013,
      "grad_norm": 2.629695177078247,
      "learning_rate": 9.73465064747553e-05,
      "loss": 6.2388,
      "step": 7
    },
    {
      "epoch": 0.014386871979318872,
      "grad_norm": 2.921963691711426,
      "learning_rate": 9.619397662556435e-05,
      "loss": 5.8722,
      "step": 8
    },
    {
      "epoch": 0.01618523097673373,
      "grad_norm": 2.59285569190979,
      "learning_rate": 9.484363707663442e-05,
      "loss": 5.1905,
      "step": 9
    },
    {
      "epoch": 0.01798358997414859,
      "grad_norm": 2.041452407836914,
      "learning_rate": 9.330127018922194e-05,
      "loss": 5.264,
      "step": 10
    },
    {
      "epoch": 0.01978194897156345,
      "grad_norm": 2.5271718502044678,
      "learning_rate": 9.157348061512727e-05,
      "loss": 4.9013,
      "step": 11
    },
    {
      "epoch": 0.021580307968978307,
      "grad_norm": 2.6541106700897217,
      "learning_rate": 8.966766701456177e-05,
      "loss": 5.3764,
      "step": 12
    },
    {
      "epoch": 0.023378666966393166,
      "grad_norm": 2.242335796356201,
      "learning_rate": 8.759199037394887e-05,
      "loss": 5.0807,
      "step": 13
    },
    {
      "epoch": 0.025177025963808025,
      "grad_norm": 2.364999532699585,
      "learning_rate": 8.535533905932738e-05,
      "loss": 4.1957,
      "step": 14
    },
    {
      "epoch": 0.026975384961222884,
      "grad_norm": 2.047051191329956,
      "learning_rate": 8.296729075500344e-05,
      "loss": 4.0444,
      "step": 15
    },
    {
      "epoch": 0.028773743958637743,
      "grad_norm": 1.811107873916626,
      "learning_rate": 8.043807145043604e-05,
      "loss": 4.0814,
      "step": 16
    },
    {
      "epoch": 0.030572102956052602,
      "grad_norm": 1.699466347694397,
      "learning_rate": 7.777851165098012e-05,
      "loss": 3.9142,
      "step": 17
    },
    {
      "epoch": 0.03237046195346746,
      "grad_norm": 1.7499068975448608,
      "learning_rate": 7.500000000000001e-05,
      "loss": 4.0083,
      "step": 18
    },
    {
      "epoch": 0.03416882095088232,
      "grad_norm": 1.9480732679367065,
      "learning_rate": 7.211443451095007e-05,
      "loss": 4.0982,
      "step": 19
    },
    {
      "epoch": 0.03596717994829718,
      "grad_norm": 1.8687691688537598,
      "learning_rate": 6.91341716182545e-05,
      "loss": 4.1553,
      "step": 20
    },
    {
      "epoch": 0.03776553894571204,
      "grad_norm": 1.7071983814239502,
      "learning_rate": 6.607197326515808e-05,
      "loss": 3.8703,
      "step": 21
    },
    {
      "epoch": 0.0395638979431269,
      "grad_norm": 1.7296050786972046,
      "learning_rate": 6.294095225512603e-05,
      "loss": 4.2591,
      "step": 22
    },
    {
      "epoch": 0.041362256940541756,
      "grad_norm": 1.612310528755188,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 4.3794,
      "step": 23
    },
    {
      "epoch": 0.043160615937956615,
      "grad_norm": 1.6319174766540527,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 4.4063,
      "step": 24
    },
    {
      "epoch": 0.044958974935371473,
      "grad_norm": 1.577831745147705,
      "learning_rate": 5.327015646150716e-05,
      "loss": 4.4786,
      "step": 25
    },
    {
      "epoch": 0.044958974935371473,
      "eval_loss": 3.9233860969543457,
      "eval_runtime": 99.6308,
      "eval_samples_per_second": 37.609,
      "eval_steps_per_second": 4.707,
      "step": 25
    },
    {
      "epoch": 0.04675733393278633,
      "grad_norm": 1.887658953666687,
      "learning_rate": 5e-05,
      "loss": 3.5318,
      "step": 26
    },
    {
      "epoch": 0.04855569293020119,
      "grad_norm": 1.7948553562164307,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 3.3642,
      "step": 27
    },
    {
      "epoch": 0.05035405192761605,
      "grad_norm": 1.6475787162780762,
      "learning_rate": 4.347369038899744e-05,
      "loss": 3.3877,
      "step": 28
    },
    {
      "epoch": 0.05215241092503091,
      "grad_norm": 1.607811450958252,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 3.4879,
      "step": 29
    },
    {
      "epoch": 0.05395076992244577,
      "grad_norm": 1.7005120515823364,
      "learning_rate": 3.705904774487396e-05,
      "loss": 3.6429,
      "step": 30
    },
    {
      "epoch": 0.05574912891986063,
      "grad_norm": 1.7422455549240112,
      "learning_rate": 3.392802673484193e-05,
      "loss": 3.587,
      "step": 31
    },
    {
      "epoch": 0.057547487917275486,
      "grad_norm": 1.6355663537979126,
      "learning_rate": 3.086582838174551e-05,
      "loss": 3.5677,
      "step": 32
    },
    {
      "epoch": 0.059345846914690345,
      "grad_norm": 1.731115460395813,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 3.7815,
      "step": 33
    },
    {
      "epoch": 0.061144205912105204,
      "grad_norm": 1.6795060634613037,
      "learning_rate": 2.500000000000001e-05,
      "loss": 3.7316,
      "step": 34
    },
    {
      "epoch": 0.06294256490952006,
      "grad_norm": 1.6759228706359863,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 3.8109,
      "step": 35
    },
    {
      "epoch": 0.06474092390693492,
      "grad_norm": 1.653753399848938,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 4.1712,
      "step": 36
    },
    {
      "epoch": 0.06653928290434978,
      "grad_norm": 1.7878714799880981,
      "learning_rate": 1.703270924499656e-05,
      "loss": 4.1051,
      "step": 37
    },
    {
      "epoch": 0.06833764190176464,
      "grad_norm": 1.6746171712875366,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 3.97,
      "step": 38
    },
    {
      "epoch": 0.0701360008991795,
      "grad_norm": 1.775249719619751,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 2.8569,
      "step": 39
    },
    {
      "epoch": 0.07193435989659436,
      "grad_norm": 1.6178361177444458,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.9099,
      "step": 40
    },
    {
      "epoch": 0.07373271889400922,
      "grad_norm": 1.8096075057983398,
      "learning_rate": 8.426519384872733e-06,
      "loss": 3.1256,
      "step": 41
    },
    {
      "epoch": 0.07553107789142408,
      "grad_norm": 1.727738857269287,
      "learning_rate": 6.698729810778065e-06,
      "loss": 3.3099,
      "step": 42
    },
    {
      "epoch": 0.07732943688883893,
      "grad_norm": 1.73582923412323,
      "learning_rate": 5.156362923365588e-06,
      "loss": 3.5413,
      "step": 43
    },
    {
      "epoch": 0.0791277958862538,
      "grad_norm": 1.7474498748779297,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 3.7499,
      "step": 44
    },
    {
      "epoch": 0.08092615488366865,
      "grad_norm": 1.7811682224273682,
      "learning_rate": 2.653493525244721e-06,
      "loss": 3.5255,
      "step": 45
    },
    {
      "epoch": 0.08272451388108351,
      "grad_norm": 1.751634955406189,
      "learning_rate": 1.70370868554659e-06,
      "loss": 3.8033,
      "step": 46
    },
    {
      "epoch": 0.08452287287849837,
      "grad_norm": 1.661466360092163,
      "learning_rate": 9.607359798384785e-07,
      "loss": 3.6937,
      "step": 47
    },
    {
      "epoch": 0.08632123187591323,
      "grad_norm": 1.6849561929702759,
      "learning_rate": 4.277569313094809e-07,
      "loss": 3.9422,
      "step": 48
    },
    {
      "epoch": 0.08811959087332809,
      "grad_norm": 1.6296294927597046,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 3.9785,
      "step": 49
    },
    {
      "epoch": 0.08991794987074295,
      "grad_norm": 1.6093735694885254,
      "learning_rate": 0.0,
      "loss": 4.2984,
      "step": 50
    },
    {
      "epoch": 0.08991794987074295,
      "eval_loss": 3.659489154815674,
      "eval_runtime": 99.7649,
      "eval_samples_per_second": 37.558,
      "eval_steps_per_second": 4.701,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.671510799768617e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}