{
  "best_metric": 11.926499366760254,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.11728485559302156,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002345697111860431,
      "grad_norm": 0.023263728246092796,
      "learning_rate": 5e-05,
      "loss": 11.9223,
      "step": 1
    },
    {
      "epoch": 0.002345697111860431,
      "eval_loss": 11.929311752319336,
      "eval_runtime": 12.1067,
      "eval_samples_per_second": 237.223,
      "eval_steps_per_second": 29.653,
      "step": 1
    },
    {
      "epoch": 0.004691394223720862,
      "grad_norm": 0.023828286677598953,
      "learning_rate": 0.0001,
      "loss": 11.9159,
      "step": 2
    },
    {
      "epoch": 0.007037091335581293,
      "grad_norm": 0.0231478288769722,
      "learning_rate": 9.989294616193017e-05,
      "loss": 11.9124,
      "step": 3
    },
    {
      "epoch": 0.009382788447441724,
      "grad_norm": 0.025272436439990997,
      "learning_rate": 9.957224306869053e-05,
      "loss": 11.909,
      "step": 4
    },
    {
      "epoch": 0.011728485559302155,
      "grad_norm": 0.02534029819071293,
      "learning_rate": 9.903926402016153e-05,
      "loss": 11.9057,
      "step": 5
    },
    {
      "epoch": 0.014074182671162587,
      "grad_norm": 0.02812816947698593,
      "learning_rate": 9.829629131445342e-05,
      "loss": 11.9028,
      "step": 6
    },
    {
      "epoch": 0.016419879783023016,
      "grad_norm": 0.027446430176496506,
      "learning_rate": 9.73465064747553e-05,
      "loss": 11.8997,
      "step": 7
    },
    {
      "epoch": 0.018765576894883448,
      "grad_norm": 0.030333541333675385,
      "learning_rate": 9.619397662556435e-05,
      "loss": 11.896,
      "step": 8
    },
    {
      "epoch": 0.02111127400674388,
      "grad_norm": 0.0286606065928936,
      "learning_rate": 9.484363707663442e-05,
      "loss": 11.8936,
      "step": 9
    },
    {
      "epoch": 0.02345697111860431,
      "grad_norm": 0.032484058290719986,
      "learning_rate": 9.330127018922194e-05,
      "loss": 11.8902,
      "step": 10
    },
    {
      "epoch": 0.025802668230464742,
      "grad_norm": 0.028907716274261475,
      "learning_rate": 9.157348061512727e-05,
      "loss": 11.8855,
      "step": 11
    },
    {
      "epoch": 0.028148365342325173,
      "grad_norm": 0.03208125755190849,
      "learning_rate": 8.966766701456177e-05,
      "loss": 11.8753,
      "step": 12
    },
    {
      "epoch": 0.030494062454185605,
      "grad_norm": 0.029715213924646378,
      "learning_rate": 8.759199037394887e-05,
      "loss": 11.916,
      "step": 13
    },
    {
      "epoch": 0.03283975956604603,
      "grad_norm": 0.02765621244907379,
      "learning_rate": 8.535533905932738e-05,
      "loss": 11.9169,
      "step": 14
    },
    {
      "epoch": 0.035185456677906464,
      "grad_norm": 0.02788020670413971,
      "learning_rate": 8.296729075500344e-05,
      "loss": 11.9128,
      "step": 15
    },
    {
      "epoch": 0.037531153789766895,
      "grad_norm": 0.03472643718123436,
      "learning_rate": 8.043807145043604e-05,
      "loss": 11.9087,
      "step": 16
    },
    {
      "epoch": 0.03987685090162733,
      "grad_norm": 0.03742455691099167,
      "learning_rate": 7.777851165098012e-05,
      "loss": 11.9057,
      "step": 17
    },
    {
      "epoch": 0.04222254801348776,
      "grad_norm": 0.038485001772642136,
      "learning_rate": 7.500000000000001e-05,
      "loss": 11.9032,
      "step": 18
    },
    {
      "epoch": 0.04456824512534819,
      "grad_norm": 0.040359240025281906,
      "learning_rate": 7.211443451095007e-05,
      "loss": 11.9001,
      "step": 19
    },
    {
      "epoch": 0.04691394223720862,
      "grad_norm": 0.037473585456609726,
      "learning_rate": 6.91341716182545e-05,
      "loss": 11.8976,
      "step": 20
    },
    {
      "epoch": 0.04925963934906905,
      "grad_norm": 0.04298529773950577,
      "learning_rate": 6.607197326515808e-05,
      "loss": 11.8937,
      "step": 21
    },
    {
      "epoch": 0.051605336460929484,
      "grad_norm": 0.04474135488271713,
      "learning_rate": 6.294095225512603e-05,
      "loss": 11.891,
      "step": 22
    },
    {
      "epoch": 0.053951033572789915,
      "grad_norm": 0.049919430166482925,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 11.8866,
      "step": 23
    },
    {
      "epoch": 0.056296730684650347,
      "grad_norm": 0.0465259775519371,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 11.8801,
      "step": 24
    },
    {
      "epoch": 0.05864242779651078,
      "grad_norm": 0.04526961222290993,
      "learning_rate": 5.327015646150716e-05,
      "loss": 11.8617,
      "step": 25
    },
    {
      "epoch": 0.05864242779651078,
      "eval_loss": 11.927361488342285,
      "eval_runtime": 12.1456,
      "eval_samples_per_second": 236.464,
      "eval_steps_per_second": 29.558,
      "step": 25
    },
    {
      "epoch": 0.06098812490837121,
      "grad_norm": 0.046790074557065964,
      "learning_rate": 5e-05,
      "loss": 11.9202,
      "step": 26
    },
    {
      "epoch": 0.06333382202023163,
      "grad_norm": 0.045101504772901535,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 11.9141,
      "step": 27
    },
    {
      "epoch": 0.06567951913209207,
      "grad_norm": 0.0466698482632637,
      "learning_rate": 4.347369038899744e-05,
      "loss": 11.9107,
      "step": 28
    },
    {
      "epoch": 0.0680252162439525,
      "grad_norm": 0.05154228210449219,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 11.9064,
      "step": 29
    },
    {
      "epoch": 0.07037091335581293,
      "grad_norm": 0.05609067901968956,
      "learning_rate": 3.705904774487396e-05,
      "loss": 11.9028,
      "step": 30
    },
    {
      "epoch": 0.07271661046767336,
      "grad_norm": 0.054825764149427414,
      "learning_rate": 3.392802673484193e-05,
      "loss": 11.8996,
      "step": 31
    },
    {
      "epoch": 0.07506230757953379,
      "grad_norm": 0.05315624177455902,
      "learning_rate": 3.086582838174551e-05,
      "loss": 11.8977,
      "step": 32
    },
    {
      "epoch": 0.07740800469139422,
      "grad_norm": 0.05658416450023651,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 11.8949,
      "step": 33
    },
    {
      "epoch": 0.07975370180325465,
      "grad_norm": 0.05000322312116623,
      "learning_rate": 2.500000000000001e-05,
      "loss": 11.8919,
      "step": 34
    },
    {
      "epoch": 0.08209939891511508,
      "grad_norm": 0.06540560722351074,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 11.8874,
      "step": 35
    },
    {
      "epoch": 0.08444509602697552,
      "grad_norm": 0.051300302147865295,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 11.8832,
      "step": 36
    },
    {
      "epoch": 0.08679079313883595,
      "grad_norm": 0.05268245190382004,
      "learning_rate": 1.703270924499656e-05,
      "loss": 11.8738,
      "step": 37
    },
    {
      "epoch": 0.08913649025069638,
      "grad_norm": 0.05355386808514595,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 11.9155,
      "step": 38
    },
    {
      "epoch": 0.09148218736255681,
      "grad_norm": 0.05276748165488243,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 11.916,
      "step": 39
    },
    {
      "epoch": 0.09382788447441724,
      "grad_norm": 0.05582776665687561,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 11.9113,
      "step": 40
    },
    {
      "epoch": 0.09617358158627767,
      "grad_norm": 0.06121082231402397,
      "learning_rate": 8.426519384872733e-06,
      "loss": 11.9071,
      "step": 41
    },
    {
      "epoch": 0.0985192786981381,
      "grad_norm": 0.054675329476594925,
      "learning_rate": 6.698729810778065e-06,
      "loss": 11.904,
      "step": 42
    },
    {
      "epoch": 0.10086497580999854,
      "grad_norm": 0.06488271802663803,
      "learning_rate": 5.156362923365588e-06,
      "loss": 11.9007,
      "step": 43
    },
    {
      "epoch": 0.10321067292185897,
      "grad_norm": 0.0639345645904541,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 11.8972,
      "step": 44
    },
    {
      "epoch": 0.1055563700337194,
      "grad_norm": 0.06195634603500366,
      "learning_rate": 2.653493525244721e-06,
      "loss": 11.8954,
      "step": 45
    },
    {
      "epoch": 0.10790206714557983,
      "grad_norm": 0.06594201922416687,
      "learning_rate": 1.70370868554659e-06,
      "loss": 11.8923,
      "step": 46
    },
    {
      "epoch": 0.11024776425744026,
      "grad_norm": 0.0577872097492218,
      "learning_rate": 9.607359798384785e-07,
      "loss": 11.8897,
      "step": 47
    },
    {
      "epoch": 0.11259346136930069,
      "grad_norm": 0.057595305144786835,
      "learning_rate": 4.277569313094809e-07,
      "loss": 11.8845,
      "step": 48
    },
    {
      "epoch": 0.11493915848116112,
      "grad_norm": 0.057064544409513474,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 11.8792,
      "step": 49
    },
    {
      "epoch": 0.11728485559302156,
      "grad_norm": 0.05568932741880417,
      "learning_rate": 0.0,
      "loss": 11.8627,
      "step": 50
    },
    {
      "epoch": 0.11728485559302156,
      "eval_loss": 11.926499366760254,
      "eval_runtime": 12.1765,
      "eval_samples_per_second": 235.864,
      "eval_steps_per_second": 29.483,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1042494259200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}