Training in progress, step 71, checkpoint (commit 3b6fdcc)
{
"best_metric": 6.703342437744141,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.0353198686697841,
"eval_steps": 25,
"global_step": 71,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0004974629390110437,
"grad_norm": 2.0978946685791016,
"learning_rate": 0.00015,
"loss": 6.9589,
"step": 1
},
{
"epoch": 0.0004974629390110437,
"eval_loss": 6.9487104415893555,
"eval_runtime": 0.0398,
"eval_samples_per_second": 1255.231,
"eval_steps_per_second": 75.314,
"step": 1
},
{
"epoch": 0.0009949258780220873,
"grad_norm": 0.784283459186554,
"learning_rate": 0.0003,
"loss": 6.9495,
"step": 2
},
{
"epoch": 0.0014923888170331311,
"grad_norm": 1.2301733493804932,
"learning_rate": 0.0002998600959423082,
"loss": 6.9501,
"step": 3
},
{
"epoch": 0.0019898517560441747,
"grad_norm": 1.3414157629013062,
"learning_rate": 0.0002994406737417567,
"loss": 6.947,
"step": 4
},
{
"epoch": 0.0024873146950552183,
"grad_norm": 1.5272213220596313,
"learning_rate": 0.00029874260271490463,
"loss": 6.9409,
"step": 5
},
{
"epoch": 0.0029847776340662623,
"grad_norm": 1.9261209964752197,
"learning_rate": 0.00029776732972055516,
"loss": 6.9326,
"step": 6
},
{
"epoch": 0.003482240573077306,
"grad_norm": 1.9587503671646118,
"learning_rate": 0.0002965168761609197,
"loss": 6.9088,
"step": 7
},
{
"epoch": 0.003979703512088349,
"grad_norm": 0.8569906949996948,
"learning_rate": 0.0002949938337919529,
"loss": 6.9137,
"step": 8
},
{
"epoch": 0.004477166451099393,
"grad_norm": 1.1164758205413818,
"learning_rate": 0.0002932013593515431,
"loss": 6.9299,
"step": 9
},
{
"epoch": 0.0049746293901104365,
"grad_norm": 1.2469490766525269,
"learning_rate": 0.00029114316801669057,
"loss": 6.8841,
"step": 10
},
{
"epoch": 0.0054720923291214805,
"grad_norm": 1.5062528848648071,
"learning_rate": 0.00028882352570323616,
"loss": 6.8565,
"step": 11
},
{
"epoch": 0.0059695552681325245,
"grad_norm": 1.8433338403701782,
"learning_rate": 0.00028624724022409897,
"loss": 6.822,
"step": 12
},
{
"epoch": 0.006467018207143568,
"grad_norm": 1.1858328580856323,
"learning_rate": 0.0002834196513243502,
"loss": 6.8764,
"step": 13
},
{
"epoch": 0.006964481146154612,
"grad_norm": 1.0784205198287964,
"learning_rate": 0.0002803466196137759,
"loss": 6.8753,
"step": 14
},
{
"epoch": 0.007461944085165655,
"grad_norm": 0.946907639503479,
"learning_rate": 0.00027703451441986836,
"loss": 6.8735,
"step": 15
},
{
"epoch": 0.007959407024176699,
"grad_norm": 1.3893108367919922,
"learning_rate": 0.000273490200586422,
"loss": 6.8587,
"step": 16
},
{
"epoch": 0.008456869963187742,
"grad_norm": 0.709138810634613,
"learning_rate": 0.00026972102424509665,
"loss": 6.8263,
"step": 17
},
{
"epoch": 0.008954332902198787,
"grad_norm": 1.2287291288375854,
"learning_rate": 0.00026573479758943753,
"loss": 6.7822,
"step": 18
},
{
"epoch": 0.00945179584120983,
"grad_norm": 1.5594730377197266,
"learning_rate": 0.0002615397826829114,
"loss": 6.7248,
"step": 19
},
{
"epoch": 0.009949258780220873,
"grad_norm": 1.0414823293685913,
"learning_rate": 0.0002571446743345183,
"loss": 6.8372,
"step": 20
},
{
"epoch": 0.010446721719231918,
"grad_norm": 1.1876541376113892,
"learning_rate": 0.00025255858207747205,
"loss": 6.8816,
"step": 21
},
{
"epoch": 0.010944184658242961,
"grad_norm": 1.0211689472198486,
"learning_rate": 0.0002477910112883017,
"loss": 6.836,
"step": 22
},
{
"epoch": 0.011441647597254004,
"grad_norm": 1.1104573011398315,
"learning_rate": 0.00024285184348550706,
"loss": 6.7411,
"step": 23
},
{
"epoch": 0.011939110536265049,
"grad_norm": 1.2712668180465698,
"learning_rate": 0.0002377513158486027,
"loss": 6.6863,
"step": 24
},
{
"epoch": 0.012436573475276092,
"grad_norm": 1.0742238759994507,
"learning_rate": 0.00023249999999999999,
"loss": 6.7343,
"step": 25
},
{
"epoch": 0.012436573475276092,
"eval_loss": 6.77644157409668,
"eval_runtime": 0.0409,
"eval_samples_per_second": 1222.387,
"eval_steps_per_second": 73.343,
"step": 25
},
{
"epoch": 0.012934036414287135,
"grad_norm": 1.5590488910675049,
"learning_rate": 0.00022710878009370554,
"loss": 6.8274,
"step": 26
},
{
"epoch": 0.013431499353298178,
"grad_norm": 0.6883417367935181,
"learning_rate": 0.00022158883025624965,
"loss": 6.8454,
"step": 27
},
{
"epoch": 0.013928962292309223,
"grad_norm": 1.1368870735168457,
"learning_rate": 0.0002159515914266029,
"loss": 6.8375,
"step": 28
},
{
"epoch": 0.014426425231320266,
"grad_norm": 0.9605547785758972,
"learning_rate": 0.0002102087476430831,
"loss": 6.8131,
"step": 29
},
{
"epoch": 0.01492388817033131,
"grad_norm": 1.0653886795043945,
"learning_rate": 0.00020437220182640135,
"loss": 6.71,
"step": 30
},
{
"epoch": 0.015421351109342354,
"grad_norm": 1.2283194065093994,
"learning_rate": 0.00019845405110904146,
"loss": 6.6504,
"step": 31
},
{
"epoch": 0.015918814048353398,
"grad_norm": 0.8201404213905334,
"learning_rate": 0.00019246656176210558,
"loss": 6.7202,
"step": 32
},
{
"epoch": 0.01641627698736444,
"grad_norm": 0.8443421125411987,
"learning_rate": 0.0001864221437715939,
"loss": 6.8298,
"step": 33
},
{
"epoch": 0.016913739926375484,
"grad_norm": 1.0041660070419312,
"learning_rate": 0.0001803333251168141,
"loss": 6.8294,
"step": 34
},
{
"epoch": 0.01741120286538653,
"grad_norm": 0.729718804359436,
"learning_rate": 0.00017421272580423058,
"loss": 6.7173,
"step": 35
},
{
"epoch": 0.017908665804397574,
"grad_norm": 0.9453200101852417,
"learning_rate": 0.00016807303171057425,
"loss": 6.6455,
"step": 36
},
{
"epoch": 0.018406128743408617,
"grad_norm": 1.152786135673523,
"learning_rate": 0.00016192696828942573,
"loss": 6.5751,
"step": 37
},
{
"epoch": 0.01890359168241966,
"grad_norm": 1.0720747709274292,
"learning_rate": 0.00015578727419576942,
"loss": 6.8176,
"step": 38
},
{
"epoch": 0.019401054621430703,
"grad_norm": 1.0019649267196655,
"learning_rate": 0.00014966667488318586,
"loss": 6.8212,
"step": 39
},
{
"epoch": 0.019898517560441746,
"grad_norm": 1.0470595359802246,
"learning_rate": 0.00014357785622840606,
"loss": 6.8254,
"step": 40
},
{
"epoch": 0.02039598049945279,
"grad_norm": 1.326593279838562,
"learning_rate": 0.00013753343823789445,
"loss": 6.7906,
"step": 41
},
{
"epoch": 0.020893443438463836,
"grad_norm": 0.5931538939476013,
"learning_rate": 0.00013154594889095854,
"loss": 6.7336,
"step": 42
},
{
"epoch": 0.02139090637747488,
"grad_norm": 0.9888867735862732,
"learning_rate": 0.00012562779817359865,
"loss": 6.6365,
"step": 43
},
{
"epoch": 0.021888369316485922,
"grad_norm": 1.1606509685516357,
"learning_rate": 0.00011979125235691685,
"loss": 6.5585,
"step": 44
},
{
"epoch": 0.022385832255496965,
"grad_norm": 0.9471900463104248,
"learning_rate": 0.00011404840857339706,
"loss": 6.7794,
"step": 45
},
{
"epoch": 0.02288329519450801,
"grad_norm": 1.0708708763122559,
"learning_rate": 0.0001084111697437504,
"loss": 6.8215,
"step": 46
},
{
"epoch": 0.02338075813351905,
"grad_norm": 0.7427545785903931,
"learning_rate": 0.00010289121990629447,
"loss": 6.7288,
"step": 47
},
{
"epoch": 0.023878221072530098,
"grad_norm": 0.9723594188690186,
"learning_rate": 9.750000000000003e-05,
"loss": 6.6319,
"step": 48
},
{
"epoch": 0.02437568401154114,
"grad_norm": 0.9104545712471008,
"learning_rate": 9.22486841513973e-05,
"loss": 6.5588,
"step": 49
},
{
"epoch": 0.024873146950552184,
"grad_norm": 0.8541779518127441,
"learning_rate": 8.714815651449293e-05,
"loss": 6.6375,
"step": 50
},
{
"epoch": 0.024873146950552184,
"eval_loss": 6.703342437744141,
"eval_runtime": 0.0414,
"eval_samples_per_second": 1208.754,
"eval_steps_per_second": 72.525,
"step": 50
},
{
"epoch": 0.025370609889563227,
"grad_norm": 1.8086193799972534,
"learning_rate": 8.220898871169827e-05,
"loss": 6.8079,
"step": 51
},
{
"epoch": 0.02586807282857427,
"grad_norm": 0.7370948195457458,
"learning_rate": 7.744141792252794e-05,
"loss": 6.8023,
"step": 52
},
{
"epoch": 0.026365535767585314,
"grad_norm": 1.0690277814865112,
"learning_rate": 7.285532566548172e-05,
"loss": 6.7905,
"step": 53
},
{
"epoch": 0.026862998706596357,
"grad_norm": 1.2137597799301147,
"learning_rate": 6.846021731708856e-05,
"loss": 6.77,
"step": 54
},
{
"epoch": 0.027360461645607403,
"grad_norm": 0.7531571984291077,
"learning_rate": 6.426520241056245e-05,
"loss": 6.661,
"step": 55
},
{
"epoch": 0.027857924584618447,
"grad_norm": 0.8767682313919067,
"learning_rate": 6.0278975754903317e-05,
"loss": 6.6017,
"step": 56
},
{
"epoch": 0.02835538752362949,
"grad_norm": 0.7823953628540039,
"learning_rate": 5.6509799413577934e-05,
"loss": 6.5955,
"step": 57
},
{
"epoch": 0.028852850462640533,
"grad_norm": 0.8727318048477173,
"learning_rate": 5.296548558013161e-05,
"loss": 6.7922,
"step": 58
},
{
"epoch": 0.029350313401651576,
"grad_norm": 0.996519148349762,
"learning_rate": 4.9653380386224046e-05,
"loss": 6.7956,
"step": 59
},
{
"epoch": 0.02984777634066262,
"grad_norm": 0.6733922362327576,
"learning_rate": 4.658034867564977e-05,
"loss": 6.6694,
"step": 60
},
{
"epoch": 0.030345239279673666,
"grad_norm": 0.8655278086662292,
"learning_rate": 4.375275977590104e-05,
"loss": 6.5892,
"step": 61
},
{
"epoch": 0.03084270221868471,
"grad_norm": 0.9837325811386108,
"learning_rate": 4.117647429676387e-05,
"loss": 6.5165,
"step": 62
},
{
"epoch": 0.03134016515769575,
"grad_norm": 1.1237236261367798,
"learning_rate": 3.885683198330941e-05,
"loss": 6.7888,
"step": 63
},
{
"epoch": 0.031837628096706795,
"grad_norm": 1.0909230709075928,
"learning_rate": 3.679864064845691e-05,
"loss": 6.8041,
"step": 64
},
{
"epoch": 0.03233509103571784,
"grad_norm": 0.9223988652229309,
"learning_rate": 3.500616620804712e-05,
"loss": 6.7929,
"step": 65
},
{
"epoch": 0.03283255397472888,
"grad_norm": 1.2748134136199951,
"learning_rate": 3.348312383908033e-05,
"loss": 6.7633,
"step": 66
},
{
"epoch": 0.03333001691373993,
"grad_norm": 0.5331524014472961,
"learning_rate": 3.223267027944483e-05,
"loss": 6.7201,
"step": 67
},
{
"epoch": 0.03382747985275097,
"grad_norm": 0.9274587035179138,
"learning_rate": 3.125739728509535e-05,
"loss": 6.6011,
"step": 68
},
{
"epoch": 0.034324942791762014,
"grad_norm": 1.011277198791504,
"learning_rate": 3.055932625824328e-05,
"loss": 6.5109,
"step": 69
},
{
"epoch": 0.03482240573077306,
"grad_norm": 0.7799229025840759,
"learning_rate": 3.0139904057691777e-05,
"loss": 6.6674,
"step": 70
},
{
"epoch": 0.0353198686697841,
"grad_norm": 1.039928674697876,
"learning_rate": 2.9999999999999997e-05,
"loss": 6.8205,
"step": 71
}
],
"logging_steps": 1,
"max_steps": 71,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7767234478080.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
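
The JSON above is the trainer_state.json that the Hugging Face Trainer writes alongside each checkpoint. As a minimal sketch of how one might inspect it offline, the snippet below loads the file and summarizes the logged history; the local path "checkpoint-71/trainer_state.json" is an assumption, and any copy of this file works. Note that best_model_checkpoint points at step 50, where eval_loss reached its minimum of 6.7033, and the logged learning_rate values appear consistent with a cosine decay from 3e-4 down to a 3e-5 floor after a short warmup.

    import json

    # Assumed local path to the checkpoint's trainer state file.
    with open("checkpoint-71/trainer_state.json") as f:
        state = json.load(f)

    print("best eval_loss:", state["best_metric"])             # 6.7033...
    print("best checkpoint:", state["best_model_checkpoint"])  # miner_id_24/checkpoint-50

    # log_history mixes training entries (with "loss") and eval entries
    # (with "eval_loss"); split them for a quick summary.
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    print("train loss first/last:", train_log[0]["loss"], train_log[-1]["loss"])
    for e in eval_log:
        print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}")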