Training in progress, step 71, checkpoint
{
"best_metric": 6.696208477020264,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.0353198686697841,
"eval_steps": 25,
"global_step": 71,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0004974629390110437,
"grad_norm": 2.1102797985076904,
"learning_rate": 0.00015,
"loss": 6.9589,
"step": 1
},
{
"epoch": 0.0004974629390110437,
"eval_loss": 6.948693752288818,
"eval_runtime": 0.0403,
"eval_samples_per_second": 1242.035,
"eval_steps_per_second": 74.522,
"step": 1
},
{
"epoch": 0.0009949258780220873,
"grad_norm": 0.7844905853271484,
"learning_rate": 0.0003,
"loss": 6.9495,
"step": 2
},
{
"epoch": 0.0014923888170331311,
"grad_norm": 1.2520802021026611,
"learning_rate": 0.0002998600959423082,
"loss": 6.9498,
"step": 3
},
{
"epoch": 0.0019898517560441747,
"grad_norm": 1.3780094385147095,
"learning_rate": 0.0002994406737417567,
"loss": 6.9461,
"step": 4
},
{
"epoch": 0.0024873146950552183,
"grad_norm": 1.5773358345031738,
"learning_rate": 0.00029874260271490463,
"loss": 6.9386,
"step": 5
},
{
"epoch": 0.0029847776340662623,
"grad_norm": 1.953880786895752,
"learning_rate": 0.00029776732972055516,
"loss": 6.9294,
"step": 6
},
{
"epoch": 0.003482240573077306,
"grad_norm": 1.976645588874817,
"learning_rate": 0.0002965168761609197,
"loss": 6.9055,
"step": 7
},
{
"epoch": 0.003979703512088349,
"grad_norm": 0.8517647385597229,
"learning_rate": 0.0002949938337919529,
"loss": 6.9133,
"step": 8
},
{
"epoch": 0.004477166451099393,
"grad_norm": 1.1108756065368652,
"learning_rate": 0.0002932013593515431,
"loss": 6.9281,
"step": 9
},
{
"epoch": 0.0049746293901104365,
"grad_norm": 1.314795732498169,
"learning_rate": 0.00029114316801669057,
"loss": 6.8758,
"step": 10
},
{
"epoch": 0.0054720923291214805,
"grad_norm": 1.5624110698699951,
"learning_rate": 0.00028882352570323616,
"loss": 6.8467,
"step": 11
},
{
"epoch": 0.0059695552681325245,
"grad_norm": 1.8820241689682007,
"learning_rate": 0.00028624724022409897,
"loss": 6.8124,
"step": 12
},
{
"epoch": 0.006467018207143568,
"grad_norm": 1.1546019315719604,
"learning_rate": 0.0002834196513243502,
"loss": 6.8769,
"step": 13
},
{
"epoch": 0.006964481146154612,
"grad_norm": 1.020631194114685,
"learning_rate": 0.0002803466196137759,
"loss": 6.8753,
"step": 14
},
{
"epoch": 0.007461944085165655,
"grad_norm": 0.9455771446228027,
"learning_rate": 0.00027703451441986836,
"loss": 6.8669,
"step": 15
},
{
"epoch": 0.007959407024176699,
"grad_norm": 1.3777406215667725,
"learning_rate": 0.000273490200586422,
"loss": 6.8486,
"step": 16
},
{
"epoch": 0.008456869963187742,
"grad_norm": 0.7217961549758911,
"learning_rate": 0.00026972102424509665,
"loss": 6.8179,
"step": 17
},
{
"epoch": 0.008954332902198787,
"grad_norm": 1.2333062887191772,
"learning_rate": 0.00026573479758943753,
"loss": 6.7716,
"step": 18
},
{
"epoch": 0.00945179584120983,
"grad_norm": 1.557617425918579,
"learning_rate": 0.0002615397826829114,
"loss": 6.7166,
"step": 19
},
{
"epoch": 0.009949258780220873,
"grad_norm": 1.0129878520965576,
"learning_rate": 0.0002571446743345183,
"loss": 6.8344,
"step": 20
},
{
"epoch": 0.010446721719231918,
"grad_norm": 1.1682052612304688,
"learning_rate": 0.00025255858207747205,
"loss": 6.8857,
"step": 21
},
{
"epoch": 0.010944184658242961,
"grad_norm": 1.0228655338287354,
"learning_rate": 0.0002477910112883017,
"loss": 6.8231,
"step": 22
},
{
"epoch": 0.011441647597254004,
"grad_norm": 1.154994010925293,
"learning_rate": 0.00024285184348550706,
"loss": 6.7277,
"step": 23
},
{
"epoch": 0.011939110536265049,
"grad_norm": 1.3172268867492676,
"learning_rate": 0.0002377513158486027,
"loss": 6.6751,
"step": 24
},
{
"epoch": 0.012436573475276092,
"grad_norm": 1.1298874616622925,
"learning_rate": 0.00023249999999999999,
"loss": 6.7232,
"step": 25
},
{
"epoch": 0.012436573475276092,
"eval_loss": 6.769708633422852,
"eval_runtime": 0.0413,
"eval_samples_per_second": 1209.612,
"eval_steps_per_second": 72.577,
"step": 25
},
{
"epoch": 0.012934036414287135,
"grad_norm": 1.482825756072998,
"learning_rate": 0.00022710878009370554,
"loss": 6.835,
"step": 26
},
{
"epoch": 0.013431499353298178,
"grad_norm": 0.6565159559249878,
"learning_rate": 0.00022158883025624965,
"loss": 6.8394,
"step": 27
},
{
"epoch": 0.013928962292309223,
"grad_norm": 1.1333484649658203,
"learning_rate": 0.0002159515914266029,
"loss": 6.8224,
"step": 28
},
{
"epoch": 0.014426425231320266,
"grad_norm": 0.9818063974380493,
"learning_rate": 0.0002102087476430831,
"loss": 6.8011,
"step": 29
},
{
"epoch": 0.01492388817033131,
"grad_norm": 1.1090376377105713,
"learning_rate": 0.00020437220182640135,
"loss": 6.6984,
"step": 30
},
{
"epoch": 0.015421351109342354,
"grad_norm": 1.2643979787826538,
"learning_rate": 0.00019845405110904146,
"loss": 6.6402,
"step": 31
},
{
"epoch": 0.015918814048353398,
"grad_norm": 0.8210477232933044,
"learning_rate": 0.00019246656176210558,
"loss": 6.7164,
"step": 32
},
{
"epoch": 0.01641627698736444,
"grad_norm": 0.8293486833572388,
"learning_rate": 0.0001864221437715939,
"loss": 6.8353,
"step": 33
},
{
"epoch": 0.016913739926375484,
"grad_norm": 0.9909377098083496,
"learning_rate": 0.0001803333251168141,
"loss": 6.8237,
"step": 34
},
{
"epoch": 0.01741120286538653,
"grad_norm": 0.7456725239753723,
"learning_rate": 0.00017421272580423058,
"loss": 6.7016,
"step": 35
},
{
"epoch": 0.017908665804397574,
"grad_norm": 0.9571717381477356,
"learning_rate": 0.00016807303171057425,
"loss": 6.6303,
"step": 36
},
{
"epoch": 0.018406128743408617,
"grad_norm": 1.1454938650131226,
"learning_rate": 0.00016192696828942573,
"loss": 6.5629,
"step": 37
},
{
"epoch": 0.01890359168241966,
"grad_norm": 1.051835298538208,
"learning_rate": 0.00015578727419576942,
"loss": 6.8194,
"step": 38
},
{
"epoch": 0.019401054621430703,
"grad_norm": 0.9183468818664551,
"learning_rate": 0.00014966667488318586,
"loss": 6.8263,
"step": 39
},
{
"epoch": 0.019898517560441746,
"grad_norm": 1.002596378326416,
"learning_rate": 0.00014357785622840606,
"loss": 6.8061,
"step": 40
},
{
"epoch": 0.02039598049945279,
"grad_norm": 1.323659896850586,
"learning_rate": 0.00013753343823789445,
"loss": 6.7613,
"step": 41
},
{
"epoch": 0.020893443438463836,
"grad_norm": 0.601455569267273,
"learning_rate": 0.00013154594889095854,
"loss": 6.721,
"step": 42
},
{
"epoch": 0.02139090637747488,
"grad_norm": 0.9729992747306824,
"learning_rate": 0.00012562779817359865,
"loss": 6.6221,
"step": 43
},
{
"epoch": 0.021888369316485922,
"grad_norm": 1.1012927293777466,
"learning_rate": 0.00011979125235691685,
"loss": 6.5465,
"step": 44
},
{
"epoch": 0.022385832255496965,
"grad_norm": 0.921903133392334,
"learning_rate": 0.00011404840857339706,
"loss": 6.7825,
"step": 45
},
{
"epoch": 0.02288329519450801,
"grad_norm": 1.041771650314331,
"learning_rate": 0.0001084111697437504,
"loss": 6.8214,
"step": 46
},
{
"epoch": 0.02338075813351905,
"grad_norm": 0.6873188018798828,
"learning_rate": 0.00010289121990629447,
"loss": 6.7128,
"step": 47
},
{
"epoch": 0.023878221072530098,
"grad_norm": 0.9764477014541626,
"learning_rate": 9.750000000000003e-05,
"loss": 6.6194,
"step": 48
},
{
"epoch": 0.02437568401154114,
"grad_norm": 0.851015031337738,
"learning_rate": 9.22486841513973e-05,
"loss": 6.55,
"step": 49
},
{
"epoch": 0.024873146950552184,
"grad_norm": 0.7856554388999939,
"learning_rate": 8.714815651449293e-05,
"loss": 6.628,
"step": 50
},
{
"epoch": 0.024873146950552184,
"eval_loss": 6.696208477020264,
"eval_runtime": 0.0415,
"eval_samples_per_second": 1204.166,
"eval_steps_per_second": 72.25,
"step": 50
},
{
"epoch": 0.025370609889563227,
"grad_norm": 1.6974908113479614,
"learning_rate": 8.220898871169827e-05,
"loss": 6.8279,
"step": 51
},
{
"epoch": 0.02586807282857427,
"grad_norm": 0.6426116824150085,
"learning_rate": 7.744141792252794e-05,
"loss": 6.802,
"step": 52
},
{
"epoch": 0.026365535767585314,
"grad_norm": 1.004481315612793,
"learning_rate": 7.285532566548172e-05,
"loss": 6.767,
"step": 53
},
{
"epoch": 0.026862998706596357,
"grad_norm": 1.2093220949172974,
"learning_rate": 6.846021731708856e-05,
"loss": 6.7411,
"step": 54
},
{
"epoch": 0.027360461645607403,
"grad_norm": 0.7267762422561646,
"learning_rate": 6.426520241056245e-05,
"loss": 6.6548,
"step": 55
},
{
"epoch": 0.027857924584618447,
"grad_norm": 0.8319692611694336,
"learning_rate": 6.0278975754903317e-05,
"loss": 6.5916,
"step": 56
},
{
"epoch": 0.02835538752362949,
"grad_norm": 0.7229805588722229,
"learning_rate": 5.6509799413577934e-05,
"loss": 6.593,
"step": 57
},
{
"epoch": 0.028852850462640533,
"grad_norm": 0.8349987268447876,
"learning_rate": 5.296548558013161e-05,
"loss": 6.7984,
"step": 58
},
{
"epoch": 0.029350313401651576,
"grad_norm": 0.9424655437469482,
"learning_rate": 4.9653380386224046e-05,
"loss": 6.7904,
"step": 59
},
{
"epoch": 0.02984777634066262,
"grad_norm": 0.6447422504425049,
"learning_rate": 4.658034867564977e-05,
"loss": 6.66,
"step": 60
},
{
"epoch": 0.030345239279673666,
"grad_norm": 0.8311784267425537,
"learning_rate": 4.375275977590104e-05,
"loss": 6.5813,
"step": 61
},
{
"epoch": 0.03084270221868471,
"grad_norm": 0.9207125902175903,
"learning_rate": 4.117647429676387e-05,
"loss": 6.5121,
"step": 62
},
{
"epoch": 0.03134016515769575,
"grad_norm": 1.0499930381774902,
"learning_rate": 3.885683198330941e-05,
"loss": 6.7942,
"step": 63
},
{
"epoch": 0.031837628096706795,
"grad_norm": 0.9650680422782898,
"learning_rate": 3.679864064845691e-05,
"loss": 6.8169,
"step": 64
},
{
"epoch": 0.03233509103571784,
"grad_norm": 0.8432676792144775,
"learning_rate": 3.500616620804712e-05,
"loss": 6.7807,
"step": 65
},
{
"epoch": 0.03283255397472888,
"grad_norm": 1.2601343393325806,
"learning_rate": 3.348312383908033e-05,
"loss": 6.7331,
"step": 66
},
{
"epoch": 0.03333001691373993,
"grad_norm": 0.5202540755271912,
"learning_rate": 3.223267027944483e-05,
"loss": 6.7119,
"step": 67
},
{
"epoch": 0.03382747985275097,
"grad_norm": 0.8801518678665161,
"learning_rate": 3.125739728509535e-05,
"loss": 6.5942,
"step": 68
},
{
"epoch": 0.034324942791762014,
"grad_norm": 0.9333907961845398,
"learning_rate": 3.055932625824328e-05,
"loss": 6.5063,
"step": 69
},
{
"epoch": 0.03482240573077306,
"grad_norm": 0.7383935451507568,
"learning_rate": 3.0139904057691777e-05,
"loss": 6.6695,
"step": 70
},
{
"epoch": 0.0353198686697841,
"grad_norm": 0.9811359643936157,
"learning_rate": 2.9999999999999997e-05,
"loss": 6.8281,
"step": 71
}
],
"logging_steps": 1,
"max_steps": 71,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7767234478080.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
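
A minimal sketch of how this state file can be inspected programmatically. It assumes the JSON above is saved locally as miner_id_24/checkpoint-71/trainer_state.json (the path is an assumption based on the best_model_checkpoint prefix shown above); the field names ("log_history", "loss", "eval_loss", "best_metric") follow the Transformers trainer-state layout as it appears in this file.

import json

# Assumed local path to this checkpoint's state file; adjust to your layout.
STATE_PATH = "miner_id_24/checkpoint-71/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']} at {state['best_model_checkpoint']}")
print(f"last training step/loss: {train_log[-1]}")
print(f"eval checkpoints (step, eval_loss): {eval_log}")

Run against the data above, this would report a best eval_loss of 6.696208477020264 at checkpoint-50 and eval entries at steps 1, 25, and 50 (eval_steps is 25).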