{
"best_metric": 0.9108405113220215,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.7188885604493054,
"eval_steps": 25,
"global_step": 76,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009459060005911913,
"grad_norm": 0.9699360728263855,
"learning_rate": 0.00015,
"loss": 1.3474,
"step": 1
},
{
"epoch": 0.009459060005911913,
"eval_loss": 1.4942742586135864,
"eval_runtime": 11.3224,
"eval_samples_per_second": 4.416,
"eval_steps_per_second": 4.416,
"step": 1
},
{
"epoch": 0.018918120011823827,
"grad_norm": 0.9874446392059326,
"learning_rate": 0.0003,
"loss": 1.4419,
"step": 2
},
{
"epoch": 0.02837718001773574,
"grad_norm": 0.8229495286941528,
"learning_rate": 0.000299878360437632,
"loss": 1.2491,
"step": 3
},
{
"epoch": 0.03783624002364765,
"grad_norm": 0.6731188297271729,
"learning_rate": 0.00029951366095324104,
"loss": 1.0968,
"step": 4
},
{
"epoch": 0.04729530002955956,
"grad_norm": 0.7858128547668457,
"learning_rate": 0.00029890655875994835,
"loss": 1.0792,
"step": 5
},
{
"epoch": 0.05675436003547148,
"grad_norm": 0.7995796799659729,
"learning_rate": 0.0002980581478969406,
"loss": 1.1033,
"step": 6
},
{
"epoch": 0.06621342004138339,
"grad_norm": 0.616423487663269,
"learning_rate": 0.00029696995725793764,
"loss": 0.942,
"step": 7
},
{
"epoch": 0.0756724800472953,
"grad_norm": 0.8099045753479004,
"learning_rate": 0.00029564394783602234,
"loss": 1.0823,
"step": 8
},
{
"epoch": 0.08513154005320721,
"grad_norm": 0.7018675208091736,
"learning_rate": 0.0002940825091897988,
"loss": 1.1322,
"step": 9
},
{
"epoch": 0.09459060005911912,
"grad_norm": 0.6773211359977722,
"learning_rate": 0.00029228845513724634,
"loss": 1.1547,
"step": 10
},
{
"epoch": 0.10404966006503104,
"grad_norm": 0.6189546585083008,
"learning_rate": 0.00029026501868502873,
"loss": 1.0361,
"step": 11
},
{
"epoch": 0.11350872007094295,
"grad_norm": 0.623130202293396,
"learning_rate": 0.0002880158462023983,
"loss": 1.0837,
"step": 12
},
{
"epoch": 0.12296778007685487,
"grad_norm": 0.55083829164505,
"learning_rate": 0.0002855449908501917,
"loss": 0.8752,
"step": 13
},
{
"epoch": 0.13242684008276678,
"grad_norm": 0.5632800459861755,
"learning_rate": 0.00028285690527676035,
"loss": 1.0141,
"step": 14
},
{
"epoch": 0.14188590008867868,
"grad_norm": 0.5698175430297852,
"learning_rate": 0.000279956433593997,
"loss": 0.9583,
"step": 15
},
{
"epoch": 0.1513449600945906,
"grad_norm": 0.6257957220077515,
"learning_rate": 0.00027684880264791867,
"loss": 1.1261,
"step": 16
},
{
"epoch": 0.16080402010050251,
"grad_norm": 0.6792426109313965,
"learning_rate": 0.00027353961259953696,
"loss": 1.076,
"step": 17
},
{
"epoch": 0.17026308010641442,
"grad_norm": 0.5756276249885559,
"learning_rate": 0.00027003482683298933,
"loss": 0.9779,
"step": 18
},
{
"epoch": 0.17972214011232635,
"grad_norm": 0.6053583025932312,
"learning_rate": 0.00026634076120911777,
"loss": 1.168,
"step": 19
},
{
"epoch": 0.18918120011823825,
"grad_norm": 0.5745776891708374,
"learning_rate": 0.0002624640726838608,
"loss": 0.997,
"step": 20
},
{
"epoch": 0.19864026012415018,
"grad_norm": 0.5367314219474792,
"learning_rate": 0.00025841174731196877,
"loss": 0.9194,
"step": 21
},
{
"epoch": 0.20809932013006208,
"grad_norm": 0.5325618386268616,
"learning_rate": 0.000254191087657661,
"loss": 1.0697,
"step": 22
},
{
"epoch": 0.21755838013597398,
"grad_norm": 0.6518679857254028,
"learning_rate": 0.0002498096996349117,
"loss": 1.1852,
"step": 23
},
{
"epoch": 0.2270174401418859,
"grad_norm": 0.6570560336112976,
"learning_rate": 0.0002452754788010787,
"loss": 1.1949,
"step": 24
},
{
"epoch": 0.2364765001477978,
"grad_norm": 0.6248823404312134,
"learning_rate": 0.00024059659612857536,
"loss": 1.1139,
"step": 25
},
{
"epoch": 0.2364765001477978,
"eval_loss": 0.9684370160102844,
"eval_runtime": 11.3298,
"eval_samples_per_second": 4.413,
"eval_steps_per_second": 4.413,
"step": 25
},
{
"epoch": 0.24593556015370974,
"grad_norm": 0.6149452924728394,
"learning_rate": 0.00023578148328022626,
"loss": 1.2,
"step": 26
},
{
"epoch": 0.2553946201596216,
"grad_norm": 0.5431950688362122,
"learning_rate": 0.00023083881741484068,
"loss": 1.1115,
"step": 27
},
{
"epoch": 0.26485368016553357,
"grad_norm": 0.5499697923660278,
"learning_rate": 0.00022577750555038587,
"loss": 0.8515,
"step": 28
},
{
"epoch": 0.27431274017144547,
"grad_norm": 0.45134755969047546,
"learning_rate": 0.000220606668512939,
"loss": 0.8734,
"step": 29
},
{
"epoch": 0.28377180017735737,
"grad_norm": 0.539884626865387,
"learning_rate": 0.00021533562450034164,
"loss": 0.9292,
"step": 30
},
{
"epoch": 0.29323086018326927,
"grad_norm": 0.4615366458892822,
"learning_rate": 0.00020997387229017774,
"loss": 0.8075,
"step": 31
},
{
"epoch": 0.3026899201891812,
"grad_norm": 0.4927918612957001,
"learning_rate": 0.00020453107412233428,
"loss": 0.8667,
"step": 32
},
{
"epoch": 0.31214898019509313,
"grad_norm": 0.49951159954071045,
"learning_rate": 0.0001990170382869919,
"loss": 0.977,
"step": 33
},
{
"epoch": 0.32160804020100503,
"grad_norm": 0.4911053478717804,
"learning_rate": 0.00019344170144942302,
"loss": 0.8319,
"step": 34
},
{
"epoch": 0.33106710020691693,
"grad_norm": 0.5060461163520813,
"learning_rate": 0.00018781511074344962,
"loss": 0.9303,
"step": 35
},
{
"epoch": 0.34052616021282883,
"grad_norm": 0.554681658744812,
"learning_rate": 0.0001821474056658286,
"loss": 0.9563,
"step": 36
},
{
"epoch": 0.3499852202187408,
"grad_norm": 0.5921431183815002,
"learning_rate": 0.00017644879980419374,
"loss": 1.0516,
"step": 37
},
{
"epoch": 0.3594442802246527,
"grad_norm": 0.4988238215446472,
"learning_rate": 0.00017072956243148002,
"loss": 0.9642,
"step": 38
},
{
"epoch": 0.3689033402305646,
"grad_norm": 0.4554699659347534,
"learning_rate": 0.000165,
"loss": 0.8114,
"step": 39
},
{
"epoch": 0.3783624002364765,
"grad_norm": 0.4806513488292694,
"learning_rate": 0.00015927043756852,
"loss": 1.0024,
"step": 40
},
{
"epoch": 0.3878214602423884,
"grad_norm": 0.5541513562202454,
"learning_rate": 0.0001535512001958063,
"loss": 1.0891,
"step": 41
},
{
"epoch": 0.39728052024830035,
"grad_norm": 0.4677070081233978,
"learning_rate": 0.00014785259433417133,
"loss": 0.8085,
"step": 42
},
{
"epoch": 0.40673958025421225,
"grad_norm": 0.4648246467113495,
"learning_rate": 0.00014218488925655037,
"loss": 0.8908,
"step": 43
},
{
"epoch": 0.41619864026012415,
"grad_norm": 0.5301900506019592,
"learning_rate": 0.00013655829855057698,
"loss": 0.9752,
"step": 44
},
{
"epoch": 0.42565770026603605,
"grad_norm": 0.5299180746078491,
"learning_rate": 0.00013098296171300814,
"loss": 1.0093,
"step": 45
},
{
"epoch": 0.43511676027194796,
"grad_norm": 0.5281239151954651,
"learning_rate": 0.0001254689258776657,
"loss": 0.9709,
"step": 46
},
{
"epoch": 0.4445758202778599,
"grad_norm": 0.5601954460144043,
"learning_rate": 0.00012002612770982222,
"loss": 1.0698,
"step": 47
},
{
"epoch": 0.4540348802837718,
"grad_norm": 0.5613210201263428,
"learning_rate": 0.00011466437549965834,
"loss": 1.0451,
"step": 48
},
{
"epoch": 0.4634939402896837,
"grad_norm": 0.6079455018043518,
"learning_rate": 0.00010939333148706099,
"loss": 1.0703,
"step": 49
},
{
"epoch": 0.4729530002955956,
"grad_norm": 0.6424407362937927,
"learning_rate": 0.00010422249444961407,
"loss": 1.2829,
"step": 50
},
{
"epoch": 0.4729530002955956,
"eval_loss": 0.9108405113220215,
"eval_runtime": 11.325,
"eval_samples_per_second": 4.415,
"eval_steps_per_second": 4.415,
"step": 50
},
{
"epoch": 0.4824120603015075,
"grad_norm": 0.629723846912384,
"learning_rate": 9.916118258515936e-05,
"loss": 1.1712,
"step": 51
},
{
"epoch": 0.4918711203074195,
"grad_norm": 0.7443132996559143,
"learning_rate": 9.421851671977372e-05,
"loss": 1.1707,
"step": 52
},
{
"epoch": 0.5013301803133313,
"grad_norm": 0.46892011165618896,
"learning_rate": 8.940340387142462e-05,
"loss": 0.8093,
"step": 53
},
{
"epoch": 0.5107892403192432,
"grad_norm": 0.43437010049819946,
"learning_rate": 8.47245211989213e-05,
"loss": 0.8652,
"step": 54
},
{
"epoch": 0.5202483003251552,
"grad_norm": 0.3998465836048126,
"learning_rate": 8.019030036508827e-05,
"loss": 0.8075,
"step": 55
},
{
"epoch": 0.5297073603310671,
"grad_norm": 0.4319959282875061,
"learning_rate": 7.580891234233904e-05,
"loss": 0.8248,
"step": 56
},
{
"epoch": 0.539166420336979,
"grad_norm": 0.4736616015434265,
"learning_rate": 7.158825268803127e-05,
"loss": 0.9148,
"step": 57
},
{
"epoch": 0.5486254803428909,
"grad_norm": 0.3903331458568573,
"learning_rate": 6.75359273161392e-05,
"loss": 0.8549,
"step": 58
},
{
"epoch": 0.5580845403488028,
"grad_norm": 0.428145170211792,
"learning_rate": 6.365923879088219e-05,
"loss": 0.8272,
"step": 59
},
{
"epoch": 0.5675436003547147,
"grad_norm": 0.5180810689926147,
"learning_rate": 5.996517316701069e-05,
"loss": 0.9229,
"step": 60
},
{
"epoch": 0.5770026603606266,
"grad_norm": 0.47326382994651794,
"learning_rate": 5.646038740046304e-05,
"loss": 0.768,
"step": 61
},
{
"epoch": 0.5864617203665385,
"grad_norm": 0.47181880474090576,
"learning_rate": 5.315119735208132e-05,
"loss": 0.973,
"step": 62
},
{
"epoch": 0.5959207803724504,
"grad_norm": 0.48134690523147583,
"learning_rate": 5.004356640600297e-05,
"loss": 0.8445,
"step": 63
},
{
"epoch": 0.6053798403783625,
"grad_norm": 0.4601708650588989,
"learning_rate": 4.71430947232396e-05,
"loss": 0.898,
"step": 64
},
{
"epoch": 0.6148389003842744,
"grad_norm": 0.4865597188472748,
"learning_rate": 4.4455009149808265e-05,
"loss": 0.922,
"step": 65
},
{
"epoch": 0.6242979603901863,
"grad_norm": 0.47632795572280884,
"learning_rate": 4.1984153797601665e-05,
"loss": 0.936,
"step": 66
},
{
"epoch": 0.6337570203960982,
"grad_norm": 0.5297707319259644,
"learning_rate": 3.9734981314971234e-05,
"loss": 0.9437,
"step": 67
},
{
"epoch": 0.6432160804020101,
"grad_norm": 0.46655818819999695,
"learning_rate": 3.771154486275363e-05,
"loss": 0.7435,
"step": 68
},
{
"epoch": 0.652675140407922,
"grad_norm": 0.513863205909729,
"learning_rate": 3.591749081020113e-05,
"loss": 0.9687,
"step": 69
},
{
"epoch": 0.6621342004138339,
"grad_norm": 0.5340255498886108,
"learning_rate": 3.435605216397765e-05,
"loss": 1.0353,
"step": 70
},
{
"epoch": 0.6715932604197458,
"grad_norm": 0.45835715532302856,
"learning_rate": 3.303004274206237e-05,
"loss": 0.864,
"step": 71
},
{
"epoch": 0.6810523204256577,
"grad_norm": 0.4819653034210205,
"learning_rate": 3.194185210305936e-05,
"loss": 1.0022,
"step": 72
},
{
"epoch": 0.6905113804315696,
"grad_norm": 0.5713886618614197,
"learning_rate": 3.1093441240051626e-05,
"loss": 1.0681,
"step": 73
},
{
"epoch": 0.6999704404374816,
"grad_norm": 0.5911141633987427,
"learning_rate": 3.048633904675892e-05,
"loss": 0.9129,
"step": 74
},
{
"epoch": 0.7094295004433935,
"grad_norm": 0.5698217749595642,
"learning_rate": 3.012163956236801e-05,
"loss": 1.0666,
"step": 75
},
{
"epoch": 0.7094295004433935,
"eval_loss": 0.8948501348495483,
"eval_runtime": 11.3345,
"eval_samples_per_second": 4.411,
"eval_steps_per_second": 4.411,
"step": 75
},
{
"epoch": 0.7188885604493054,
"grad_norm": 0.5917747616767883,
"learning_rate": 2.9999999999999997e-05,
"loss": 1.0924,
"step": 76
}
],
"logging_steps": 1,
"max_steps": 76,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.002568490422108e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}