Training in progress, step 100, checkpoint
{
"best_metric": 1.2952526807785034,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.24848578971890045,
"eval_steps": 25,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0024848578971890046,
"grad_norm": 0.18750134110450745,
"learning_rate": 2e-05,
"loss": 1.1205,
"step": 1
},
{
"epoch": 0.0024848578971890046,
"eval_loss": 1.5502378940582275,
"eval_runtime": 1.16,
"eval_samples_per_second": 43.105,
"eval_steps_per_second": 11.207,
"step": 1
},
{
"epoch": 0.004969715794378009,
"grad_norm": 0.2050493061542511,
"learning_rate": 4e-05,
"loss": 1.1432,
"step": 2
},
{
"epoch": 0.007454573691567014,
"grad_norm": 0.23336094617843628,
"learning_rate": 6e-05,
"loss": 1.1906,
"step": 3
},
{
"epoch": 0.009939431588756018,
"grad_norm": 0.2642175257205963,
"learning_rate": 8e-05,
"loss": 1.2138,
"step": 4
},
{
"epoch": 0.012424289485945022,
"grad_norm": 0.2873069941997528,
"learning_rate": 0.0001,
"loss": 1.2386,
"step": 5
},
{
"epoch": 0.014909147383134027,
"grad_norm": 0.3289160430431366,
"learning_rate": 9.997539658034168e-05,
"loss": 1.3354,
"step": 6
},
{
"epoch": 0.017394005280323033,
"grad_norm": 0.3904762864112854,
"learning_rate": 9.990161322484486e-05,
"loss": 1.3982,
"step": 7
},
{
"epoch": 0.019878863177512036,
"grad_norm": 0.5013667941093445,
"learning_rate": 9.977873061452552e-05,
"loss": 1.4638,
"step": 8
},
{
"epoch": 0.02236372107470104,
"grad_norm": 0.7098448872566223,
"learning_rate": 9.96068831197139e-05,
"loss": 1.6263,
"step": 9
},
{
"epoch": 0.024848578971890044,
"grad_norm": 0.9447751641273499,
"learning_rate": 9.938625865312251e-05,
"loss": 1.6602,
"step": 10
},
{
"epoch": 0.02733343686907905,
"grad_norm": 1.1918106079101562,
"learning_rate": 9.911709846436641e-05,
"loss": 1.7242,
"step": 11
},
{
"epoch": 0.029818294766268055,
"grad_norm": 1.6665799617767334,
"learning_rate": 9.879969687616027e-05,
"loss": 1.9561,
"step": 12
},
{
"epoch": 0.03230315266345706,
"grad_norm": 0.5675529837608337,
"learning_rate": 9.84344009624807e-05,
"loss": 1.0769,
"step": 13
},
{
"epoch": 0.034788010560646065,
"grad_norm": 0.7401061058044434,
"learning_rate": 9.80216101690461e-05,
"loss": 1.1726,
"step": 14
},
{
"epoch": 0.03727286845783507,
"grad_norm": 0.6895852088928223,
"learning_rate": 9.756177587652856e-05,
"loss": 1.1816,
"step": 15
},
{
"epoch": 0.03975772635502407,
"grad_norm": 0.6290579438209534,
"learning_rate": 9.705540090697575e-05,
"loss": 1.2386,
"step": 16
},
{
"epoch": 0.042242584252213077,
"grad_norm": 0.550385057926178,
"learning_rate": 9.650303897398232e-05,
"loss": 1.2365,
"step": 17
},
{
"epoch": 0.04472744214940208,
"grad_norm": 0.4426826238632202,
"learning_rate": 9.590529407721231e-05,
"loss": 1.2245,
"step": 18
},
{
"epoch": 0.047212300046591084,
"grad_norm": 0.3795057535171509,
"learning_rate": 9.526281984193436e-05,
"loss": 1.2883,
"step": 19
},
{
"epoch": 0.04969715794378009,
"grad_norm": 1.7804195880889893,
"learning_rate": 9.4576318804292e-05,
"loss": 1.3227,
"step": 20
},
{
"epoch": 0.05218201584096909,
"grad_norm": 0.46404874324798584,
"learning_rate": 9.384654164309083e-05,
"loss": 1.5023,
"step": 21
},
{
"epoch": 0.0546668737381581,
"grad_norm": 0.63600754737854,
"learning_rate": 9.30742863589421e-05,
"loss": 1.5474,
"step": 22
},
{
"epoch": 0.057151731635347106,
"grad_norm": 0.8300634622573853,
"learning_rate": 9.226039740166091e-05,
"loss": 1.5191,
"step": 23
},
{
"epoch": 0.05963658953253611,
"grad_norm": 1.0977858304977417,
"learning_rate": 9.140576474687264e-05,
"loss": 1.877,
"step": 24
},
{
"epoch": 0.06212144742972511,
"grad_norm": 1.9869602918624878,
"learning_rate": 9.051132292283771e-05,
"loss": 2.3241,
"step": 25
},
{
"epoch": 0.06212144742972511,
"eval_loss": 1.3513970375061035,
"eval_runtime": 1.193,
"eval_samples_per_second": 41.912,
"eval_steps_per_second": 10.897,
"step": 25
},
{
"epoch": 0.06460630532691412,
"grad_norm": 0.1354065090417862,
"learning_rate": 8.957804998855866e-05,
"loss": 1.0299,
"step": 26
},
{
"epoch": 0.06709116322410312,
"grad_norm": 0.14030440151691437,
"learning_rate": 8.860696646428693e-05,
"loss": 1.1064,
"step": 27
},
{
"epoch": 0.06957602112129213,
"grad_norm": 0.16048847138881683,
"learning_rate": 8.759913421559902e-05,
"loss": 1.1247,
"step": 28
},
{
"epoch": 0.07206087901848113,
"grad_norm": 0.16564105451107025,
"learning_rate": 8.655565529226198e-05,
"loss": 1.1864,
"step": 29
},
{
"epoch": 0.07454573691567014,
"grad_norm": 0.18728169798851013,
"learning_rate": 8.547767072315835e-05,
"loss": 1.1942,
"step": 30
},
{
"epoch": 0.07703059481285914,
"grad_norm": 0.21532979607582092,
"learning_rate": 8.436635926858759e-05,
"loss": 1.2285,
"step": 31
},
{
"epoch": 0.07951545271004815,
"grad_norm": 0.25429460406303406,
"learning_rate": 8.322293613130917e-05,
"loss": 1.2947,
"step": 32
},
{
"epoch": 0.08200031060723714,
"grad_norm": 0.3389357328414917,
"learning_rate": 8.204865162773613e-05,
"loss": 1.3498,
"step": 33
},
{
"epoch": 0.08448516850442615,
"grad_norm": 0.44248735904693604,
"learning_rate": 8.084478982073247e-05,
"loss": 1.4682,
"step": 34
},
{
"epoch": 0.08697002640161516,
"grad_norm": 0.5417070388793945,
"learning_rate": 7.961266711550922e-05,
"loss": 1.4128,
"step": 35
},
{
"epoch": 0.08945488429880416,
"grad_norm": 0.7389299869537354,
"learning_rate": 7.835363082015468e-05,
"loss": 1.6869,
"step": 36
},
{
"epoch": 0.09193974219599317,
"grad_norm": 1.0323277711868286,
"learning_rate": 7.706905767237288e-05,
"loss": 1.7113,
"step": 37
},
{
"epoch": 0.09442460009318217,
"grad_norm": 0.17391887307167053,
"learning_rate": 7.576035233404096e-05,
"loss": 1.0076,
"step": 38
},
{
"epoch": 0.09690945799037118,
"grad_norm": 0.13513757288455963,
"learning_rate": 7.442894585523218e-05,
"loss": 1.1018,
"step": 39
},
{
"epoch": 0.09939431588756018,
"grad_norm": 0.1373761147260666,
"learning_rate": 7.307629410938363e-05,
"loss": 1.0876,
"step": 40
},
{
"epoch": 0.10187917378474919,
"grad_norm": 0.15485402941703796,
"learning_rate": 7.170387620131993e-05,
"loss": 1.154,
"step": 41
},
{
"epoch": 0.10436403168193818,
"grad_norm": 0.169663667678833,
"learning_rate": 7.031319284987394e-05,
"loss": 1.2091,
"step": 42
},
{
"epoch": 0.1068488895791272,
"grad_norm": 0.18936441838741302,
"learning_rate": 6.890576474687263e-05,
"loss": 1.2159,
"step": 43
},
{
"epoch": 0.1093337474763162,
"grad_norm": 0.22018368542194366,
"learning_rate": 6.7483130894283e-05,
"loss": 1.2628,
"step": 44
},
{
"epoch": 0.1118186053735052,
"grad_norm": 0.2780289053916931,
"learning_rate": 6.604684692133597e-05,
"loss": 1.3283,
"step": 45
},
{
"epoch": 0.11430346327069421,
"grad_norm": 0.35780683159828186,
"learning_rate": 6.459848338346861e-05,
"loss": 1.3937,
"step": 46
},
{
"epoch": 0.11678832116788321,
"grad_norm": 0.546146035194397,
"learning_rate": 6.313962404494496e-05,
"loss": 1.4614,
"step": 47
},
{
"epoch": 0.11927317906507222,
"grad_norm": 0.687628448009491,
"learning_rate": 6.167186414703289e-05,
"loss": 1.4176,
"step": 48
},
{
"epoch": 0.12175803696226122,
"grad_norm": 1.150036096572876,
"learning_rate": 6.019680866363139e-05,
"loss": 1.7219,
"step": 49
},
{
"epoch": 0.12424289485945023,
"grad_norm": 2.3322739601135254,
"learning_rate": 5.8716070546254966e-05,
"loss": 2.1274,
"step": 50
},
{
"epoch": 0.12424289485945023,
"eval_loss": 1.307538628578186,
"eval_runtime": 1.1894,
"eval_samples_per_second": 42.037,
"eval_steps_per_second": 10.93,
"step": 50
},
{
"epoch": 0.12672775275663922,
"grad_norm": 0.1314828097820282,
"learning_rate": 5.7231268960295e-05,
"loss": 1.0635,
"step": 51
},
{
"epoch": 0.12921261065382825,
"grad_norm": 0.14226771891117096,
"learning_rate": 5.574402751448614e-05,
"loss": 1.135,
"step": 52
},
{
"epoch": 0.13169746855101724,
"grad_norm": 0.16140902042388916,
"learning_rate": 5.425597248551387e-05,
"loss": 1.0796,
"step": 53
},
{
"epoch": 0.13418232644820624,
"grad_norm": 0.1799178272485733,
"learning_rate": 5.2768731039705e-05,
"loss": 1.1422,
"step": 54
},
{
"epoch": 0.13666718434539524,
"grad_norm": 0.2081109583377838,
"learning_rate": 5.128392945374505e-05,
"loss": 1.2189,
"step": 55
},
{
"epoch": 0.13915204224258426,
"grad_norm": 0.25022587180137634,
"learning_rate": 4.980319133636863e-05,
"loss": 1.2379,
"step": 56
},
{
"epoch": 0.14163690013977326,
"grad_norm": 0.28561145067214966,
"learning_rate": 4.83281358529671e-05,
"loss": 1.2227,
"step": 57
},
{
"epoch": 0.14412175803696226,
"grad_norm": 0.39062267541885376,
"learning_rate": 4.686037595505507e-05,
"loss": 1.4685,
"step": 58
},
{
"epoch": 0.14660661593415125,
"grad_norm": 0.44714751839637756,
"learning_rate": 4.54015166165314e-05,
"loss": 1.391,
"step": 59
},
{
"epoch": 0.14909147383134028,
"grad_norm": 0.5737501382827759,
"learning_rate": 4.395315307866405e-05,
"loss": 1.4773,
"step": 60
},
{
"epoch": 0.15157633172852927,
"grad_norm": 0.6805304884910583,
"learning_rate": 4.2516869105717004e-05,
"loss": 1.5433,
"step": 61
},
{
"epoch": 0.15406118962571827,
"grad_norm": 0.9832828044891357,
"learning_rate": 4.109423525312738e-05,
"loss": 1.7847,
"step": 62
},
{
"epoch": 0.1565460475229073,
"grad_norm": 0.14597496390342712,
"learning_rate": 3.968680715012606e-05,
"loss": 1.0738,
"step": 63
},
{
"epoch": 0.1590309054200963,
"grad_norm": 0.12474174797534943,
"learning_rate": 3.829612379868006e-05,
"loss": 1.0669,
"step": 64
},
{
"epoch": 0.1615157633172853,
"grad_norm": 0.13938696682453156,
"learning_rate": 3.692370589061639e-05,
"loss": 1.0996,
"step": 65
},
{
"epoch": 0.16400062121447428,
"grad_norm": 0.14947223663330078,
"learning_rate": 3.557105414476782e-05,
"loss": 1.1537,
"step": 66
},
{
"epoch": 0.1664854791116633,
"grad_norm": 0.15758228302001953,
"learning_rate": 3.423964766595906e-05,
"loss": 1.1751,
"step": 67
},
{
"epoch": 0.1689703370088523,
"grad_norm": 0.1946917325258255,
"learning_rate": 3.293094232762715e-05,
"loss": 1.2175,
"step": 68
},
{
"epoch": 0.1714551949060413,
"grad_norm": 0.22615738213062286,
"learning_rate": 3.164636917984534e-05,
"loss": 1.2667,
"step": 69
},
{
"epoch": 0.17394005280323033,
"grad_norm": 0.2879005968570709,
"learning_rate": 3.0387332884490805e-05,
"loss": 1.3261,
"step": 70
},
{
"epoch": 0.17642491070041932,
"grad_norm": 0.385571151971817,
"learning_rate": 2.9155210179267546e-05,
"loss": 1.3998,
"step": 71
},
{
"epoch": 0.17890976859760832,
"grad_norm": 0.4972687363624573,
"learning_rate": 2.7951348372263875e-05,
"loss": 1.4713,
"step": 72
},
{
"epoch": 0.18139462649479732,
"grad_norm": 0.6449961066246033,
"learning_rate": 2.677706386869083e-05,
"loss": 1.5434,
"step": 73
},
{
"epoch": 0.18387948439198634,
"grad_norm": 0.8163850903511047,
"learning_rate": 2.5633640731412412e-05,
"loss": 1.5494,
"step": 74
},
{
"epoch": 0.18636434228917534,
"grad_norm": 1.7719392776489258,
"learning_rate": 2.4522329276841663e-05,
"loss": 2.154,
"step": 75
},
{
"epoch": 0.18636434228917534,
"eval_loss": 1.3009710311889648,
"eval_runtime": 1.193,
"eval_samples_per_second": 41.91,
"eval_steps_per_second": 10.897,
"step": 75
},
{
"epoch": 0.18884920018636434,
"grad_norm": 0.11202926188707352,
"learning_rate": 2.3444344707738015e-05,
"loss": 1.0467,
"step": 76
},
{
"epoch": 0.19133405808355336,
"grad_norm": 0.12705056369304657,
"learning_rate": 2.2400865784401e-05,
"loss": 1.1197,
"step": 77
},
{
"epoch": 0.19381891598074236,
"grad_norm": 0.13754263520240784,
"learning_rate": 2.1393033535713093e-05,
"loss": 1.1236,
"step": 78
},
{
"epoch": 0.19630377387793135,
"grad_norm": 0.14906519651412964,
"learning_rate": 2.0421950011441354e-05,
"loss": 1.1776,
"step": 79
},
{
"epoch": 0.19878863177512035,
"grad_norm": 0.18295283615589142,
"learning_rate": 1.9488677077162295e-05,
"loss": 1.1789,
"step": 80
},
{
"epoch": 0.20127348967230937,
"grad_norm": 0.2125210464000702,
"learning_rate": 1.8594235253127375e-05,
"loss": 1.2862,
"step": 81
},
{
"epoch": 0.20375834756949837,
"grad_norm": 0.26508280634880066,
"learning_rate": 1.77396025983391e-05,
"loss": 1.2848,
"step": 82
},
{
"epoch": 0.20624320546668737,
"grad_norm": 0.35227707028388977,
"learning_rate": 1.6925713641057904e-05,
"loss": 1.4362,
"step": 83
},
{
"epoch": 0.20872806336387636,
"grad_norm": 0.44621512293815613,
"learning_rate": 1.6153458356909176e-05,
"loss": 1.4328,
"step": 84
},
{
"epoch": 0.2112129212610654,
"grad_norm": 0.5334073305130005,
"learning_rate": 1.5423681195707997e-05,
"loss": 1.3808,
"step": 85
},
{
"epoch": 0.2136977791582544,
"grad_norm": 0.6840953826904297,
"learning_rate": 1.4737180158065644e-05,
"loss": 1.5155,
"step": 86
},
{
"epoch": 0.21618263705544338,
"grad_norm": 1.0756030082702637,
"learning_rate": 1.4094705922787687e-05,
"loss": 1.8925,
"step": 87
},
{
"epoch": 0.2186674949526324,
"grad_norm": 0.15060825645923615,
"learning_rate": 1.3496961026017687e-05,
"loss": 1.0787,
"step": 88
},
{
"epoch": 0.2211523528498214,
"grad_norm": 0.12421015650033951,
"learning_rate": 1.2944599093024267e-05,
"loss": 1.075,
"step": 89
},
{
"epoch": 0.2236372107470104,
"grad_norm": 0.12722982466220856,
"learning_rate": 1.2438224123471442e-05,
"loss": 1.0812,
"step": 90
},
{
"epoch": 0.2261220686441994,
"grad_norm": 0.1435864120721817,
"learning_rate": 1.1978389830953907e-05,
"loss": 1.1586,
"step": 91
},
{
"epoch": 0.22860692654138842,
"grad_norm": 0.1644206941127777,
"learning_rate": 1.1565599037519316e-05,
"loss": 1.1476,
"step": 92
},
{
"epoch": 0.23109178443857742,
"grad_norm": 0.19284330308437347,
"learning_rate": 1.1200303123839742e-05,
"loss": 1.2184,
"step": 93
},
{
"epoch": 0.23357664233576642,
"grad_norm": 0.22463686764240265,
"learning_rate": 1.088290153563358e-05,
"loss": 1.3173,
"step": 94
},
{
"epoch": 0.23606150023295544,
"grad_norm": 0.2764645218849182,
"learning_rate": 1.0613741346877497e-05,
"loss": 1.3155,
"step": 95
},
{
"epoch": 0.23854635813014444,
"grad_norm": 0.3586321175098419,
"learning_rate": 1.0393116880286118e-05,
"loss": 1.4269,
"step": 96
},
{
"epoch": 0.24103121602733343,
"grad_norm": 0.4705638289451599,
"learning_rate": 1.0221269385474488e-05,
"loss": 1.3914,
"step": 97
},
{
"epoch": 0.24351607392452243,
"grad_norm": 0.6012833714485168,
"learning_rate": 1.0098386775155147e-05,
"loss": 1.5772,
"step": 98
},
{
"epoch": 0.24600093182171145,
"grad_norm": 0.7574616074562073,
"learning_rate": 1.0024603419658329e-05,
"loss": 1.5515,
"step": 99
},
{
"epoch": 0.24848578971890045,
"grad_norm": 1.7547069787979126,
"learning_rate": 1e-05,
"loss": 2.0062,
"step": 100
},
{
"epoch": 0.24848578971890045,
"eval_loss": 1.2952526807785034,
"eval_runtime": 1.2125,
"eval_samples_per_second": 41.238,
"eval_steps_per_second": 10.722,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.666832784162816e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
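
The JSON above follows the `trainer_state.json` format that the Hugging Face `Trainer` writes alongside each saved checkpoint: training-loss entries and evaluation entries are interleaved in `log_history`, and `best_metric` / `best_model_checkpoint` record the lowest `eval_loss` seen so far. As a minimal sketch (not part of the checkpoint itself), the snippet below shows one way to inspect the file after downloading it; the local path is an assumption taken from the `best_model_checkpoint` field and should be adjusted to wherever the checkpoint actually lives.

```python
import json

# Assumed local path, mirroring the "best_model_checkpoint" field above;
# change it to the directory the checkpoint was downloaded to.
STATE_PATH = "miner_id_24/checkpoint-100/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval loss: {state['best_metric']:.4f} "
      f"(checkpoint: {state['best_model_checkpoint']})")

# Evaluation ran every 25 steps (eval_steps = 25), so this prints four rows.
for entry in eval_log:
    print(f"step {entry['step']:>3}: eval_loss = {entry['eval_loss']:.4f}")
```

Run against this checkpoint, the loop would print the eval losses at steps 1, 25, 50, and 100 (1.5502, 1.3514, 1.3075, 1.2953), matching the monotonic improvement that keeps the `EarlyStoppingCallback` patience counter at 0.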