{
"best_metric": 6.883584976196289,
"best_model_checkpoint": "miner_id_24/checkpoint-75",
"epoch": 3.03988603988604,
"eval_steps": 25,
"global_step": 99,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.030389363722697058,
"grad_norm": 0.1357833743095398,
"learning_rate": 2.5e-05,
"loss": 6.9382,
"step": 1
},
{
"epoch": 0.030389363722697058,
"eval_loss": 6.938218593597412,
"eval_runtime": 0.0715,
"eval_samples_per_second": 699.748,
"eval_steps_per_second": 181.935,
"step": 1
},
{
"epoch": 0.060778727445394115,
"grad_norm": 0.13408163189888,
"learning_rate": 5e-05,
"loss": 6.9392,
"step": 2
},
{
"epoch": 0.09116809116809117,
"grad_norm": 0.14098231494426727,
"learning_rate": 7.500000000000001e-05,
"loss": 6.9395,
"step": 3
},
{
"epoch": 0.12155745489078823,
"grad_norm": 0.1429039090871811,
"learning_rate": 0.0001,
"loss": 6.9391,
"step": 4
},
{
"epoch": 0.15194681861348527,
"grad_norm": 0.13866113126277924,
"learning_rate": 9.997539658034168e-05,
"loss": 6.9388,
"step": 5
},
{
"epoch": 0.18233618233618235,
"grad_norm": 0.15095753967761993,
"learning_rate": 9.990161322484486e-05,
"loss": 6.9363,
"step": 6
},
{
"epoch": 0.2127255460588794,
"grad_norm": 0.16712909936904907,
"learning_rate": 9.977873061452552e-05,
"loss": 6.9343,
"step": 7
},
{
"epoch": 0.24311490978157646,
"grad_norm": 0.18588651716709137,
"learning_rate": 9.96068831197139e-05,
"loss": 6.9333,
"step": 8
},
{
"epoch": 0.27350427350427353,
"grad_norm": 0.13056699931621552,
"learning_rate": 9.938625865312251e-05,
"loss": 6.9341,
"step": 9
},
{
"epoch": 0.30389363722697055,
"grad_norm": 0.13966338336467743,
"learning_rate": 9.911709846436641e-05,
"loss": 6.9323,
"step": 10
},
{
"epoch": 0.3342830009496676,
"grad_norm": 0.1428191214799881,
"learning_rate": 9.879969687616027e-05,
"loss": 6.9316,
"step": 11
},
{
"epoch": 0.3646723646723647,
"grad_norm": 0.14790380001068115,
"learning_rate": 9.84344009624807e-05,
"loss": 6.9306,
"step": 12
},
{
"epoch": 0.3950617283950617,
"grad_norm": 0.15512308478355408,
"learning_rate": 9.80216101690461e-05,
"loss": 6.9292,
"step": 13
},
{
"epoch": 0.4254510921177588,
"grad_norm": 0.16272898018360138,
"learning_rate": 9.756177587652856e-05,
"loss": 6.927,
"step": 14
},
{
"epoch": 0.45584045584045585,
"grad_norm": 0.16427749395370483,
"learning_rate": 9.705540090697575e-05,
"loss": 6.9266,
"step": 15
},
{
"epoch": 0.4862298195631529,
"grad_norm": 0.18800827860832214,
"learning_rate": 9.650303897398232e-05,
"loss": 6.9249,
"step": 16
},
{
"epoch": 0.51661918328585,
"grad_norm": 0.14342306554317474,
"learning_rate": 9.590529407721231e-05,
"loss": 6.9271,
"step": 17
},
{
"epoch": 0.5470085470085471,
"grad_norm": 0.14963504672050476,
"learning_rate": 9.526281984193436e-05,
"loss": 6.9258,
"step": 18
},
{
"epoch": 0.577397910731244,
"grad_norm": 0.16108295321464539,
"learning_rate": 9.4576318804292e-05,
"loss": 6.9243,
"step": 19
},
{
"epoch": 0.6077872744539411,
"grad_norm": 0.16339664161205292,
"learning_rate": 9.384654164309083e-05,
"loss": 6.9222,
"step": 20
},
{
"epoch": 0.6381766381766382,
"grad_norm": 0.168046772480011,
"learning_rate": 9.30742863589421e-05,
"loss": 6.9206,
"step": 21
},
{
"epoch": 0.6685660018993352,
"grad_norm": 0.1747824251651764,
"learning_rate": 9.226039740166091e-05,
"loss": 6.9185,
"step": 22
},
{
"epoch": 0.6989553656220323,
"grad_norm": 0.18280130624771118,
"learning_rate": 9.140576474687264e-05,
"loss": 6.9179,
"step": 23
},
{
"epoch": 0.7293447293447294,
"grad_norm": 0.21718308329582214,
"learning_rate": 9.051132292283771e-05,
"loss": 6.9149,
"step": 24
},
{
"epoch": 0.7597340930674265,
"grad_norm": 0.16105718910694122,
"learning_rate": 8.957804998855866e-05,
"loss": 6.9191,
"step": 25
},
{
"epoch": 0.7597340930674265,
"eval_loss": 6.916116714477539,
"eval_runtime": 0.0686,
"eval_samples_per_second": 729.3,
"eval_steps_per_second": 189.618,
"step": 25
},
{
"epoch": 0.7901234567901234,
"grad_norm": 0.1643819510936737,
"learning_rate": 8.860696646428693e-05,
"loss": 6.9186,
"step": 26
},
{
"epoch": 0.8205128205128205,
"grad_norm": 0.1819484382867813,
"learning_rate": 8.759913421559902e-05,
"loss": 6.9156,
"step": 27
},
{
"epoch": 0.8509021842355176,
"grad_norm": 0.17893028259277344,
"learning_rate": 8.655565529226198e-05,
"loss": 6.9141,
"step": 28
},
{
"epoch": 0.8812915479582146,
"grad_norm": 0.18618154525756836,
"learning_rate": 8.547767072315835e-05,
"loss": 6.9128,
"step": 29
},
{
"epoch": 0.9116809116809117,
"grad_norm": 0.18693046271800995,
"learning_rate": 8.436635926858759e-05,
"loss": 6.9101,
"step": 30
},
{
"epoch": 0.9420702754036088,
"grad_norm": 0.1917145699262619,
"learning_rate": 8.322293613130917e-05,
"loss": 6.9086,
"step": 31
},
{
"epoch": 0.9724596391263058,
"grad_norm": 0.22024141252040863,
"learning_rate": 8.204865162773613e-05,
"loss": 6.902,
"step": 32
},
{
"epoch": 1.01329534662868,
"grad_norm": 0.25598573684692383,
"learning_rate": 8.084478982073247e-05,
"loss": 9.9181,
"step": 33
},
{
"epoch": 1.043684710351377,
"grad_norm": 0.16839833557605743,
"learning_rate": 7.961266711550922e-05,
"loss": 6.8916,
"step": 34
},
{
"epoch": 1.074074074074074,
"grad_norm": 0.17936962842941284,
"learning_rate": 7.835363082015468e-05,
"loss": 6.959,
"step": 35
},
{
"epoch": 1.104463437796771,
"grad_norm": 0.18253256380558014,
"learning_rate": 7.706905767237288e-05,
"loss": 6.9025,
"step": 36
},
{
"epoch": 1.134852801519468,
"grad_norm": 0.185040682554245,
"learning_rate": 7.576035233404096e-05,
"loss": 6.9272,
"step": 37
},
{
"epoch": 1.1652421652421652,
"grad_norm": 0.19758865237236023,
"learning_rate": 7.442894585523218e-05,
"loss": 7.2086,
"step": 38
},
{
"epoch": 1.1956315289648622,
"grad_norm": 0.1904238611459732,
"learning_rate": 7.307629410938363e-05,
"loss": 6.8585,
"step": 39
},
{
"epoch": 1.2260208926875593,
"grad_norm": 0.2162361592054367,
"learning_rate": 7.170387620131993e-05,
"loss": 7.3381,
"step": 40
},
{
"epoch": 1.2564102564102564,
"grad_norm": 0.17022086679935455,
"learning_rate": 7.031319284987394e-05,
"loss": 6.1575,
"step": 41
},
{
"epoch": 1.2867996201329535,
"grad_norm": 0.16576489806175232,
"learning_rate": 6.890576474687263e-05,
"loss": 6.9278,
"step": 42
},
{
"epoch": 1.3171889838556505,
"grad_norm": 0.16799896955490112,
"learning_rate": 6.7483130894283e-05,
"loss": 6.9742,
"step": 43
},
{
"epoch": 1.3475783475783476,
"grad_norm": 0.16917121410369873,
"learning_rate": 6.604684692133597e-05,
"loss": 6.8591,
"step": 44
},
{
"epoch": 1.3779677113010447,
"grad_norm": 0.16244754195213318,
"learning_rate": 6.459848338346861e-05,
"loss": 6.9428,
"step": 45
},
{
"epoch": 1.4083570750237417,
"grad_norm": 0.1710527092218399,
"learning_rate": 6.313962404494496e-05,
"loss": 7.0195,
"step": 46
},
{
"epoch": 1.4387464387464388,
"grad_norm": 0.1634788066148758,
"learning_rate": 6.167186414703289e-05,
"loss": 6.8305,
"step": 47
},
{
"epoch": 1.4691358024691357,
"grad_norm": 0.1797107458114624,
"learning_rate": 6.019680866363139e-05,
"loss": 7.3825,
"step": 48
},
{
"epoch": 1.499525166191833,
"grad_norm": 0.1428283154964447,
"learning_rate": 5.8716070546254966e-05,
"loss": 6.246,
"step": 49
},
{
"epoch": 1.5299145299145298,
"grad_norm": 0.14042928814888,
"learning_rate": 5.7231268960295e-05,
"loss": 6.9065,
"step": 50
},
{
"epoch": 1.5299145299145298,
"eval_loss": 6.891157627105713,
"eval_runtime": 0.0711,
"eval_samples_per_second": 702.839,
"eval_steps_per_second": 182.738,
"step": 50
},
{
"epoch": 1.560303893637227,
"grad_norm": 0.13292936980724335,
"learning_rate": 5.574402751448614e-05,
"loss": 6.8319,
"step": 51
},
{
"epoch": 1.590693257359924,
"grad_norm": 0.14090725779533386,
"learning_rate": 5.425597248551387e-05,
"loss": 6.9562,
"step": 52
},
{
"epoch": 1.6210826210826212,
"grad_norm": 0.13612942397594452,
"learning_rate": 5.2768731039705e-05,
"loss": 6.8707,
"step": 53
},
{
"epoch": 1.651471984805318,
"grad_norm": 0.14546889066696167,
"learning_rate": 5.128392945374505e-05,
"loss": 7.2714,
"step": 54
},
{
"epoch": 1.6818613485280152,
"grad_norm": 0.13168302178382874,
"learning_rate": 4.980319133636863e-05,
"loss": 6.7413,
"step": 55
},
{
"epoch": 1.7122507122507122,
"grad_norm": 0.14720255136489868,
"learning_rate": 4.83281358529671e-05,
"loss": 7.3065,
"step": 56
},
{
"epoch": 1.7426400759734093,
"grad_norm": 0.12005792558193207,
"learning_rate": 4.686037595505507e-05,
"loss": 6.2386,
"step": 57
},
{
"epoch": 1.7730294396961064,
"grad_norm": 0.11440925300121307,
"learning_rate": 4.54015166165314e-05,
"loss": 6.8224,
"step": 58
},
{
"epoch": 1.8034188034188035,
"grad_norm": 0.11298578232526779,
"learning_rate": 4.395315307866405e-05,
"loss": 6.9501,
"step": 59
},
{
"epoch": 1.8338081671415005,
"grad_norm": 0.11483923345804214,
"learning_rate": 4.2516869105717004e-05,
"loss": 6.957,
"step": 60
},
{
"epoch": 1.8641975308641974,
"grad_norm": 0.11452340334653854,
"learning_rate": 4.109423525312738e-05,
"loss": 6.8562,
"step": 61
},
{
"epoch": 1.8945868945868947,
"grad_norm": 0.12229608744382858,
"learning_rate": 3.968680715012606e-05,
"loss": 7.0527,
"step": 62
},
{
"epoch": 1.9249762583095915,
"grad_norm": 0.11904427409172058,
"learning_rate": 3.829612379868006e-05,
"loss": 6.8479,
"step": 63
},
{
"epoch": 1.9553656220322888,
"grad_norm": 0.12863321602344513,
"learning_rate": 3.692370589061639e-05,
"loss": 7.3454,
"step": 64
},
{
"epoch": 1.9857549857549857,
"grad_norm": 0.13798318803310394,
"learning_rate": 3.557105414476782e-05,
"loss": 8.6353,
"step": 65
},
{
"epoch": 2.02659069325736,
"grad_norm": 0.10759957879781723,
"learning_rate": 3.423964766595906e-05,
"loss": 7.4987,
"step": 66
},
{
"epoch": 2.056980056980057,
"grad_norm": 0.10079392790794373,
"learning_rate": 3.293094232762715e-05,
"loss": 6.8864,
"step": 67
},
{
"epoch": 2.087369420702754,
"grad_norm": 0.09840377420186996,
"learning_rate": 3.164636917984534e-05,
"loss": 6.8948,
"step": 68
},
{
"epoch": 2.117758784425451,
"grad_norm": 0.09733422100543976,
"learning_rate": 3.0387332884490805e-05,
"loss": 6.8686,
"step": 69
},
{
"epoch": 2.148148148148148,
"grad_norm": 0.10290233790874481,
"learning_rate": 2.9155210179267546e-05,
"loss": 6.9335,
"step": 70
},
{
"epoch": 2.178537511870845,
"grad_norm": 0.10776888579130173,
"learning_rate": 2.7951348372263875e-05,
"loss": 7.0476,
"step": 71
},
{
"epoch": 2.208926875593542,
"grad_norm": 0.10383031517267227,
"learning_rate": 2.677706386869083e-05,
"loss": 6.762,
"step": 72
},
{
"epoch": 2.2393162393162394,
"grad_norm": 0.13066378235816956,
"learning_rate": 2.5633640731412412e-05,
"loss": 7.2873,
"step": 73
},
{
"epoch": 2.269705603038936,
"grad_norm": 0.0905405804514885,
"learning_rate": 2.4522329276841663e-05,
"loss": 6.4179,
"step": 74
},
{
"epoch": 2.3000949667616335,
"grad_norm": 0.09294768422842026,
"learning_rate": 2.3444344707738015e-05,
"loss": 6.8739,
"step": 75
},
{
"epoch": 2.3000949667616335,
"eval_loss": 6.883584976196289,
"eval_runtime": 0.069,
"eval_samples_per_second": 724.277,
"eval_steps_per_second": 188.312,
"step": 75
},
{
"epoch": 2.3304843304843303,
"grad_norm": 0.09302153438329697,
"learning_rate": 2.2400865784401e-05,
"loss": 6.88,
"step": 76
},
{
"epoch": 2.3608736942070276,
"grad_norm": 0.09038078039884567,
"learning_rate": 2.1393033535713093e-05,
"loss": 6.9026,
"step": 77
},
{
"epoch": 2.3912630579297245,
"grad_norm": 0.09370548278093338,
"learning_rate": 2.0421950011441354e-05,
"loss": 6.9711,
"step": 78
},
{
"epoch": 2.421652421652422,
"grad_norm": 0.09348244965076447,
"learning_rate": 1.9488677077162295e-05,
"loss": 6.8911,
"step": 79
},
{
"epoch": 2.4520417853751186,
"grad_norm": 0.09923094511032104,
"learning_rate": 1.8594235253127375e-05,
"loss": 6.91,
"step": 80
},
{
"epoch": 2.482431149097816,
"grad_norm": 0.12247008085250854,
"learning_rate": 1.77396025983391e-05,
"loss": 7.1999,
"step": 81
},
{
"epoch": 2.5128205128205128,
"grad_norm": 0.08389267325401306,
"learning_rate": 1.6925713641057904e-05,
"loss": 6.4161,
"step": 82
},
{
"epoch": 2.5432098765432096,
"grad_norm": 0.08750791847705841,
"learning_rate": 1.6153458356909176e-05,
"loss": 6.8745,
"step": 83
},
{
"epoch": 2.573599240265907,
"grad_norm": 0.08594594895839691,
"learning_rate": 1.5423681195707997e-05,
"loss": 6.9089,
"step": 84
},
{
"epoch": 2.603988603988604,
"grad_norm": 0.09055335074663162,
"learning_rate": 1.4737180158065644e-05,
"loss": 6.8928,
"step": 85
},
{
"epoch": 2.634377967711301,
"grad_norm": 0.09193196147680283,
"learning_rate": 1.4094705922787687e-05,
"loss": 6.8778,
"step": 86
},
{
"epoch": 2.664767331433998,
"grad_norm": 0.09884382039308548,
"learning_rate": 1.3496961026017687e-05,
"loss": 7.1493,
"step": 87
},
{
"epoch": 2.695156695156695,
"grad_norm": 0.09690383821725845,
"learning_rate": 1.2944599093024267e-05,
"loss": 6.6664,
"step": 88
},
{
"epoch": 2.725546058879392,
"grad_norm": 0.12492170184850693,
"learning_rate": 1.2438224123471442e-05,
"loss": 7.3624,
"step": 89
},
{
"epoch": 2.7559354226020893,
"grad_norm": 0.08144571632146835,
"learning_rate": 1.1978389830953907e-05,
"loss": 6.2938,
"step": 90
},
{
"epoch": 2.786324786324786,
"grad_norm": 0.08170946687459946,
"learning_rate": 1.1565599037519316e-05,
"loss": 6.8895,
"step": 91
},
{
"epoch": 2.8167141500474835,
"grad_norm": 0.08227747678756714,
"learning_rate": 1.1200303123839742e-05,
"loss": 6.9282,
"step": 92
},
{
"epoch": 2.8471035137701803,
"grad_norm": 0.08924560993909836,
"learning_rate": 1.088290153563358e-05,
"loss": 6.8725,
"step": 93
},
{
"epoch": 2.8774928774928776,
"grad_norm": 0.09153356403112411,
"learning_rate": 1.0613741346877497e-05,
"loss": 6.9255,
"step": 94
},
{
"epoch": 2.9078822412155745,
"grad_norm": 0.09689171612262726,
"learning_rate": 1.0393116880286118e-05,
"loss": 7.0523,
"step": 95
},
{
"epoch": 2.9382716049382713,
"grad_norm": 0.09343353658914566,
"learning_rate": 1.0221269385474488e-05,
"loss": 6.7672,
"step": 96
},
{
"epoch": 2.9686609686609686,
"grad_norm": 0.1134086549282074,
"learning_rate": 1.0098386775155147e-05,
"loss": 7.2979,
"step": 97
},
{
"epoch": 3.009496676163343,
"grad_norm": 0.11922775208950043,
"learning_rate": 1.0024603419658329e-05,
"loss": 9.4261,
"step": 98
},
{
"epoch": 3.03988603988604,
"grad_norm": 0.08165828138589859,
"learning_rate": 1e-05,
"loss": 6.7951,
"step": 99
}
],
"logging_steps": 1,
"max_steps": 99,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 11297795604480.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}