{
"best_metric": 2.735139846801758,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.01632586425043876,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00016325864250438759,
"grad_norm": 0.8632163405418396,
"learning_rate": 3.3333333333333333e-06,
"loss": 2.6076,
"step": 1
},
{
"epoch": 0.00016325864250438759,
"eval_loss": 3.252528667449951,
"eval_runtime": 363.3093,
"eval_samples_per_second": 28.395,
"eval_steps_per_second": 14.197,
"step": 1
},
{
"epoch": 0.00032651728500877517,
"grad_norm": 0.8164938688278198,
"learning_rate": 6.666666666666667e-06,
"loss": 2.7626,
"step": 2
},
{
"epoch": 0.0004897759275131628,
"grad_norm": 1.0135329961776733,
"learning_rate": 1e-05,
"loss": 3.2325,
"step": 3
},
{
"epoch": 0.0006530345700175503,
"grad_norm": 1.0819627046585083,
"learning_rate": 1.3333333333333333e-05,
"loss": 3.3302,
"step": 4
},
{
"epoch": 0.0008162932125219379,
"grad_norm": 1.0888584852218628,
"learning_rate": 1.6666666666666667e-05,
"loss": 3.2608,
"step": 5
},
{
"epoch": 0.0009795518550263255,
"grad_norm": 1.1022908687591553,
"learning_rate": 2e-05,
"loss": 3.3397,
"step": 6
},
{
"epoch": 0.001142810497530713,
"grad_norm": 1.0631771087646484,
"learning_rate": 2.3333333333333336e-05,
"loss": 3.2714,
"step": 7
},
{
"epoch": 0.0013060691400351007,
"grad_norm": 1.1249130964279175,
"learning_rate": 2.6666666666666667e-05,
"loss": 3.3009,
"step": 8
},
{
"epoch": 0.0014693277825394882,
"grad_norm": 1.2283929586410522,
"learning_rate": 3e-05,
"loss": 3.3171,
"step": 9
},
{
"epoch": 0.0016325864250438759,
"grad_norm": 1.0188909769058228,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.7293,
"step": 10
},
{
"epoch": 0.0017958450675482633,
"grad_norm": 1.2337793111801147,
"learning_rate": 3.6666666666666666e-05,
"loss": 3.0187,
"step": 11
},
{
"epoch": 0.001959103710052651,
"grad_norm": 1.0160133838653564,
"learning_rate": 4e-05,
"loss": 2.897,
"step": 12
},
{
"epoch": 0.0021223623525570383,
"grad_norm": 0.9511106610298157,
"learning_rate": 4.3333333333333334e-05,
"loss": 3.2403,
"step": 13
},
{
"epoch": 0.002285620995061426,
"grad_norm": 0.9372708797454834,
"learning_rate": 4.666666666666667e-05,
"loss": 3.0862,
"step": 14
},
{
"epoch": 0.0024488796375658137,
"grad_norm": 0.7635182738304138,
"learning_rate": 5e-05,
"loss": 2.9518,
"step": 15
},
{
"epoch": 0.0026121382800702014,
"grad_norm": 0.9754545092582703,
"learning_rate": 5.333333333333333e-05,
"loss": 2.789,
"step": 16
},
{
"epoch": 0.0027753969225745886,
"grad_norm": 0.8830393552780151,
"learning_rate": 5.666666666666667e-05,
"loss": 2.8314,
"step": 17
},
{
"epoch": 0.0029386555650789763,
"grad_norm": 0.7772381901741028,
"learning_rate": 6e-05,
"loss": 2.6371,
"step": 18
},
{
"epoch": 0.003101914207583364,
"grad_norm": 1.0107795000076294,
"learning_rate": 6.333333333333333e-05,
"loss": 2.858,
"step": 19
},
{
"epoch": 0.0032651728500877517,
"grad_norm": 1.1810671091079712,
"learning_rate": 6.666666666666667e-05,
"loss": 3.2062,
"step": 20
},
{
"epoch": 0.003428431492592139,
"grad_norm": 0.9577175378799438,
"learning_rate": 7e-05,
"loss": 3.1488,
"step": 21
},
{
"epoch": 0.0035916901350965267,
"grad_norm": 0.898638904094696,
"learning_rate": 7.333333333333333e-05,
"loss": 3.3016,
"step": 22
},
{
"epoch": 0.0037549487776009144,
"grad_norm": 1.1327389478683472,
"learning_rate": 7.666666666666667e-05,
"loss": 2.8533,
"step": 23
},
{
"epoch": 0.003918207420105302,
"grad_norm": 0.9358575940132141,
"learning_rate": 8e-05,
"loss": 3.1331,
"step": 24
},
{
"epoch": 0.00408146606260969,
"grad_norm": 0.9725240468978882,
"learning_rate": 8.333333333333334e-05,
"loss": 2.9826,
"step": 25
},
{
"epoch": 0.004244724705114077,
"grad_norm": 0.9563072919845581,
"learning_rate": 8.666666666666667e-05,
"loss": 2.9238,
"step": 26
},
{
"epoch": 0.004407983347618464,
"grad_norm": 0.8429107069969177,
"learning_rate": 9e-05,
"loss": 2.8076,
"step": 27
},
{
"epoch": 0.004571241990122852,
"grad_norm": 0.938100278377533,
"learning_rate": 9.333333333333334e-05,
"loss": 2.9171,
"step": 28
},
{
"epoch": 0.00473450063262724,
"grad_norm": 1.0879822969436646,
"learning_rate": 9.666666666666667e-05,
"loss": 3.0946,
"step": 29
},
{
"epoch": 0.004897759275131627,
"grad_norm": 1.012676477432251,
"learning_rate": 0.0001,
"loss": 2.8535,
"step": 30
},
{
"epoch": 0.005061017917636015,
"grad_norm": 0.864611029624939,
"learning_rate": 9.994965332706573e-05,
"loss": 2.8619,
"step": 31
},
{
"epoch": 0.005224276560140403,
"grad_norm": 0.9068366885185242,
"learning_rate": 9.979871469976196e-05,
"loss": 2.7014,
"step": 32
},
{
"epoch": 0.00538753520264479,
"grad_norm": 1.1143074035644531,
"learning_rate": 9.954748808839674e-05,
"loss": 2.7219,
"step": 33
},
{
"epoch": 0.005550793845149177,
"grad_norm": 1.0027602910995483,
"learning_rate": 9.919647942993148e-05,
"loss": 3.1337,
"step": 34
},
{
"epoch": 0.005714052487653565,
"grad_norm": 0.9993074536323547,
"learning_rate": 9.874639560909117e-05,
"loss": 2.906,
"step": 35
},
{
"epoch": 0.005877311130157953,
"grad_norm": 0.9074757695198059,
"learning_rate": 9.819814303479267e-05,
"loss": 2.7418,
"step": 36
},
{
"epoch": 0.00604056977266234,
"grad_norm": 1.012048602104187,
"learning_rate": 9.755282581475769e-05,
"loss": 2.594,
"step": 37
},
{
"epoch": 0.006203828415166728,
"grad_norm": 0.8783692717552185,
"learning_rate": 9.681174353198687e-05,
"loss": 3.0345,
"step": 38
},
{
"epoch": 0.006367087057671116,
"grad_norm": 1.058866262435913,
"learning_rate": 9.597638862757255e-05,
"loss": 2.9908,
"step": 39
},
{
"epoch": 0.006530345700175503,
"grad_norm": 0.9666945338249207,
"learning_rate": 9.504844339512095e-05,
"loss": 2.8316,
"step": 40
},
{
"epoch": 0.00669360434267989,
"grad_norm": 0.8591721653938293,
"learning_rate": 9.40297765928369e-05,
"loss": 2.723,
"step": 41
},
{
"epoch": 0.006856862985184278,
"grad_norm": 0.9416791796684265,
"learning_rate": 9.292243968009331e-05,
"loss": 2.781,
"step": 42
},
{
"epoch": 0.007020121627688666,
"grad_norm": 1.2646563053131104,
"learning_rate": 9.172866268606513e-05,
"loss": 2.9002,
"step": 43
},
{
"epoch": 0.007183380270193053,
"grad_norm": 1.0307127237319946,
"learning_rate": 9.045084971874738e-05,
"loss": 2.8108,
"step": 44
},
{
"epoch": 0.007346638912697441,
"grad_norm": 1.0498608350753784,
"learning_rate": 8.90915741234015e-05,
"loss": 2.4317,
"step": 45
},
{
"epoch": 0.007509897555201829,
"grad_norm": 1.1443825960159302,
"learning_rate": 8.765357330018056e-05,
"loss": 2.7738,
"step": 46
},
{
"epoch": 0.007673156197706216,
"grad_norm": 1.4476778507232666,
"learning_rate": 8.613974319136958e-05,
"loss": 2.6264,
"step": 47
},
{
"epoch": 0.007836414840210604,
"grad_norm": 1.4269299507141113,
"learning_rate": 8.455313244934324e-05,
"loss": 2.4248,
"step": 48
},
{
"epoch": 0.007999673482714992,
"grad_norm": 1.6290944814682007,
"learning_rate": 8.289693629698564e-05,
"loss": 2.6149,
"step": 49
},
{
"epoch": 0.00816293212521938,
"grad_norm": 2.4885928630828857,
"learning_rate": 8.117449009293668e-05,
"loss": 2.1892,
"step": 50
},
{
"epoch": 0.00816293212521938,
"eval_loss": 2.8629980087280273,
"eval_runtime": 363.5686,
"eval_samples_per_second": 28.374,
"eval_steps_per_second": 14.187,
"step": 50
},
{
"epoch": 0.008326190767723767,
"grad_norm": 2.1676950454711914,
"learning_rate": 7.938926261462366e-05,
"loss": 2.8331,
"step": 51
},
{
"epoch": 0.008489449410228153,
"grad_norm": 1.6274971961975098,
"learning_rate": 7.754484907260513e-05,
"loss": 2.6935,
"step": 52
},
{
"epoch": 0.00865270805273254,
"grad_norm": 1.3204303979873657,
"learning_rate": 7.564496387029532e-05,
"loss": 2.797,
"step": 53
},
{
"epoch": 0.008815966695236929,
"grad_norm": 1.0049480199813843,
"learning_rate": 7.369343312364993e-05,
"loss": 3.0168,
"step": 54
},
{
"epoch": 0.008979225337741316,
"grad_norm": 1.075304388999939,
"learning_rate": 7.169418695587791e-05,
"loss": 3.2103,
"step": 55
},
{
"epoch": 0.009142483980245704,
"grad_norm": 0.8386501669883728,
"learning_rate": 6.965125158269619e-05,
"loss": 3.072,
"step": 56
},
{
"epoch": 0.009305742622750092,
"grad_norm": 0.8426527976989746,
"learning_rate": 6.756874120406714e-05,
"loss": 2.752,
"step": 57
},
{
"epoch": 0.00946900126525448,
"grad_norm": 0.8528467416763306,
"learning_rate": 6.545084971874738e-05,
"loss": 2.8089,
"step": 58
},
{
"epoch": 0.009632259907758867,
"grad_norm": 0.904664158821106,
"learning_rate": 6.330184227833376e-05,
"loss": 2.8771,
"step": 59
},
{
"epoch": 0.009795518550263255,
"grad_norm": 0.7004697322845459,
"learning_rate": 6.112604669781572e-05,
"loss": 2.6807,
"step": 60
},
{
"epoch": 0.009958777192767642,
"grad_norm": 0.7690675258636475,
"learning_rate": 5.8927844739931834e-05,
"loss": 2.9141,
"step": 61
},
{
"epoch": 0.01012203583527203,
"grad_norm": 0.7248542904853821,
"learning_rate": 5.6711663290882776e-05,
"loss": 2.8913,
"step": 62
},
{
"epoch": 0.010285294477776418,
"grad_norm": 0.617339015007019,
"learning_rate": 5.448196544517168e-05,
"loss": 2.8585,
"step": 63
},
{
"epoch": 0.010448553120280805,
"grad_norm": 0.7304785251617432,
"learning_rate": 5.2243241517525754e-05,
"loss": 2.6997,
"step": 64
},
{
"epoch": 0.010611811762785193,
"grad_norm": 0.8117228150367737,
"learning_rate": 5e-05,
"loss": 2.8935,
"step": 65
},
{
"epoch": 0.01077507040528958,
"grad_norm": 0.7086424827575684,
"learning_rate": 4.775675848247427e-05,
"loss": 2.969,
"step": 66
},
{
"epoch": 0.010938329047793967,
"grad_norm": 0.715423583984375,
"learning_rate": 4.551803455482833e-05,
"loss": 2.5869,
"step": 67
},
{
"epoch": 0.011101587690298355,
"grad_norm": 0.7727792263031006,
"learning_rate": 4.328833670911724e-05,
"loss": 2.8389,
"step": 68
},
{
"epoch": 0.011264846332802742,
"grad_norm": 0.6761236786842346,
"learning_rate": 4.107215526006817e-05,
"loss": 2.709,
"step": 69
},
{
"epoch": 0.01142810497530713,
"grad_norm": 0.7613105773925781,
"learning_rate": 3.887395330218429e-05,
"loss": 2.8757,
"step": 70
},
{
"epoch": 0.011591363617811518,
"grad_norm": 0.8669199347496033,
"learning_rate": 3.6698157721666246e-05,
"loss": 3.1531,
"step": 71
},
{
"epoch": 0.011754622260315905,
"grad_norm": 0.8836479783058167,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.7458,
"step": 72
},
{
"epoch": 0.011917880902820293,
"grad_norm": 0.778107225894928,
"learning_rate": 3.243125879593286e-05,
"loss": 2.7683,
"step": 73
},
{
"epoch": 0.01208113954532468,
"grad_norm": 0.6686323285102844,
"learning_rate": 3.0348748417303823e-05,
"loss": 2.4992,
"step": 74
},
{
"epoch": 0.012244398187829068,
"grad_norm": 0.8155844807624817,
"learning_rate": 2.8305813044122097e-05,
"loss": 2.8288,
"step": 75
},
{
"epoch": 0.012407656830333456,
"grad_norm": 0.7905011773109436,
"learning_rate": 2.630656687635007e-05,
"loss": 2.7207,
"step": 76
},
{
"epoch": 0.012570915472837844,
"grad_norm": 0.821061372756958,
"learning_rate": 2.43550361297047e-05,
"loss": 2.6178,
"step": 77
},
{
"epoch": 0.012734174115342231,
"grad_norm": 0.9048423767089844,
"learning_rate": 2.245515092739488e-05,
"loss": 2.8052,
"step": 78
},
{
"epoch": 0.01289743275784662,
"grad_norm": 0.7810814380645752,
"learning_rate": 2.061073738537635e-05,
"loss": 2.9147,
"step": 79
},
{
"epoch": 0.013060691400351007,
"grad_norm": 0.887985348701477,
"learning_rate": 1.8825509907063327e-05,
"loss": 2.681,
"step": 80
},
{
"epoch": 0.013223950042855395,
"grad_norm": 0.8398605585098267,
"learning_rate": 1.7103063703014372e-05,
"loss": 2.7676,
"step": 81
},
{
"epoch": 0.01338720868535978,
"grad_norm": 0.8217864036560059,
"learning_rate": 1.544686755065677e-05,
"loss": 2.7269,
"step": 82
},
{
"epoch": 0.013550467327864168,
"grad_norm": 0.9799962639808655,
"learning_rate": 1.3860256808630428e-05,
"loss": 2.7334,
"step": 83
},
{
"epoch": 0.013713725970368556,
"grad_norm": 0.7503747344017029,
"learning_rate": 1.2346426699819458e-05,
"loss": 2.6707,
"step": 84
},
{
"epoch": 0.013876984612872944,
"grad_norm": 0.9742613434791565,
"learning_rate": 1.090842587659851e-05,
"loss": 2.6392,
"step": 85
},
{
"epoch": 0.014040243255377331,
"grad_norm": 0.7584743499755859,
"learning_rate": 9.549150281252633e-06,
"loss": 2.6515,
"step": 86
},
{
"epoch": 0.014203501897881719,
"grad_norm": 0.7804672718048096,
"learning_rate": 8.271337313934869e-06,
"loss": 2.4738,
"step": 87
},
{
"epoch": 0.014366760540386107,
"grad_norm": 0.8059239983558655,
"learning_rate": 7.077560319906695e-06,
"loss": 2.494,
"step": 88
},
{
"epoch": 0.014530019182890494,
"grad_norm": 0.9126543402671814,
"learning_rate": 5.9702234071631e-06,
"loss": 2.6029,
"step": 89
},
{
"epoch": 0.014693277825394882,
"grad_norm": 0.8912916779518127,
"learning_rate": 4.951556604879048e-06,
"loss": 2.5753,
"step": 90
},
{
"epoch": 0.01485653646789927,
"grad_norm": 0.9568765163421631,
"learning_rate": 4.023611372427471e-06,
"loss": 2.3926,
"step": 91
},
{
"epoch": 0.015019795110403657,
"grad_norm": 1.004371166229248,
"learning_rate": 3.18825646801314e-06,
"loss": 2.7712,
"step": 92
},
{
"epoch": 0.015183053752908045,
"grad_norm": 1.0826361179351807,
"learning_rate": 2.4471741852423237e-06,
"loss": 2.6105,
"step": 93
},
{
"epoch": 0.015346312395412433,
"grad_norm": 0.9342916011810303,
"learning_rate": 1.8018569652073381e-06,
"loss": 2.3885,
"step": 94
},
{
"epoch": 0.01550957103791682,
"grad_norm": 1.158676028251648,
"learning_rate": 1.2536043909088191e-06,
"loss": 2.4456,
"step": 95
},
{
"epoch": 0.015672829680421208,
"grad_norm": 1.0513296127319336,
"learning_rate": 8.035205700685167e-07,
"loss": 2.2801,
"step": 96
},
{
"epoch": 0.015836088322925594,
"grad_norm": 1.4424023628234863,
"learning_rate": 4.52511911603265e-07,
"loss": 2.6424,
"step": 97
},
{
"epoch": 0.015999346965429984,
"grad_norm": 1.7463353872299194,
"learning_rate": 2.012853002380466e-07,
"loss": 2.3797,
"step": 98
},
{
"epoch": 0.01616260560793437,
"grad_norm": 2.0830276012420654,
"learning_rate": 5.0346672934270534e-08,
"loss": 2.5996,
"step": 99
},
{
"epoch": 0.01632586425043876,
"grad_norm": 2.8903799057006836,
"learning_rate": 0.0,
"loss": 2.8615,
"step": 100
},
{
"epoch": 0.01632586425043876,
"eval_loss": 2.735139846801758,
"eval_runtime": 363.8754,
"eval_samples_per_second": 28.35,
"eval_steps_per_second": 14.175,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.728250925121536e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
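A minimal sketch of how one might inspect this checkpoint state programmatically, assuming it is saved under the standard Hugging Face Trainer name trainer_state.json inside the checkpoint directory (the path below is illustrative, not taken from this file). It loads the JSON, splits log_history into training-loss and eval-loss entries, and prints the evaluation summary.

import json

# Hypothetical path; adjust to wherever this checkpoint's trainer_state.json lives.
with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry "loss"; evaluation entries carry "eval_loss" (every eval_steps=50).
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for e in eval_log:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}")

Running this against the state above would list the eval loss dropping from 3.2525 (step 1) to 2.8630 (step 50) to 2.7351 (step 100), matching best_metric.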