{
"best_metric": 0.8729397058486938,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.17486338797814208,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008743169398907104,
"grad_norm": 13.654485702514648,
"learning_rate": 3.3333333333333333e-06,
"loss": 7.2397,
"step": 1
},
{
"epoch": 0.0008743169398907104,
"eval_loss": 2.0816423892974854,
"eval_runtime": 234.4717,
"eval_samples_per_second": 32.857,
"eval_steps_per_second": 4.107,
"step": 1
},
{
"epoch": 0.0017486338797814208,
"grad_norm": 16.08323097229004,
"learning_rate": 6.666666666666667e-06,
"loss": 7.3886,
"step": 2
},
{
"epoch": 0.002622950819672131,
"grad_norm": 16.72583770751953,
"learning_rate": 1e-05,
"loss": 7.2155,
"step": 3
},
{
"epoch": 0.0034972677595628415,
"grad_norm": 16.019792556762695,
"learning_rate": 1.3333333333333333e-05,
"loss": 6.8998,
"step": 4
},
{
"epoch": 0.004371584699453552,
"grad_norm": 10.719884872436523,
"learning_rate": 1.6666666666666667e-05,
"loss": 6.1133,
"step": 5
},
{
"epoch": 0.005245901639344262,
"grad_norm": 99.27400207519531,
"learning_rate": 2e-05,
"loss": 10.2652,
"step": 6
},
{
"epoch": 0.006120218579234973,
"grad_norm": 85.72102355957031,
"learning_rate": 2.3333333333333336e-05,
"loss": 9.529,
"step": 7
},
{
"epoch": 0.006994535519125683,
"grad_norm": 15.085773468017578,
"learning_rate": 2.6666666666666667e-05,
"loss": 5.6537,
"step": 8
},
{
"epoch": 0.007868852459016393,
"grad_norm": 12.465982437133789,
"learning_rate": 3e-05,
"loss": 5.3902,
"step": 9
},
{
"epoch": 0.008743169398907104,
"grad_norm": 9.44223403930664,
"learning_rate": 3.3333333333333335e-05,
"loss": 5.2389,
"step": 10
},
{
"epoch": 0.009617486338797814,
"grad_norm": 7.920454025268555,
"learning_rate": 3.6666666666666666e-05,
"loss": 4.7169,
"step": 11
},
{
"epoch": 0.010491803278688525,
"grad_norm": 8.882831573486328,
"learning_rate": 4e-05,
"loss": 4.4305,
"step": 12
},
{
"epoch": 0.011366120218579235,
"grad_norm": 6.801806926727295,
"learning_rate": 4.3333333333333334e-05,
"loss": 4.3088,
"step": 13
},
{
"epoch": 0.012240437158469945,
"grad_norm": 7.269967555999756,
"learning_rate": 4.666666666666667e-05,
"loss": 5.6956,
"step": 14
},
{
"epoch": 0.013114754098360656,
"grad_norm": 6.725127220153809,
"learning_rate": 5e-05,
"loss": 5.209,
"step": 15
},
{
"epoch": 0.013989071038251366,
"grad_norm": 6.431244373321533,
"learning_rate": 5.333333333333333e-05,
"loss": 5.1324,
"step": 16
},
{
"epoch": 0.014863387978142076,
"grad_norm": 5.522418975830078,
"learning_rate": 5.666666666666667e-05,
"loss": 4.8785,
"step": 17
},
{
"epoch": 0.015737704918032787,
"grad_norm": 13.761688232421875,
"learning_rate": 6e-05,
"loss": 3.9461,
"step": 18
},
{
"epoch": 0.016612021857923497,
"grad_norm": 23.60154914855957,
"learning_rate": 6.333333333333333e-05,
"loss": 1.3604,
"step": 19
},
{
"epoch": 0.017486338797814208,
"grad_norm": 14.374363899230957,
"learning_rate": 6.666666666666667e-05,
"loss": 3.9006,
"step": 20
},
{
"epoch": 0.018360655737704918,
"grad_norm": 8.315186500549316,
"learning_rate": 7e-05,
"loss": 4.7994,
"step": 21
},
{
"epoch": 0.01923497267759563,
"grad_norm": 6.13319730758667,
"learning_rate": 7.333333333333333e-05,
"loss": 4.6416,
"step": 22
},
{
"epoch": 0.02010928961748634,
"grad_norm": 5.479979038238525,
"learning_rate": 7.666666666666667e-05,
"loss": 4.4633,
"step": 23
},
{
"epoch": 0.02098360655737705,
"grad_norm": 6.043623924255371,
"learning_rate": 8e-05,
"loss": 4.2832,
"step": 24
},
{
"epoch": 0.02185792349726776,
"grad_norm": 6.044895172119141,
"learning_rate": 8.333333333333334e-05,
"loss": 2.8783,
"step": 25
},
{
"epoch": 0.02273224043715847,
"grad_norm": 6.741650104522705,
"learning_rate": 8.666666666666667e-05,
"loss": 5.4952,
"step": 26
},
{
"epoch": 0.02360655737704918,
"grad_norm": 6.030343532562256,
"learning_rate": 9e-05,
"loss": 5.1832,
"step": 27
},
{
"epoch": 0.02448087431693989,
"grad_norm": 5.912640571594238,
"learning_rate": 9.333333333333334e-05,
"loss": 4.8941,
"step": 28
},
{
"epoch": 0.0253551912568306,
"grad_norm": 4.158953666687012,
"learning_rate": 9.666666666666667e-05,
"loss": 4.8204,
"step": 29
},
{
"epoch": 0.02622950819672131,
"grad_norm": 4.939655303955078,
"learning_rate": 0.0001,
"loss": 4.4874,
"step": 30
},
{
"epoch": 0.027103825136612022,
"grad_norm": 6.877912521362305,
"learning_rate": 9.999146252290264e-05,
"loss": 3.2959,
"step": 31
},
{
"epoch": 0.027978142076502732,
"grad_norm": 7.6934590339660645,
"learning_rate": 9.996585300715116e-05,
"loss": 1.1399,
"step": 32
},
{
"epoch": 0.028852459016393443,
"grad_norm": 5.374224662780762,
"learning_rate": 9.99231801983717e-05,
"loss": 4.0316,
"step": 33
},
{
"epoch": 0.029726775956284153,
"grad_norm": 4.416872978210449,
"learning_rate": 9.986345866928941e-05,
"loss": 4.449,
"step": 34
},
{
"epoch": 0.030601092896174863,
"grad_norm": 4.096227645874023,
"learning_rate": 9.978670881475172e-05,
"loss": 4.4694,
"step": 35
},
{
"epoch": 0.031475409836065574,
"grad_norm": 4.204520225524902,
"learning_rate": 9.96929568447637e-05,
"loss": 4.1511,
"step": 36
},
{
"epoch": 0.03234972677595629,
"grad_norm": 4.906490802764893,
"learning_rate": 9.958223477553714e-05,
"loss": 3.9031,
"step": 37
},
{
"epoch": 0.033224043715846995,
"grad_norm": 4.104410171508789,
"learning_rate": 9.94545804185573e-05,
"loss": 3.9166,
"step": 38
},
{
"epoch": 0.03409836065573771,
"grad_norm": 4.067167282104492,
"learning_rate": 9.931003736767013e-05,
"loss": 5.0196,
"step": 39
},
{
"epoch": 0.034972677595628415,
"grad_norm": 3.7131476402282715,
"learning_rate": 9.91486549841951e-05,
"loss": 4.8069,
"step": 40
},
{
"epoch": 0.03584699453551913,
"grad_norm": 3.895036220550537,
"learning_rate": 9.89704883800683e-05,
"loss": 4.7326,
"step": 41
},
{
"epoch": 0.036721311475409836,
"grad_norm": 3.369447946548462,
"learning_rate": 9.877559839902184e-05,
"loss": 4.5296,
"step": 42
},
{
"epoch": 0.03759562841530055,
"grad_norm": 3.4959802627563477,
"learning_rate": 9.85640515958057e-05,
"loss": 3.6448,
"step": 43
},
{
"epoch": 0.03846994535519126,
"grad_norm": 3.181074619293213,
"learning_rate": 9.833592021345937e-05,
"loss": 1.2202,
"step": 44
},
{
"epoch": 0.03934426229508197,
"grad_norm": 6.853686332702637,
"learning_rate": 9.809128215864097e-05,
"loss": 3.2771,
"step": 45
},
{
"epoch": 0.04021857923497268,
"grad_norm": 4.203513145446777,
"learning_rate": 9.783022097502204e-05,
"loss": 4.3558,
"step": 46
},
{
"epoch": 0.04109289617486339,
"grad_norm": 3.710810422897339,
"learning_rate": 9.755282581475769e-05,
"loss": 4.2005,
"step": 47
},
{
"epoch": 0.0419672131147541,
"grad_norm": 4.0080156326293945,
"learning_rate": 9.725919140804099e-05,
"loss": 4.1753,
"step": 48
},
{
"epoch": 0.04284153005464481,
"grad_norm": 4.323927402496338,
"learning_rate": 9.694941803075283e-05,
"loss": 4.033,
"step": 49
},
{
"epoch": 0.04371584699453552,
"grad_norm": 5.3773722648620605,
"learning_rate": 9.662361147021779e-05,
"loss": 3.0192,
"step": 50
},
{
"epoch": 0.04371584699453552,
"eval_loss": 0.959044873714447,
"eval_runtime": 235.9651,
"eval_samples_per_second": 32.649,
"eval_steps_per_second": 4.081,
"step": 50
},
{
"epoch": 0.04459016393442623,
"grad_norm": 4.682861804962158,
"learning_rate": 9.628188298907782e-05,
"loss": 5.3309,
"step": 51
},
{
"epoch": 0.04546448087431694,
"grad_norm": 3.6267170906066895,
"learning_rate": 9.592434928729616e-05,
"loss": 4.896,
"step": 52
},
{
"epoch": 0.046338797814207654,
"grad_norm": 2.9397659301757812,
"learning_rate": 9.555113246230442e-05,
"loss": 4.594,
"step": 53
},
{
"epoch": 0.04721311475409836,
"grad_norm": 3.7889232635498047,
"learning_rate": 9.516235996730645e-05,
"loss": 4.6179,
"step": 54
},
{
"epoch": 0.048087431693989074,
"grad_norm": 3.4238991737365723,
"learning_rate": 9.475816456775313e-05,
"loss": 4.3046,
"step": 55
},
{
"epoch": 0.04896174863387978,
"grad_norm": 6.835710048675537,
"learning_rate": 9.43386842960031e-05,
"loss": 1.9497,
"step": 56
},
{
"epoch": 0.049836065573770495,
"grad_norm": 6.923791885375977,
"learning_rate": 9.39040624041849e-05,
"loss": 2.179,
"step": 57
},
{
"epoch": 0.0507103825136612,
"grad_norm": 4.5648932456970215,
"learning_rate": 9.345444731527642e-05,
"loss": 4.3407,
"step": 58
},
{
"epoch": 0.051584699453551916,
"grad_norm": 3.808917284011841,
"learning_rate": 9.298999257241863e-05,
"loss": 4.3087,
"step": 59
},
{
"epoch": 0.05245901639344262,
"grad_norm": 3.44407320022583,
"learning_rate": 9.251085678648072e-05,
"loss": 4.0944,
"step": 60
},
{
"epoch": 0.05333333333333334,
"grad_norm": 3.4684829711914062,
"learning_rate": 9.201720358189464e-05,
"loss": 3.9452,
"step": 61
},
{
"epoch": 0.054207650273224044,
"grad_norm": 4.555397033691406,
"learning_rate": 9.150920154077754e-05,
"loss": 3.8704,
"step": 62
},
{
"epoch": 0.05508196721311476,
"grad_norm": 3.6485466957092285,
"learning_rate": 9.098702414536107e-05,
"loss": 3.9606,
"step": 63
},
{
"epoch": 0.055956284153005464,
"grad_norm": 3.0883123874664307,
"learning_rate": 9.045084971874738e-05,
"loss": 5.0169,
"step": 64
},
{
"epoch": 0.05683060109289618,
"grad_norm": 2.731473684310913,
"learning_rate": 8.9900861364012e-05,
"loss": 4.6796,
"step": 65
},
{
"epoch": 0.057704918032786885,
"grad_norm": 3.1372900009155273,
"learning_rate": 8.933724690167417e-05,
"loss": 4.5271,
"step": 66
},
{
"epoch": 0.0585792349726776,
"grad_norm": 2.9355735778808594,
"learning_rate": 8.876019880555649e-05,
"loss": 4.2805,
"step": 67
},
{
"epoch": 0.059453551912568306,
"grad_norm": 2.6107537746429443,
"learning_rate": 8.816991413705516e-05,
"loss": 3.3194,
"step": 68
},
{
"epoch": 0.06032786885245902,
"grad_norm": 9.008191108703613,
"learning_rate": 8.756659447784368e-05,
"loss": 1.2466,
"step": 69
},
{
"epoch": 0.06120218579234973,
"grad_norm": 3.1812331676483154,
"learning_rate": 8.695044586103296e-05,
"loss": 3.4345,
"step": 70
},
{
"epoch": 0.06207650273224044,
"grad_norm": 3.384528636932373,
"learning_rate": 8.632167870081121e-05,
"loss": 4.3751,
"step": 71
},
{
"epoch": 0.06295081967213115,
"grad_norm": 2.8234190940856934,
"learning_rate": 8.568050772058762e-05,
"loss": 4.2436,
"step": 72
},
{
"epoch": 0.06382513661202185,
"grad_norm": 3.3963890075683594,
"learning_rate": 8.502715187966455e-05,
"loss": 4.0892,
"step": 73
},
{
"epoch": 0.06469945355191258,
"grad_norm": 3.8028182983398438,
"learning_rate": 8.436183429846313e-05,
"loss": 3.8655,
"step": 74
},
{
"epoch": 0.06557377049180328,
"grad_norm": 4.164633274078369,
"learning_rate": 8.368478218232787e-05,
"loss": 3.1239,
"step": 75
},
{
"epoch": 0.06644808743169399,
"grad_norm": 2.5254907608032227,
"learning_rate": 8.299622674393614e-05,
"loss": 5.017,
"step": 76
},
{
"epoch": 0.0673224043715847,
"grad_norm": 2.449016809463501,
"learning_rate": 8.229640312433937e-05,
"loss": 4.8344,
"step": 77
},
{
"epoch": 0.06819672131147542,
"grad_norm": 2.4585683345794678,
"learning_rate": 8.158555031266254e-05,
"loss": 4.6238,
"step": 78
},
{
"epoch": 0.06907103825136612,
"grad_norm": 2.7618460655212402,
"learning_rate": 8.086391106448965e-05,
"loss": 4.4138,
"step": 79
},
{
"epoch": 0.06994535519125683,
"grad_norm": 2.7431366443634033,
"learning_rate": 8.013173181896283e-05,
"loss": 4.3351,
"step": 80
},
{
"epoch": 0.07081967213114754,
"grad_norm": 2.915708541870117,
"learning_rate": 7.938926261462366e-05,
"loss": 2.7006,
"step": 81
},
{
"epoch": 0.07169398907103826,
"grad_norm": 6.260775566101074,
"learning_rate": 7.863675700402526e-05,
"loss": 1.3999,
"step": 82
},
{
"epoch": 0.07256830601092897,
"grad_norm": 3.342338800430298,
"learning_rate": 7.787447196714427e-05,
"loss": 3.8281,
"step": 83
},
{
"epoch": 0.07344262295081967,
"grad_norm": 3.0507147312164307,
"learning_rate": 7.710266782362247e-05,
"loss": 4.256,
"step": 84
},
{
"epoch": 0.07431693989071038,
"grad_norm": 3.881892681121826,
"learning_rate": 7.63216081438678e-05,
"loss": 4.2829,
"step": 85
},
{
"epoch": 0.0751912568306011,
"grad_norm": 3.3180391788482666,
"learning_rate": 7.553155965904535e-05,
"loss": 3.8737,
"step": 86
},
{
"epoch": 0.0760655737704918,
"grad_norm": 3.3876357078552246,
"learning_rate": 7.473279216998895e-05,
"loss": 3.9718,
"step": 87
},
{
"epoch": 0.07693989071038251,
"grad_norm": 5.030557155609131,
"learning_rate": 7.392557845506432e-05,
"loss": 3.6383,
"step": 88
},
{
"epoch": 0.07781420765027322,
"grad_norm": 2.6808958053588867,
"learning_rate": 7.311019417701566e-05,
"loss": 4.9168,
"step": 89
},
{
"epoch": 0.07868852459016394,
"grad_norm": 2.51533842086792,
"learning_rate": 7.228691778882693e-05,
"loss": 4.7042,
"step": 90
},
{
"epoch": 0.07956284153005465,
"grad_norm": 2.41265869140625,
"learning_rate": 7.145603043863045e-05,
"loss": 4.3564,
"step": 91
},
{
"epoch": 0.08043715846994536,
"grad_norm": 2.5100467205047607,
"learning_rate": 7.061781587369519e-05,
"loss": 4.3584,
"step": 92
},
{
"epoch": 0.08131147540983606,
"grad_norm": 2.604959487915039,
"learning_rate": 6.977256034352712e-05,
"loss": 4.0976,
"step": 93
},
{
"epoch": 0.08218579234972678,
"grad_norm": 5.754001617431641,
"learning_rate": 6.892055250211552e-05,
"loss": 0.9558,
"step": 94
},
{
"epoch": 0.08306010928961749,
"grad_norm": 3.077754020690918,
"learning_rate": 6.806208330935766e-05,
"loss": 3.4805,
"step": 95
},
{
"epoch": 0.0839344262295082,
"grad_norm": 2.9729459285736084,
"learning_rate": 6.719744593169641e-05,
"loss": 4.2485,
"step": 96
},
{
"epoch": 0.0848087431693989,
"grad_norm": 2.972970962524414,
"learning_rate": 6.632693564200416e-05,
"loss": 4.1844,
"step": 97
},
{
"epoch": 0.08568306010928962,
"grad_norm": 2.8650026321411133,
"learning_rate": 6.545084971874738e-05,
"loss": 3.9211,
"step": 98
},
{
"epoch": 0.08655737704918033,
"grad_norm": 3.496151924133301,
"learning_rate": 6.456948734446624e-05,
"loss": 3.981,
"step": 99
},
{
"epoch": 0.08743169398907104,
"grad_norm": 4.358485698699951,
"learning_rate": 6.368314950360415e-05,
"loss": 3.0869,
"step": 100
},
{
"epoch": 0.08743169398907104,
"eval_loss": 0.922777533531189,
"eval_runtime": 236.35,
"eval_samples_per_second": 32.596,
"eval_steps_per_second": 4.074,
"step": 100
},
{
"epoch": 0.08830601092896175,
"grad_norm": 2.5874650478363037,
"learning_rate": 6.279213887972179e-05,
"loss": 4.9147,
"step": 101
},
{
"epoch": 0.08918032786885247,
"grad_norm": 2.5007927417755127,
"learning_rate": 6.189675975213094e-05,
"loss": 4.7943,
"step": 102
},
{
"epoch": 0.09005464480874317,
"grad_norm": 2.5372159481048584,
"learning_rate": 6.099731789198344e-05,
"loss": 4.6289,
"step": 103
},
{
"epoch": 0.09092896174863388,
"grad_norm": 2.671329975128174,
"learning_rate": 6.009412045785051e-05,
"loss": 4.3382,
"step": 104
},
{
"epoch": 0.09180327868852459,
"grad_norm": 2.382216691970825,
"learning_rate": 5.918747589082853e-05,
"loss": 4.4252,
"step": 105
},
{
"epoch": 0.09267759562841531,
"grad_norm": 3.9462063312530518,
"learning_rate": 5.82776938092065e-05,
"loss": 1.7697,
"step": 106
},
{
"epoch": 0.09355191256830601,
"grad_norm": 3.019932985305786,
"learning_rate": 5.736508490273188e-05,
"loss": 2.4262,
"step": 107
},
{
"epoch": 0.09442622950819672,
"grad_norm": 3.332878589630127,
"learning_rate": 5.644996082651017e-05,
"loss": 4.2257,
"step": 108
},
{
"epoch": 0.09530054644808743,
"grad_norm": 3.230023145675659,
"learning_rate": 5.553263409457504e-05,
"loss": 4.1084,
"step": 109
},
{
"epoch": 0.09617486338797815,
"grad_norm": 2.87276029586792,
"learning_rate": 5.4613417973165106e-05,
"loss": 3.9562,
"step": 110
},
{
"epoch": 0.09704918032786886,
"grad_norm": 3.271235227584839,
"learning_rate": 5.3692626373743706e-05,
"loss": 4.0525,
"step": 111
},
{
"epoch": 0.09792349726775956,
"grad_norm": 3.6405093669891357,
"learning_rate": 5.27705737457985e-05,
"loss": 3.5851,
"step": 112
},
{
"epoch": 0.09879781420765027,
"grad_norm": 2.578488349914551,
"learning_rate": 5.184757496945726e-05,
"loss": 3.237,
"step": 113
},
{
"epoch": 0.09967213114754099,
"grad_norm": 2.469647169113159,
"learning_rate": 5.092394524795649e-05,
"loss": 4.771,
"step": 114
},
{
"epoch": 0.1005464480874317,
"grad_norm": 2.3568105697631836,
"learning_rate": 5e-05,
"loss": 4.6737,
"step": 115
},
{
"epoch": 0.1014207650273224,
"grad_norm": 2.3874001502990723,
"learning_rate": 4.907605475204352e-05,
"loss": 4.5127,
"step": 116
},
{
"epoch": 0.10229508196721311,
"grad_norm": 2.6388347148895264,
"learning_rate": 4.8152425030542766e-05,
"loss": 4.1676,
"step": 117
},
{
"epoch": 0.10316939890710383,
"grad_norm": 3.294583320617676,
"learning_rate": 4.72294262542015e-05,
"loss": 4.1412,
"step": 118
},
{
"epoch": 0.10404371584699454,
"grad_norm": 6.407454967498779,
"learning_rate": 4.6307373626256306e-05,
"loss": 1.1808,
"step": 119
},
{
"epoch": 0.10491803278688525,
"grad_norm": 4.188323020935059,
"learning_rate": 4.5386582026834906e-05,
"loss": 2.9939,
"step": 120
},
{
"epoch": 0.10579234972677595,
"grad_norm": 2.9078667163848877,
"learning_rate": 4.446736590542497e-05,
"loss": 4.1543,
"step": 121
},
{
"epoch": 0.10666666666666667,
"grad_norm": 2.980272054672241,
"learning_rate": 4.3550039173489845e-05,
"loss": 4.0966,
"step": 122
},
{
"epoch": 0.10754098360655738,
"grad_norm": 2.8403475284576416,
"learning_rate": 4.2634915097268115e-05,
"loss": 3.9875,
"step": 123
},
{
"epoch": 0.10841530054644809,
"grad_norm": 3.1479694843292236,
"learning_rate": 4.1722306190793495e-05,
"loss": 3.698,
"step": 124
},
{
"epoch": 0.1092896174863388,
"grad_norm": 2.990969657897949,
"learning_rate": 4.0812524109171476e-05,
"loss": 2.8012,
"step": 125
},
{
"epoch": 0.11016393442622952,
"grad_norm": 2.2755610942840576,
"learning_rate": 3.99058795421495e-05,
"loss": 4.8265,
"step": 126
},
{
"epoch": 0.11103825136612022,
"grad_norm": 2.31410551071167,
"learning_rate": 3.9002682108016585e-05,
"loss": 4.6448,
"step": 127
},
{
"epoch": 0.11191256830601093,
"grad_norm": 2.463662624359131,
"learning_rate": 3.8103240247869075e-05,
"loss": 4.5084,
"step": 128
},
{
"epoch": 0.11278688524590164,
"grad_norm": 2.496032238006592,
"learning_rate": 3.720786112027822e-05,
"loss": 4.4348,
"step": 129
},
{
"epoch": 0.11366120218579236,
"grad_norm": 2.351569890975952,
"learning_rate": 3.631685049639586e-05,
"loss": 4.1546,
"step": 130
},
{
"epoch": 0.11453551912568306,
"grad_norm": 3.018563747406006,
"learning_rate": 3.543051265553377e-05,
"loss": 1.6488,
"step": 131
},
{
"epoch": 0.11540983606557377,
"grad_norm": 3.038093090057373,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.8737,
"step": 132
},
{
"epoch": 0.11628415300546448,
"grad_norm": 2.8064956665039062,
"learning_rate": 3.367306435799584e-05,
"loss": 4.0904,
"step": 133
},
{
"epoch": 0.1171584699453552,
"grad_norm": 2.8191850185394287,
"learning_rate": 3.2802554068303596e-05,
"loss": 4.0826,
"step": 134
},
{
"epoch": 0.1180327868852459,
"grad_norm": 2.920180559158325,
"learning_rate": 3.1937916690642356e-05,
"loss": 4.1058,
"step": 135
},
{
"epoch": 0.11890710382513661,
"grad_norm": 3.0269107818603516,
"learning_rate": 3.107944749788449e-05,
"loss": 3.8935,
"step": 136
},
{
"epoch": 0.11978142076502732,
"grad_norm": 3.5120015144348145,
"learning_rate": 3.0227439656472877e-05,
"loss": 3.2984,
"step": 137
},
{
"epoch": 0.12065573770491804,
"grad_norm": 2.480536699295044,
"learning_rate": 2.9382184126304834e-05,
"loss": 3.3286,
"step": 138
},
{
"epoch": 0.12153005464480875,
"grad_norm": 2.186405658721924,
"learning_rate": 2.8543969561369556e-05,
"loss": 4.7104,
"step": 139
},
{
"epoch": 0.12240437158469945,
"grad_norm": 2.1782374382019043,
"learning_rate": 2.771308221117309e-05,
"loss": 4.5695,
"step": 140
},
{
"epoch": 0.12327868852459016,
"grad_norm": 2.4011337757110596,
"learning_rate": 2.688980582298435e-05,
"loss": 4.4038,
"step": 141
},
{
"epoch": 0.12415300546448088,
"grad_norm": 2.8499345779418945,
"learning_rate": 2.607442154493568e-05,
"loss": 4.3679,
"step": 142
},
{
"epoch": 0.1250273224043716,
"grad_norm": 2.359773635864258,
"learning_rate": 2.5267207830011068e-05,
"loss": 4.1744,
"step": 143
},
{
"epoch": 0.1259016393442623,
"grad_norm": 2.53583025932312,
"learning_rate": 2.446844034095466e-05,
"loss": 1.2425,
"step": 144
},
{
"epoch": 0.126775956284153,
"grad_norm": 2.228820323944092,
"learning_rate": 2.3678391856132204e-05,
"loss": 2.4322,
"step": 145
},
{
"epoch": 0.1276502732240437,
"grad_norm": 2.6025636196136475,
"learning_rate": 2.2897332176377528e-05,
"loss": 4.0954,
"step": 146
},
{
"epoch": 0.12852459016393442,
"grad_norm": 2.937838315963745,
"learning_rate": 2.2125528032855724e-05,
"loss": 4.0482,
"step": 147
},
{
"epoch": 0.12939890710382515,
"grad_norm": 2.7011358737945557,
"learning_rate": 2.136324299597474e-05,
"loss": 3.9028,
"step": 148
},
{
"epoch": 0.13027322404371586,
"grad_norm": 3.2519752979278564,
"learning_rate": 2.061073738537635e-05,
"loss": 3.5576,
"step": 149
},
{
"epoch": 0.13114754098360656,
"grad_norm": 3.854916572570801,
"learning_rate": 1.9868268181037185e-05,
"loss": 2.6951,
"step": 150
},
{
"epoch": 0.13114754098360656,
"eval_loss": 0.8762643337249756,
"eval_runtime": 236.558,
"eval_samples_per_second": 32.567,
"eval_steps_per_second": 4.071,
"step": 150
},
{
"epoch": 0.13202185792349727,
"grad_norm": 2.5905094146728516,
"learning_rate": 1.9136088935510362e-05,
"loss": 4.8717,
"step": 151
},
{
"epoch": 0.13289617486338798,
"grad_norm": 2.4038398265838623,
"learning_rate": 1.8414449687337464e-05,
"loss": 4.6608,
"step": 152
},
{
"epoch": 0.13377049180327868,
"grad_norm": 2.2000529766082764,
"learning_rate": 1.7703596875660645e-05,
"loss": 4.4426,
"step": 153
},
{
"epoch": 0.1346448087431694,
"grad_norm": 2.210831642150879,
"learning_rate": 1.700377325606388e-05,
"loss": 4.1486,
"step": 154
},
{
"epoch": 0.1355191256830601,
"grad_norm": 2.5839877128601074,
"learning_rate": 1.631521781767214e-05,
"loss": 4.1324,
"step": 155
},
{
"epoch": 0.13639344262295083,
"grad_norm": 2.3235881328582764,
"learning_rate": 1.5638165701536868e-05,
"loss": 3.3773,
"step": 156
},
{
"epoch": 0.13726775956284154,
"grad_norm": 3.597020149230957,
"learning_rate": 1.4972848120335453e-05,
"loss": 0.7716,
"step": 157
},
{
"epoch": 0.13814207650273225,
"grad_norm": 2.658891439437866,
"learning_rate": 1.4319492279412388e-05,
"loss": 3.7132,
"step": 158
},
{
"epoch": 0.13901639344262295,
"grad_norm": 2.7973034381866455,
"learning_rate": 1.3678321299188801e-05,
"loss": 3.9698,
"step": 159
},
{
"epoch": 0.13989071038251366,
"grad_norm": 2.676218032836914,
"learning_rate": 1.3049554138967051e-05,
"loss": 3.7384,
"step": 160
},
{
"epoch": 0.14076502732240437,
"grad_norm": 2.9465088844299316,
"learning_rate": 1.2433405522156332e-05,
"loss": 3.7877,
"step": 161
},
{
"epoch": 0.14163934426229507,
"grad_norm": 3.1927568912506104,
"learning_rate": 1.183008586294485e-05,
"loss": 3.6949,
"step": 162
},
{
"epoch": 0.14251366120218578,
"grad_norm": 3.284546375274658,
"learning_rate": 1.1239801194443506e-05,
"loss": 3.6657,
"step": 163
},
{
"epoch": 0.14338797814207652,
"grad_norm": 2.2003931999206543,
"learning_rate": 1.066275309832584e-05,
"loss": 4.6931,
"step": 164
},
{
"epoch": 0.14426229508196722,
"grad_norm": 2.172837972640991,
"learning_rate": 1.0099138635988026e-05,
"loss": 4.4003,
"step": 165
},
{
"epoch": 0.14513661202185793,
"grad_norm": 2.1838595867156982,
"learning_rate": 9.549150281252633e-06,
"loss": 4.1883,
"step": 166
},
{
"epoch": 0.14601092896174864,
"grad_norm": 2.1888182163238525,
"learning_rate": 9.012975854638949e-06,
"loss": 4.1322,
"step": 167
},
{
"epoch": 0.14688524590163934,
"grad_norm": 3.239513874053955,
"learning_rate": 8.490798459222476e-06,
"loss": 3.7772,
"step": 168
},
{
"epoch": 0.14775956284153005,
"grad_norm": 3.1013920307159424,
"learning_rate": 7.982796418105371e-06,
"loss": 0.8918,
"step": 169
},
{
"epoch": 0.14863387978142076,
"grad_norm": 2.655848503112793,
"learning_rate": 7.489143213519301e-06,
"loss": 2.7238,
"step": 170
},
{
"epoch": 0.14950819672131146,
"grad_norm": 2.58459734916687,
"learning_rate": 7.010007427581378e-06,
"loss": 4.0364,
"step": 171
},
{
"epoch": 0.1503825136612022,
"grad_norm": 2.6366162300109863,
"learning_rate": 6.5455526847235825e-06,
"loss": 3.8476,
"step": 172
},
{
"epoch": 0.1512568306010929,
"grad_norm": 2.7747936248779297,
"learning_rate": 6.0959375958151045e-06,
"loss": 4.0037,
"step": 173
},
{
"epoch": 0.1521311475409836,
"grad_norm": 3.180020809173584,
"learning_rate": 5.6613157039969055e-06,
"loss": 3.636,
"step": 174
},
{
"epoch": 0.15300546448087432,
"grad_norm": 3.588770627975464,
"learning_rate": 5.241835432246889e-06,
"loss": 2.8695,
"step": 175
},
{
"epoch": 0.15387978142076503,
"grad_norm": 2.042081356048584,
"learning_rate": 4.837640032693558e-06,
"loss": 4.6747,
"step": 176
},
{
"epoch": 0.15475409836065573,
"grad_norm": 2.1068990230560303,
"learning_rate": 4.448867537695578e-06,
"loss": 4.5457,
"step": 177
},
{
"epoch": 0.15562841530054644,
"grad_norm": 2.1162285804748535,
"learning_rate": 4.075650712703849e-06,
"loss": 4.4106,
"step": 178
},
{
"epoch": 0.15650273224043715,
"grad_norm": 2.2019155025482178,
"learning_rate": 3.71811701092219e-06,
"loss": 4.3258,
"step": 179
},
{
"epoch": 0.15737704918032788,
"grad_norm": 2.14262318611145,
"learning_rate": 3.376388529782215e-06,
"loss": 4.1658,
"step": 180
},
{
"epoch": 0.1582513661202186,
"grad_norm": 4.014858722686768,
"learning_rate": 3.0505819692471792e-06,
"loss": 1.7021,
"step": 181
},
{
"epoch": 0.1591256830601093,
"grad_norm": 2.9649558067321777,
"learning_rate": 2.7408085919590264e-06,
"loss": 1.7748,
"step": 182
},
{
"epoch": 0.16,
"grad_norm": 2.5048012733459473,
"learning_rate": 2.4471741852423237e-06,
"loss": 3.8896,
"step": 183
},
{
"epoch": 0.1608743169398907,
"grad_norm": 2.5167641639709473,
"learning_rate": 2.1697790249779636e-06,
"loss": 4.0373,
"step": 184
},
{
"epoch": 0.16174863387978142,
"grad_norm": 2.5736234188079834,
"learning_rate": 1.908717841359048e-06,
"loss": 3.9059,
"step": 185
},
{
"epoch": 0.16262295081967212,
"grad_norm": 2.9413957595825195,
"learning_rate": 1.6640797865406288e-06,
"loss": 3.7599,
"step": 186
},
{
"epoch": 0.16349726775956283,
"grad_norm": 3.0254104137420654,
"learning_rate": 1.4359484041943038e-06,
"loss": 3.6284,
"step": 187
},
{
"epoch": 0.16437158469945357,
"grad_norm": 2.965834617614746,
"learning_rate": 1.2244016009781701e-06,
"loss": 3.767,
"step": 188
},
{
"epoch": 0.16524590163934427,
"grad_norm": 2.0846035480499268,
"learning_rate": 1.0295116199317057e-06,
"loss": 4.5783,
"step": 189
},
{
"epoch": 0.16612021857923498,
"grad_norm": 2.029114007949829,
"learning_rate": 8.513450158049108e-07,
"loss": 4.5013,
"step": 190
},
{
"epoch": 0.1669945355191257,
"grad_norm": 2.152585506439209,
"learning_rate": 6.899626323298713e-07,
"loss": 4.3204,
"step": 191
},
{
"epoch": 0.1678688524590164,
"grad_norm": 2.176893711090088,
"learning_rate": 5.454195814427021e-07,
"loss": 4.1558,
"step": 192
},
{
"epoch": 0.1687431693989071,
"grad_norm": 2.565077304840088,
"learning_rate": 4.177652244628627e-07,
"loss": 3.9346,
"step": 193
},
{
"epoch": 0.1696174863387978,
"grad_norm": 2.8306026458740234,
"learning_rate": 3.0704315523631953e-07,
"loss": 0.7946,
"step": 194
},
{
"epoch": 0.17049180327868851,
"grad_norm": 2.9832398891448975,
"learning_rate": 2.1329118524827662e-07,
"loss": 2.1061,
"step": 195
},
{
"epoch": 0.17136612021857925,
"grad_norm": 2.5028891563415527,
"learning_rate": 1.3654133071059893e-07,
"loss": 3.9968,
"step": 196
},
{
"epoch": 0.17224043715846996,
"grad_norm": 2.7385191917419434,
"learning_rate": 7.681980162830282e-08,
"loss": 3.7562,
"step": 197
},
{
"epoch": 0.17311475409836066,
"grad_norm": 2.5910420417785645,
"learning_rate": 3.4146992848854695e-08,
"loss": 3.7834,
"step": 198
},
{
"epoch": 0.17398907103825137,
"grad_norm": 3.053351402282715,
"learning_rate": 8.537477097364522e-09,
"loss": 3.7311,
"step": 199
},
{
"epoch": 0.17486338797814208,
"grad_norm": 3.4200799465179443,
"learning_rate": 0.0,
"loss": 2.7523,
"step": 200
},
{
"epoch": 0.17486338797814208,
"eval_loss": 0.8729397058486938,
"eval_runtime": 238.5164,
"eval_samples_per_second": 32.3,
"eval_steps_per_second": 4.037,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.7869607753115238e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}