{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.197845249755142,
"eval_steps": 76,
"global_step": 303,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000652954619653934,
"grad_norm": 4.757781028747559,
"learning_rate": 4.000000000000001e-06,
"loss": 4.2047,
"step": 1
},
{
"epoch": 0.000652954619653934,
"eval_loss": null,
"eval_runtime": 704.7675,
"eval_samples_per_second": 3.661,
"eval_steps_per_second": 0.915,
"step": 1
},
{
"epoch": 0.001305909239307868,
"grad_norm": 4.874630928039551,
"learning_rate": 8.000000000000001e-06,
"loss": 4.1653,
"step": 2
},
{
"epoch": 0.0019588638589618022,
"grad_norm": 5.5883331298828125,
"learning_rate": 1.2e-05,
"loss": 4.1635,
"step": 3
},
{
"epoch": 0.002611818478615736,
"grad_norm": 5.3879313468933105,
"learning_rate": 1.6000000000000003e-05,
"loss": 3.8569,
"step": 4
},
{
"epoch": 0.00326477309826967,
"grad_norm": 4.848824501037598,
"learning_rate": 2e-05,
"loss": 4.0147,
"step": 5
},
{
"epoch": 0.0039177277179236044,
"grad_norm": 5.3052978515625,
"learning_rate": 2.4e-05,
"loss": 3.7956,
"step": 6
},
{
"epoch": 0.004570682337577538,
"grad_norm": 6.782866954803467,
"learning_rate": 2.8000000000000003e-05,
"loss": 4.6771,
"step": 7
},
{
"epoch": 0.005223636957231472,
"grad_norm": 6.1247735023498535,
"learning_rate": 3.2000000000000005e-05,
"loss": 3.8201,
"step": 8
},
{
"epoch": 0.005876591576885406,
"grad_norm": 5.305412769317627,
"learning_rate": 3.6e-05,
"loss": 3.2011,
"step": 9
},
{
"epoch": 0.00652954619653934,
"grad_norm": 4.974228858947754,
"learning_rate": 4e-05,
"loss": 3.2488,
"step": 10
},
{
"epoch": 0.007182500816193275,
"grad_norm": 4.17997407913208,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.8667,
"step": 11
},
{
"epoch": 0.007835455435847209,
"grad_norm": 5.494494915008545,
"learning_rate": 4.8e-05,
"loss": 3.0199,
"step": 12
},
{
"epoch": 0.008488410055501142,
"grad_norm": 3.663966178894043,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.5652,
"step": 13
},
{
"epoch": 0.009141364675155077,
"grad_norm": 3.196805477142334,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.6467,
"step": 14
},
{
"epoch": 0.009794319294809012,
"grad_norm": 3.161735773086548,
"learning_rate": 6e-05,
"loss": 2.4477,
"step": 15
},
{
"epoch": 0.010447273914462945,
"grad_norm": 4.473456859588623,
"learning_rate": 6.400000000000001e-05,
"loss": 2.4943,
"step": 16
},
{
"epoch": 0.01110022853411688,
"grad_norm": 5.138904094696045,
"learning_rate": 6.800000000000001e-05,
"loss": 2.51,
"step": 17
},
{
"epoch": 0.011753183153770812,
"grad_norm": 5.232083320617676,
"learning_rate": 7.2e-05,
"loss": 2.3384,
"step": 18
},
{
"epoch": 0.012406137773424747,
"grad_norm": 5.67042875289917,
"learning_rate": 7.6e-05,
"loss": 2.3158,
"step": 19
},
{
"epoch": 0.01305909239307868,
"grad_norm": 6.414549350738525,
"learning_rate": 8e-05,
"loss": 2.2479,
"step": 20
},
{
"epoch": 0.013712047012732615,
"grad_norm": 6.064276218414307,
"learning_rate": 8.4e-05,
"loss": 2.689,
"step": 21
},
{
"epoch": 0.01436500163238655,
"grad_norm": 4.989445686340332,
"learning_rate": 8.800000000000001e-05,
"loss": 1.9927,
"step": 22
},
{
"epoch": 0.015017956252040483,
"grad_norm": 6.970681667327881,
"learning_rate": 9.200000000000001e-05,
"loss": 2.0652,
"step": 23
},
{
"epoch": 0.015670910871694418,
"grad_norm": 5.712936878204346,
"learning_rate": 9.6e-05,
"loss": 1.7749,
"step": 24
},
{
"epoch": 0.01632386549134835,
"grad_norm": 6.509803771972656,
"learning_rate": 0.0001,
"loss": 1.6381,
"step": 25
},
{
"epoch": 0.016976820111002284,
"grad_norm": 9.46315860748291,
"learning_rate": 0.00010400000000000001,
"loss": 2.9097,
"step": 26
},
{
"epoch": 0.01762977473065622,
"grad_norm": 7.60649299621582,
"learning_rate": 0.00010800000000000001,
"loss": 2.3185,
"step": 27
},
{
"epoch": 0.018282729350310153,
"grad_norm": 4.304985046386719,
"learning_rate": 0.00011200000000000001,
"loss": 1.8866,
"step": 28
},
{
"epoch": 0.018935683969964087,
"grad_norm": 4.42153787612915,
"learning_rate": 0.000116,
"loss": 1.9317,
"step": 29
},
{
"epoch": 0.019588638589618023,
"grad_norm": 4.993203163146973,
"learning_rate": 0.00012,
"loss": 1.963,
"step": 30
},
{
"epoch": 0.020241593209271956,
"grad_norm": 5.630634784698486,
"learning_rate": 0.000124,
"loss": 2.0134,
"step": 31
},
{
"epoch": 0.02089454782892589,
"grad_norm": 4.011257648468018,
"learning_rate": 0.00012800000000000002,
"loss": 1.5838,
"step": 32
},
{
"epoch": 0.021547502448579822,
"grad_norm": 4.430102348327637,
"learning_rate": 0.000132,
"loss": 1.567,
"step": 33
},
{
"epoch": 0.02220045706823376,
"grad_norm": 4.285562038421631,
"learning_rate": 0.00013600000000000003,
"loss": 1.5668,
"step": 34
},
{
"epoch": 0.022853411687887692,
"grad_norm": 4.76671028137207,
"learning_rate": 0.00014,
"loss": 1.4504,
"step": 35
},
{
"epoch": 0.023506366307541625,
"grad_norm": 3.6335926055908203,
"learning_rate": 0.000144,
"loss": 1.7653,
"step": 36
},
{
"epoch": 0.02415932092719556,
"grad_norm": 3.863640546798706,
"learning_rate": 0.000148,
"loss": 1.7619,
"step": 37
},
{
"epoch": 0.024812275546849494,
"grad_norm": 3.339837074279785,
"learning_rate": 0.000152,
"loss": 1.9767,
"step": 38
},
{
"epoch": 0.025465230166503428,
"grad_norm": 3.004544734954834,
"learning_rate": 0.00015600000000000002,
"loss": 1.954,
"step": 39
},
{
"epoch": 0.02611818478615736,
"grad_norm": 3.1160991191864014,
"learning_rate": 0.00016,
"loss": 2.2944,
"step": 40
},
{
"epoch": 0.026771139405811297,
"grad_norm": 2.9257686138153076,
"learning_rate": 0.000164,
"loss": 2.1756,
"step": 41
},
{
"epoch": 0.02742409402546523,
"grad_norm": 3.0116686820983887,
"learning_rate": 0.000168,
"loss": 2.2031,
"step": 42
},
{
"epoch": 0.028077048645119163,
"grad_norm": 3.1789779663085938,
"learning_rate": 0.000172,
"loss": 1.71,
"step": 43
},
{
"epoch": 0.0287300032647731,
"grad_norm": 3.2652573585510254,
"learning_rate": 0.00017600000000000002,
"loss": 2.108,
"step": 44
},
{
"epoch": 0.029382957884427033,
"grad_norm": 3.3591766357421875,
"learning_rate": 0.00018,
"loss": 2.252,
"step": 45
},
{
"epoch": 0.030035912504080966,
"grad_norm": 4.020256519317627,
"learning_rate": 0.00018400000000000003,
"loss": 2.0209,
"step": 46
},
{
"epoch": 0.0306888671237349,
"grad_norm": 3.1844100952148438,
"learning_rate": 0.000188,
"loss": 1.7205,
"step": 47
},
{
"epoch": 0.031341821743388835,
"grad_norm": 4.041150093078613,
"learning_rate": 0.000192,
"loss": 2.0322,
"step": 48
},
{
"epoch": 0.031994776363042765,
"grad_norm": 3.976959228515625,
"learning_rate": 0.000196,
"loss": 1.5729,
"step": 49
},
{
"epoch": 0.0326477309826967,
"grad_norm": 3.8643813133239746,
"learning_rate": 0.0002,
"loss": 1.3921,
"step": 50
},
{
"epoch": 0.03330068560235064,
"grad_norm": 4.523630142211914,
"learning_rate": 0.0001999922905547776,
"loss": 2.0141,
"step": 51
},
{
"epoch": 0.03395364022200457,
"grad_norm": 3.308136463165283,
"learning_rate": 0.0001999691634078213,
"loss": 1.6375,
"step": 52
},
{
"epoch": 0.034606594841658504,
"grad_norm": 3.70528244972229,
"learning_rate": 0.00019993062212508053,
"loss": 2.1218,
"step": 53
},
{
"epoch": 0.03525954946131244,
"grad_norm": 3.909076690673828,
"learning_rate": 0.0001998766726491935,
"loss": 1.7935,
"step": 54
},
{
"epoch": 0.03591250408096637,
"grad_norm": 3.2719497680664062,
"learning_rate": 0.00019980732329857076,
"loss": 1.4998,
"step": 55
},
{
"epoch": 0.03656545870062031,
"grad_norm": 4.357359886169434,
"learning_rate": 0.0001997225847661127,
"loss": 1.7119,
"step": 56
},
{
"epoch": 0.03721841332027424,
"grad_norm": 3.111414909362793,
"learning_rate": 0.00019962247011756081,
"loss": 1.3332,
"step": 57
},
{
"epoch": 0.03787136793992817,
"grad_norm": 3.3698601722717285,
"learning_rate": 0.00019950699478948309,
"loss": 1.3576,
"step": 58
},
{
"epoch": 0.03852432255958211,
"grad_norm": 4.0696024894714355,
"learning_rate": 0.00019937617658689384,
"loss": 1.2196,
"step": 59
},
{
"epoch": 0.039177277179236046,
"grad_norm": 3.984119176864624,
"learning_rate": 0.00019923003568050844,
"loss": 1.4652,
"step": 60
},
{
"epoch": 0.039830231798889976,
"grad_norm": 3.909024238586426,
"learning_rate": 0.00019906859460363307,
"loss": 1.2951,
"step": 61
},
{
"epoch": 0.04048318641854391,
"grad_norm": 3.550440788269043,
"learning_rate": 0.0001988918782486906,
"loss": 1.4699,
"step": 62
},
{
"epoch": 0.04113614103819784,
"grad_norm": 3.341071367263794,
"learning_rate": 0.0001986999138633821,
"loss": 2.1781,
"step": 63
},
{
"epoch": 0.04178909565785178,
"grad_norm": 2.938025951385498,
"learning_rate": 0.00019849273104648592,
"loss": 1.7088,
"step": 64
},
{
"epoch": 0.042442050277505715,
"grad_norm": 2.956080913543701,
"learning_rate": 0.00019827036174329353,
"loss": 2.0676,
"step": 65
},
{
"epoch": 0.043095004897159644,
"grad_norm": 2.349592924118042,
"learning_rate": 0.00019803284024068427,
"loss": 1.7783,
"step": 66
},
{
"epoch": 0.04374795951681358,
"grad_norm": 2.7854583263397217,
"learning_rate": 0.0001977802031618383,
"loss": 2.222,
"step": 67
},
{
"epoch": 0.04440091413646752,
"grad_norm": 3.6111695766448975,
"learning_rate": 0.00019751248946059014,
"loss": 2.0701,
"step": 68
},
{
"epoch": 0.04505386875612145,
"grad_norm": 3.226724863052368,
"learning_rate": 0.00019722974041542203,
"loss": 2.2502,
"step": 69
},
{
"epoch": 0.045706823375775384,
"grad_norm": 3.4755945205688477,
"learning_rate": 0.0001969319996230995,
"loss": 2.0837,
"step": 70
},
{
"epoch": 0.04635977799542932,
"grad_norm": 3.1673951148986816,
"learning_rate": 0.0001966193129919491,
"loss": 1.8692,
"step": 71
},
{
"epoch": 0.04701273261508325,
"grad_norm": 3.5966238975524902,
"learning_rate": 0.00019629172873477995,
"loss": 2.0036,
"step": 72
},
{
"epoch": 0.047665687234737186,
"grad_norm": 4.344339370727539,
"learning_rate": 0.00019594929736144976,
"loss": 1.963,
"step": 73
},
{
"epoch": 0.04831864185439112,
"grad_norm": 3.6855549812316895,
"learning_rate": 0.00019559207167107684,
"loss": 1.5932,
"step": 74
},
{
"epoch": 0.04897159647404505,
"grad_norm": 3.7931909561157227,
"learning_rate": 0.000195220106743899,
"loss": 1.272,
"step": 75
},
{
"epoch": 0.04962455109369899,
"grad_norm": 3.422001361846924,
"learning_rate": 0.00019483345993278093,
"loss": 1.8281,
"step": 76
},
{
"epoch": 0.04962455109369899,
"eval_loss": null,
"eval_runtime": 704.1079,
"eval_samples_per_second": 3.664,
"eval_steps_per_second": 0.916,
"step": 76
},
{
"epoch": 0.05027750571335292,
"grad_norm": 3.9176759719848633,
"learning_rate": 0.0001944321908543708,
"loss": 1.9556,
"step": 77
},
{
"epoch": 0.050930460333006855,
"grad_norm": 3.578507423400879,
"learning_rate": 0.00019401636137990816,
"loss": 2.025,
"step": 78
},
{
"epoch": 0.05158341495266079,
"grad_norm": 3.641873598098755,
"learning_rate": 0.00019358603562568416,
"loss": 1.667,
"step": 79
},
{
"epoch": 0.05223636957231472,
"grad_norm": 4.218084335327148,
"learning_rate": 0.0001931412799431554,
"loss": 1.797,
"step": 80
},
{
"epoch": 0.05288932419196866,
"grad_norm": 3.496741533279419,
"learning_rate": 0.0001926821629087133,
"loss": 1.5076,
"step": 81
},
{
"epoch": 0.053542278811622594,
"grad_norm": 5.606319427490234,
"learning_rate": 0.00019220875531311045,
"loss": 1.5796,
"step": 82
},
{
"epoch": 0.054195233431276524,
"grad_norm": 3.143007516860962,
"learning_rate": 0.00019172113015054532,
"loss": 1.4222,
"step": 83
},
{
"epoch": 0.05484818805093046,
"grad_norm": 3.359457015991211,
"learning_rate": 0.00019121936260740752,
"loss": 0.9389,
"step": 84
},
{
"epoch": 0.0555011426705844,
"grad_norm": 3.911376476287842,
"learning_rate": 0.00019070353005068484,
"loss": 1.8963,
"step": 85
},
{
"epoch": 0.056154097290238326,
"grad_norm": 3.542954444885254,
"learning_rate": 0.00019017371201603407,
"loss": 1.4677,
"step": 86
},
{
"epoch": 0.05680705190989226,
"grad_norm": 3.1694722175598145,
"learning_rate": 0.00018962999019551754,
"loss": 1.4803,
"step": 87
},
{
"epoch": 0.0574600065295462,
"grad_norm": 2.960282564163208,
"learning_rate": 0.00018907244842500704,
"loss": 1.8923,
"step": 88
},
{
"epoch": 0.05811296114920013,
"grad_norm": 2.6101479530334473,
"learning_rate": 0.00018850117267125738,
"loss": 1.9243,
"step": 89
},
{
"epoch": 0.058765915768854066,
"grad_norm": 3.0619993209838867,
"learning_rate": 0.00018791625101865117,
"loss": 2.1384,
"step": 90
},
{
"epoch": 0.059418870388507995,
"grad_norm": 2.6776371002197266,
"learning_rate": 0.0001873177736556172,
"loss": 1.7285,
"step": 91
},
{
"epoch": 0.06007182500816193,
"grad_norm": 3.687798023223877,
"learning_rate": 0.00018670583286072443,
"loss": 1.8332,
"step": 92
},
{
"epoch": 0.06072477962781587,
"grad_norm": 2.632847547531128,
"learning_rate": 0.0001860805229884536,
"loss": 1.8342,
"step": 93
},
{
"epoch": 0.0613777342474698,
"grad_norm": 3.5173568725585938,
"learning_rate": 0.00018544194045464886,
"loss": 2.004,
"step": 94
},
{
"epoch": 0.062030688867123734,
"grad_norm": 3.2944045066833496,
"learning_rate": 0.0001847901837216515,
"loss": 1.8861,
"step": 95
},
{
"epoch": 0.06268364348677767,
"grad_norm": 3.5300235748291016,
"learning_rate": 0.00018412535328311814,
"loss": 1.7608,
"step": 96
},
{
"epoch": 0.06333659810643161,
"grad_norm": 3.253826856613159,
"learning_rate": 0.0001834475516485257,
"loss": 1.9151,
"step": 97
},
{
"epoch": 0.06398955272608553,
"grad_norm": 3.243023633956909,
"learning_rate": 0.00018275688332736577,
"loss": 1.5671,
"step": 98
},
{
"epoch": 0.06464250734573947,
"grad_norm": 3.3818089962005615,
"learning_rate": 0.00018205345481302998,
"loss": 1.4077,
"step": 99
},
{
"epoch": 0.0652954619653934,
"grad_norm": 3.632511615753174,
"learning_rate": 0.00018133737456639044,
"loss": 1.0454,
"step": 100
},
{
"epoch": 0.06594841658504734,
"grad_norm": 3.6621978282928467,
"learning_rate": 0.0001806087529990758,
"loss": 2.0844,
"step": 101
},
{
"epoch": 0.06660137120470128,
"grad_norm": 5.99480676651001,
"learning_rate": 0.0001798677024564473,
"loss": 1.8015,
"step": 102
},
{
"epoch": 0.06725432582435521,
"grad_norm": 3.063887357711792,
"learning_rate": 0.00017911433720027624,
"loss": 1.7182,
"step": 103
},
{
"epoch": 0.06790728044400914,
"grad_norm": 3.2303333282470703,
"learning_rate": 0.00017834877339112612,
"loss": 1.6701,
"step": 104
},
{
"epoch": 0.06856023506366307,
"grad_norm": 7.370791435241699,
"learning_rate": 0.000177571129070442,
"loss": 1.6819,
"step": 105
},
{
"epoch": 0.06921318968331701,
"grad_norm": 3.4059948921203613,
"learning_rate": 0.00017678152414234968,
"loss": 1.3683,
"step": 106
},
{
"epoch": 0.06986614430297095,
"grad_norm": 4.130568504333496,
"learning_rate": 0.000175980080355168,
"loss": 1.6074,
"step": 107
},
{
"epoch": 0.07051909892262488,
"grad_norm": 4.288647651672363,
"learning_rate": 0.00017516692128263648,
"loss": 1.2521,
"step": 108
},
{
"epoch": 0.07117205354227882,
"grad_norm": 3.245211362838745,
"learning_rate": 0.00017434217230486164,
"loss": 1.3333,
"step": 109
},
{
"epoch": 0.07182500816193274,
"grad_norm": 3.7068018913269043,
"learning_rate": 0.00017350596058898483,
"loss": 1.287,
"step": 110
},
{
"epoch": 0.07247796278158668,
"grad_norm": 3.418928623199463,
"learning_rate": 0.0001726584150695744,
"loss": 1.3896,
"step": 111
},
{
"epoch": 0.07313091740124061,
"grad_norm": 3.3947291374206543,
"learning_rate": 0.00017179966642874543,
"loss": 1.7948,
"step": 112
},
{
"epoch": 0.07378387202089455,
"grad_norm": 2.770167589187622,
"learning_rate": 0.0001709298470760101,
"loss": 1.8008,
"step": 113
},
{
"epoch": 0.07443682664054849,
"grad_norm": 2.841723918914795,
"learning_rate": 0.00017004909112786144,
"loss": 1.7773,
"step": 114
},
{
"epoch": 0.07508978126020241,
"grad_norm": 3.010446071624756,
"learning_rate": 0.00016915753438709417,
"loss": 1.9485,
"step": 115
},
{
"epoch": 0.07574273587985635,
"grad_norm": 2.804893970489502,
"learning_rate": 0.00016825531432186543,
"loss": 1.8283,
"step": 116
},
{
"epoch": 0.07639569049951028,
"grad_norm": 2.607825517654419,
"learning_rate": 0.00016734257004449862,
"loss": 1.5881,
"step": 117
},
{
"epoch": 0.07704864511916422,
"grad_norm": 3.1926357746124268,
"learning_rate": 0.00016641944229003395,
"loss": 1.9909,
"step": 118
},
{
"epoch": 0.07770159973881816,
"grad_norm": 2.6762239933013916,
"learning_rate": 0.00016548607339452853,
"loss": 1.7493,
"step": 119
},
{
"epoch": 0.07835455435847209,
"grad_norm": 2.9282002449035645,
"learning_rate": 0.00016454260727310978,
"loss": 1.6987,
"step": 120
},
{
"epoch": 0.07900750897812601,
"grad_norm": 3.6605136394500732,
"learning_rate": 0.00016358918939778536,
"loss": 2.1916,
"step": 121
},
{
"epoch": 0.07966046359777995,
"grad_norm": 3.031012535095215,
"learning_rate": 0.00016262596677501297,
"loss": 1.9056,
"step": 122
},
{
"epoch": 0.08031341821743389,
"grad_norm": 3.2578601837158203,
"learning_rate": 0.0001616530879230335,
"loss": 1.5707,
"step": 123
},
{
"epoch": 0.08096637283708782,
"grad_norm": 3.2448766231536865,
"learning_rate": 0.00016067070284897137,
"loss": 1.43,
"step": 124
},
{
"epoch": 0.08161932745674176,
"grad_norm": 3.2631771564483643,
"learning_rate": 0.00015967896302570485,
"loss": 1.0851,
"step": 125
},
{
"epoch": 0.08227228207639568,
"grad_norm": 3.316664457321167,
"learning_rate": 0.0001586780213685108,
"loss": 1.9519,
"step": 126
},
{
"epoch": 0.08292523669604962,
"grad_norm": 2.7955403327941895,
"learning_rate": 0.00015766803221148673,
"loss": 1.6003,
"step": 127
},
{
"epoch": 0.08357819131570356,
"grad_norm": 3.2884178161621094,
"learning_rate": 0.0001566491512837543,
"loss": 1.7108,
"step": 128
},
{
"epoch": 0.0842311459353575,
"grad_norm": 3.472278356552124,
"learning_rate": 0.00015562153568544752,
"loss": 1.8399,
"step": 129
},
{
"epoch": 0.08488410055501143,
"grad_norm": 2.900644302368164,
"learning_rate": 0.00015458534386348966,
"loss": 1.6259,
"step": 130
},
{
"epoch": 0.08553705517466537,
"grad_norm": 3.018883228302002,
"learning_rate": 0.0001535407355871626,
"loss": 1.5246,
"step": 131
},
{
"epoch": 0.08619000979431929,
"grad_norm": 3.375364303588867,
"learning_rate": 0.00015248787192347196,
"loss": 1.6209,
"step": 132
},
{
"epoch": 0.08684296441397323,
"grad_norm": 2.9012420177459717,
"learning_rate": 0.00015142691521231267,
"loss": 1.2602,
"step": 133
},
{
"epoch": 0.08749591903362716,
"grad_norm": 4.421230316162109,
"learning_rate": 0.00015035802904143762,
"loss": 1.5445,
"step": 134
},
{
"epoch": 0.0881488736532811,
"grad_norm": 3.3842790126800537,
"learning_rate": 0.00014928137822123452,
"loss": 1.2163,
"step": 135
},
{
"epoch": 0.08880182827293504,
"grad_norm": 3.1379024982452393,
"learning_rate": 0.0001481971287593138,
"loss": 1.6229,
"step": 136
},
{
"epoch": 0.08945478289258897,
"grad_norm": 3.0401148796081543,
"learning_rate": 0.00014710544783491208,
"loss": 2.1325,
"step": 137
},
{
"epoch": 0.0901077375122429,
"grad_norm": 3.1100378036499023,
"learning_rate": 0.00014600650377311522,
"loss": 1.8436,
"step": 138
},
{
"epoch": 0.09076069213189683,
"grad_norm": 2.646742105484009,
"learning_rate": 0.00014490046601890405,
"loss": 2.0225,
"step": 139
},
{
"epoch": 0.09141364675155077,
"grad_norm": 2.7213666439056396,
"learning_rate": 0.00014378750511102826,
"loss": 1.9278,
"step": 140
},
{
"epoch": 0.0920666013712047,
"grad_norm": 2.6186165809631348,
"learning_rate": 0.00014266779265571087,
"loss": 2.1003,
"step": 141
},
{
"epoch": 0.09271955599085864,
"grad_norm": 2.469266653060913,
"learning_rate": 0.00014154150130018866,
"loss": 2.1259,
"step": 142
},
{
"epoch": 0.09337251061051256,
"grad_norm": 2.95566725730896,
"learning_rate": 0.00014040880470609187,
"loss": 2.0982,
"step": 143
},
{
"epoch": 0.0940254652301665,
"grad_norm": 3.408046007156372,
"learning_rate": 0.00013926987752266735,
"loss": 2.1553,
"step": 144
},
{
"epoch": 0.09467841984982044,
"grad_norm": 2.5797595977783203,
"learning_rate": 0.00013812489535984981,
"loss": 1.9652,
"step": 145
},
{
"epoch": 0.09533137446947437,
"grad_norm": 3.9302403926849365,
"learning_rate": 0.00013697403476118454,
"loss": 1.916,
"step": 146
},
{
"epoch": 0.09598432908912831,
"grad_norm": 3.0103251934051514,
"learning_rate": 0.0001358174731766064,
"loss": 1.5778,
"step": 147
},
{
"epoch": 0.09663728370878225,
"grad_norm": 3.0252418518066406,
"learning_rate": 0.00013465538893507907,
"loss": 1.862,
"step": 148
},
{
"epoch": 0.09729023832843617,
"grad_norm": 3.1504366397857666,
"learning_rate": 0.00013348796121709862,
"loss": 1.7159,
"step": 149
},
{
"epoch": 0.0979431929480901,
"grad_norm": 2.718940019607544,
"learning_rate": 0.00013231537002706594,
"loss": 1.1477,
"step": 150
},
{
"epoch": 0.09859614756774404,
"grad_norm": 3.6509456634521484,
"learning_rate": 0.0001311377961655319,
"loss": 1.6706,
"step": 151
},
{
"epoch": 0.09924910218739798,
"grad_norm": 3.2815134525299072,
"learning_rate": 0.00012995542120132017,
"loss": 1.4804,
"step": 152
},
{
"epoch": 0.09924910218739798,
"eval_loss": null,
"eval_runtime": 699.3675,
"eval_samples_per_second": 3.689,
"eval_steps_per_second": 0.922,
"step": 152
},
{
"epoch": 0.09990205680705191,
"grad_norm": 2.6223411560058594,
"learning_rate": 0.00012876842744353112,
"loss": 1.4696,
"step": 153
},
{
"epoch": 0.10055501142670584,
"grad_norm": 3.07543683052063,
"learning_rate": 0.00012757699791343186,
"loss": 1.4233,
"step": 154
},
{
"epoch": 0.10120796604635977,
"grad_norm": 3.1689348220825195,
"learning_rate": 0.0001263813163162364,
"loss": 1.6883,
"step": 155
},
{
"epoch": 0.10186092066601371,
"grad_norm": 3.1390719413757324,
"learning_rate": 0.00012518156701278019,
"loss": 1.9252,
"step": 156
},
{
"epoch": 0.10251387528566765,
"grad_norm": 2.6922318935394287,
"learning_rate": 0.00012397793499109404,
"loss": 1.4566,
"step": 157
},
{
"epoch": 0.10316682990532158,
"grad_norm": 2.970062255859375,
"learning_rate": 0.00012277060583788064,
"loss": 1.4658,
"step": 158
},
{
"epoch": 0.10381978452497552,
"grad_norm": 3.3430395126342773,
"learning_rate": 0.00012155976570989949,
"loss": 1.1341,
"step": 159
},
{
"epoch": 0.10447273914462944,
"grad_norm": 3.108964204788208,
"learning_rate": 0.0001203456013052634,
"loss": 1.4,
"step": 160
},
{
"epoch": 0.10512569376428338,
"grad_norm": 2.5076346397399902,
"learning_rate": 0.00011912829983465168,
"loss": 1.2977,
"step": 161
},
{
"epoch": 0.10577864838393732,
"grad_norm": 2.782015085220337,
"learning_rate": 0.00011790804899244452,
"loss": 1.5569,
"step": 162
},
{
"epoch": 0.10643160300359125,
"grad_norm": 2.4178273677825928,
"learning_rate": 0.00011668503692778239,
"loss": 1.588,
"step": 163
},
{
"epoch": 0.10708455762324519,
"grad_norm": 2.7303292751312256,
"learning_rate": 0.0001154594522155557,
"loss": 1.9145,
"step": 164
},
{
"epoch": 0.10773751224289912,
"grad_norm": 2.631848096847534,
"learning_rate": 0.00011423148382732853,
"loss": 1.9198,
"step": 165
},
{
"epoch": 0.10839046686255305,
"grad_norm": 2.4288294315338135,
"learning_rate": 0.00011300132110220134,
"loss": 1.9154,
"step": 166
},
{
"epoch": 0.10904342148220698,
"grad_norm": 2.742737293243408,
"learning_rate": 0.00011176915371761702,
"loss": 1.8217,
"step": 167
},
{
"epoch": 0.10969637610186092,
"grad_norm": 2.437319278717041,
"learning_rate": 0.00011053517166011471,
"loss": 1.7938,
"step": 168
},
{
"epoch": 0.11034933072151486,
"grad_norm": 2.4741907119750977,
"learning_rate": 0.00010929956519603594,
"loss": 1.7694,
"step": 169
},
{
"epoch": 0.1110022853411688,
"grad_norm": 3.3339791297912598,
"learning_rate": 0.0001080625248421878,
"loss": 1.9348,
"step": 170
},
{
"epoch": 0.11165523996082272,
"grad_norm": 3.3256616592407227,
"learning_rate": 0.0001068242413364671,
"loss": 1.8868,
"step": 171
},
{
"epoch": 0.11230819458047665,
"grad_norm": 2.873408317565918,
"learning_rate": 0.00010558490560845107,
"loss": 1.7327,
"step": 172
},
{
"epoch": 0.11296114920013059,
"grad_norm": 2.9288012981414795,
"learning_rate": 0.00010434470874995781,
"loss": 1.9178,
"step": 173
},
{
"epoch": 0.11361410381978453,
"grad_norm": 3.4907095432281494,
"learning_rate": 0.00010310384198558225,
"loss": 1.5658,
"step": 174
},
{
"epoch": 0.11426705843943846,
"grad_norm": 2.5337088108062744,
"learning_rate": 0.00010186249664321139,
"loss": 0.9634,
"step": 175
},
{
"epoch": 0.1149200130590924,
"grad_norm": 3.862973690032959,
"learning_rate": 0.00010062086412452352,
"loss": 1.7911,
"step": 176
},
{
"epoch": 0.11557296767874632,
"grad_norm": 3.585923194885254,
"learning_rate": 9.93791358754765e-05,
"loss": 1.9018,
"step": 177
},
{
"epoch": 0.11622592229840026,
"grad_norm": 2.8271729946136475,
"learning_rate": 9.813750335678866e-05,
"loss": 1.5603,
"step": 178
},
{
"epoch": 0.1168788769180542,
"grad_norm": 2.844407796859741,
"learning_rate": 9.689615801441774e-05,
"loss": 1.4833,
"step": 179
},
{
"epoch": 0.11753183153770813,
"grad_norm": 3.0475728511810303,
"learning_rate": 9.56552912500422e-05,
"loss": 1.4969,
"step": 180
},
{
"epoch": 0.11818478615736207,
"grad_norm": 3.360382318496704,
"learning_rate": 9.441509439154895e-05,
"loss": 1.7167,
"step": 181
},
{
"epoch": 0.11883774077701599,
"grad_norm": 2.9843080043792725,
"learning_rate": 9.317575866353292e-05,
"loss": 1.4516,
"step": 182
},
{
"epoch": 0.11949069539666993,
"grad_norm": 3.0425808429718018,
"learning_rate": 9.193747515781224e-05,
"loss": 1.2171,
"step": 183
},
{
"epoch": 0.12014365001632386,
"grad_norm": 3.03841495513916,
"learning_rate": 9.070043480396404e-05,
"loss": 1.2647,
"step": 184
},
{
"epoch": 0.1207966046359778,
"grad_norm": 2.7172021865844727,
"learning_rate": 8.94648283398853e-05,
"loss": 1.0993,
"step": 185
},
{
"epoch": 0.12144955925563174,
"grad_norm": 3.215526580810547,
"learning_rate": 8.823084628238298e-05,
"loss": 1.2345,
"step": 186
},
{
"epoch": 0.12210251387528567,
"grad_norm": 2.8291847705841064,
"learning_rate": 8.699867889779868e-05,
"loss": 1.7698,
"step": 187
},
{
"epoch": 0.1227554684949396,
"grad_norm": 2.8613245487213135,
"learning_rate": 8.57685161726715e-05,
"loss": 1.9141,
"step": 188
},
{
"epoch": 0.12340842311459353,
"grad_norm": 2.358774185180664,
"learning_rate": 8.454054778444431e-05,
"loss": 1.7095,
"step": 189
},
{
"epoch": 0.12406137773424747,
"grad_norm": 2.2548892498016357,
"learning_rate": 8.331496307221762e-05,
"loss": 1.7769,
"step": 190
},
{
"epoch": 0.1247143323539014,
"grad_norm": 2.9085566997528076,
"learning_rate": 8.209195100755551e-05,
"loss": 1.9351,
"step": 191
},
{
"epoch": 0.12536728697355534,
"grad_norm": 2.9980623722076416,
"learning_rate": 8.087170016534835e-05,
"loss": 2.0055,
"step": 192
},
{
"epoch": 0.12602024159320926,
"grad_norm": 2.8316338062286377,
"learning_rate": 7.965439869473664e-05,
"loss": 1.8518,
"step": 193
},
{
"epoch": 0.12667319621286321,
"grad_norm": 2.983452081680298,
"learning_rate": 7.84402342901005e-05,
"loss": 1.996,
"step": 194
},
{
"epoch": 0.12732615083251714,
"grad_norm": 2.925206184387207,
"learning_rate": 7.722939416211937e-05,
"loss": 1.9398,
"step": 195
},
{
"epoch": 0.12797910545217106,
"grad_norm": 2.671654462814331,
"learning_rate": 7.6022065008906e-05,
"loss": 1.7095,
"step": 196
},
{
"epoch": 0.128632060071825,
"grad_norm": 3.020463705062866,
"learning_rate": 7.481843298721982e-05,
"loss": 1.82,
"step": 197
},
{
"epoch": 0.12928501469147893,
"grad_norm": 2.837937593460083,
"learning_rate": 7.361868368376364e-05,
"loss": 1.5275,
"step": 198
},
{
"epoch": 0.12993796931113288,
"grad_norm": 3.599421739578247,
"learning_rate": 7.242300208656814e-05,
"loss": 1.2762,
"step": 199
},
{
"epoch": 0.1305909239307868,
"grad_norm": 3.3294129371643066,
"learning_rate": 7.123157255646888e-05,
"loss": 1.0834,
"step": 200
},
{
"epoch": 0.13124387855044076,
"grad_norm": 3.3265252113342285,
"learning_rate": 7.004457879867986e-05,
"loss": 1.8808,
"step": 201
},
{
"epoch": 0.13189683317009468,
"grad_norm": 3.3643386363983154,
"learning_rate": 6.886220383446814e-05,
"loss": 1.5511,
"step": 202
},
{
"epoch": 0.1325497877897486,
"grad_norm": 2.6517298221588135,
"learning_rate": 6.768462997293413e-05,
"loss": 1.5762,
"step": 203
},
{
"epoch": 0.13320274240940255,
"grad_norm": 3.111353874206543,
"learning_rate": 6.651203878290139e-05,
"loss": 1.4893,
"step": 204
},
{
"epoch": 0.13385569702905648,
"grad_norm": 2.823064088821411,
"learning_rate": 6.534461106492093e-05,
"loss": 1.658,
"step": 205
},
{
"epoch": 0.13450865164871043,
"grad_norm": 2.825458526611328,
"learning_rate": 6.418252682339361e-05,
"loss": 1.6023,
"step": 206
},
{
"epoch": 0.13516160626836435,
"grad_norm": 2.8322906494140625,
"learning_rate": 6.30259652388155e-05,
"loss": 1.4659,
"step": 207
},
{
"epoch": 0.13581456088801827,
"grad_norm": 2.5433881282806396,
"learning_rate": 6.187510464015022e-05,
"loss": 1.3384,
"step": 208
},
{
"epoch": 0.13646751550767222,
"grad_norm": 3.240464925765991,
"learning_rate": 6.0730122477332675e-05,
"loss": 0.9005,
"step": 209
},
{
"epoch": 0.13712047012732614,
"grad_norm": 3.5450103282928467,
"learning_rate": 5.9591195293908174e-05,
"loss": 1.3865,
"step": 210
},
{
"epoch": 0.1377734247469801,
"grad_norm": 2.67393159866333,
"learning_rate": 5.845849869981137e-05,
"loss": 1.2309,
"step": 211
},
{
"epoch": 0.13842637936663402,
"grad_norm": 3.3300986289978027,
"learning_rate": 5.733220734428919e-05,
"loss": 1.9498,
"step": 212
},
{
"epoch": 0.13907933398628794,
"grad_norm": 2.3089957237243652,
"learning_rate": 5.621249488897176e-05,
"loss": 1.7914,
"step": 213
},
{
"epoch": 0.1397322886059419,
"grad_norm": 2.3583531379699707,
"learning_rate": 5.509953398109594e-05,
"loss": 1.6828,
"step": 214
},
{
"epoch": 0.1403852432255958,
"grad_norm": 2.0636346340179443,
"learning_rate": 5.399349622688479e-05,
"loss": 1.7596,
"step": 215
},
{
"epoch": 0.14103819784524976,
"grad_norm": 2.3721492290496826,
"learning_rate": 5.2894552165087916e-05,
"loss": 1.8535,
"step": 216
},
{
"epoch": 0.14169115246490369,
"grad_norm": 2.338106632232666,
"learning_rate": 5.1802871240686234e-05,
"loss": 1.8166,
"step": 217
},
{
"epoch": 0.14234410708455764,
"grad_norm": 2.8886420726776123,
"learning_rate": 5.0718621778765476e-05,
"loss": 1.8643,
"step": 218
},
{
"epoch": 0.14299706170421156,
"grad_norm": 2.71232533454895,
"learning_rate": 4.9641970958562366e-05,
"loss": 1.5456,
"step": 219
},
{
"epoch": 0.14365001632386548,
"grad_norm": 3.262185573577881,
"learning_rate": 4.8573084787687326e-05,
"loss": 1.6296,
"step": 220
},
{
"epoch": 0.14430297094351943,
"grad_norm": 2.6300837993621826,
"learning_rate": 4.751212807652806e-05,
"loss": 1.6135,
"step": 221
},
{
"epoch": 0.14495592556317335,
"grad_norm": 3.008484363555908,
"learning_rate": 4.6459264412837454e-05,
"loss": 1.7263,
"step": 222
},
{
"epoch": 0.1456088801828273,
"grad_norm": 2.9057788848876953,
"learning_rate": 4.5414656136510334e-05,
"loss": 1.2964,
"step": 223
},
{
"epoch": 0.14626183480248123,
"grad_norm": 2.710711717605591,
"learning_rate": 4.437846431455249e-05,
"loss": 1.2502,
"step": 224
},
{
"epoch": 0.14691478942213515,
"grad_norm": 2.6256871223449707,
"learning_rate": 4.335084871624572e-05,
"loss": 0.9017,
"step": 225
},
{
"epoch": 0.1475677440417891,
"grad_norm": 3.905369997024536,
"learning_rate": 4.2331967788513295e-05,
"loss": 2.0128,
"step": 226
},
{
"epoch": 0.14822069866144302,
"grad_norm": 3.0856714248657227,
"learning_rate": 4.132197863148925e-05,
"loss": 1.7455,
"step": 227
},
{
"epoch": 0.14887365328109697,
"grad_norm": 3.04111385345459,
"learning_rate": 4.0321036974295156e-05,
"loss": 1.7097,
"step": 228
},
{
"epoch": 0.14887365328109697,
"eval_loss": null,
"eval_runtime": 702.6357,
"eval_samples_per_second": 3.672,
"eval_steps_per_second": 0.918,
"step": 228
},
{
"epoch": 0.1495266079007509,
"grad_norm": 2.8979952335357666,
"learning_rate": 3.932929715102863e-05,
"loss": 1.4961,
"step": 229
},
{
"epoch": 0.15017956252040482,
"grad_norm": 2.8216731548309326,
"learning_rate": 3.834691207696649e-05,
"loss": 1.5215,
"step": 230
},
{
"epoch": 0.15083251714005877,
"grad_norm": 2.5655124187469482,
"learning_rate": 3.7374033224987084e-05,
"loss": 1.5517,
"step": 231
},
{
"epoch": 0.1514854717597127,
"grad_norm": 2.572943687438965,
"learning_rate": 3.6410810602214684e-05,
"loss": 1.2292,
"step": 232
},
{
"epoch": 0.15213842637936664,
"grad_norm": 2.6719319820404053,
"learning_rate": 3.5457392726890236e-05,
"loss": 1.1804,
"step": 233
},
{
"epoch": 0.15279138099902057,
"grad_norm": 2.3251240253448486,
"learning_rate": 3.45139266054715e-05,
"loss": 0.9235,
"step": 234
},
{
"epoch": 0.15344433561867452,
"grad_norm": 2.7189831733703613,
"learning_rate": 3.3580557709966066e-05,
"loss": 0.9769,
"step": 235
},
{
"epoch": 0.15409729023832844,
"grad_norm": 2.5098495483398438,
"learning_rate": 3.2657429955501394e-05,
"loss": 1.1983,
"step": 236
},
{
"epoch": 0.15475024485798236,
"grad_norm": 2.41629695892334,
"learning_rate": 3.174468567813461e-05,
"loss": 1.7956,
"step": 237
},
{
"epoch": 0.1554031994776363,
"grad_norm": 2.5007240772247314,
"learning_rate": 3.0842465612905837e-05,
"loss": 1.7452,
"step": 238
},
{
"epoch": 0.15605615409729023,
"grad_norm": 1.9491175413131714,
"learning_rate": 2.9950908872138584e-05,
"loss": 1.3075,
"step": 239
},
{
"epoch": 0.15670910871694418,
"grad_norm": 2.146371841430664,
"learning_rate": 2.9070152923989946e-05,
"loss": 1.7472,
"step": 240
},
{
"epoch": 0.1573620633365981,
"grad_norm": 2.1137588024139404,
"learning_rate": 2.82003335712546e-05,
"loss": 1.8301,
"step": 241
},
{
"epoch": 0.15801501795625203,
"grad_norm": 2.4170126914978027,
"learning_rate": 2.7341584930425657e-05,
"loss": 1.8519,
"step": 242
},
{
"epoch": 0.15866797257590598,
"grad_norm": 2.6382076740264893,
"learning_rate": 2.6494039411015193e-05,
"loss": 2.012,
"step": 243
},
{
"epoch": 0.1593209271955599,
"grad_norm": 2.1745855808258057,
"learning_rate": 2.5657827695138372e-05,
"loss": 1.7181,
"step": 244
},
{
"epoch": 0.15997388181521385,
"grad_norm": 2.9943010807037354,
"learning_rate": 2.4833078717363544e-05,
"loss": 1.5537,
"step": 245
},
{
"epoch": 0.16062683643486778,
"grad_norm": 3.608306407928467,
"learning_rate": 2.4019919644832023e-05,
"loss": 1.7291,
"step": 246
},
{
"epoch": 0.1612797910545217,
"grad_norm": 2.9223315715789795,
"learning_rate": 2.3218475857650346e-05,
"loss": 1.7161,
"step": 247
},
{
"epoch": 0.16193274567417565,
"grad_norm": 3.305222749710083,
"learning_rate": 2.242887092955801e-05,
"loss": 1.4356,
"step": 248
},
{
"epoch": 0.16258570029382957,
"grad_norm": 2.8326313495635986,
"learning_rate": 2.1651226608873877e-05,
"loss": 1.3055,
"step": 249
},
{
"epoch": 0.16323865491348352,
"grad_norm": 3.042375087738037,
"learning_rate": 2.0885662799723804e-05,
"loss": 0.8947,
"step": 250
},
{
"epoch": 0.16389160953313744,
"grad_norm": 2.6567325592041016,
"learning_rate": 2.0132297543552757e-05,
"loss": 1.776,
"step": 251
},
{
"epoch": 0.16454456415279137,
"grad_norm": 2.997938871383667,
"learning_rate": 1.939124700092423e-05,
"loss": 1.8796,
"step": 252
},
{
"epoch": 0.16519751877244532,
"grad_norm": 2.9053895473480225,
"learning_rate": 1.866262543360958e-05,
"loss": 1.662,
"step": 253
},
{
"epoch": 0.16585047339209924,
"grad_norm": 2.6591832637786865,
"learning_rate": 1.7946545186970022e-05,
"loss": 1.6189,
"step": 254
},
{
"epoch": 0.1665034280117532,
"grad_norm": 2.9311482906341553,
"learning_rate": 1.7243116672634262e-05,
"loss": 1.5663,
"step": 255
},
{
"epoch": 0.1671563826314071,
"grad_norm": 2.9493162631988525,
"learning_rate": 1.6552448351474304e-05,
"loss": 1.5125,
"step": 256
},
{
"epoch": 0.16780933725106106,
"grad_norm": 2.19980525970459,
"learning_rate": 1.587464671688187e-05,
"loss": 1.171,
"step": 257
},
{
"epoch": 0.168462291870715,
"grad_norm": 3.4264473915100098,
"learning_rate": 1.520981627834851e-05,
"loss": 1.0619,
"step": 258
},
{
"epoch": 0.1691152464903689,
"grad_norm": 2.755953550338745,
"learning_rate": 1.4558059545351143e-05,
"loss": 1.3709,
"step": 259
},
{
"epoch": 0.16976820111002286,
"grad_norm": 3.0671868324279785,
"learning_rate": 1.3919477011546423e-05,
"loss": 1.3373,
"step": 260
},
{
"epoch": 0.17042115572967678,
"grad_norm": 2.703378200531006,
"learning_rate": 1.3294167139275593e-05,
"loss": 1.4812,
"step": 261
},
{
"epoch": 0.17107411034933073,
"grad_norm": 2.7871146202087402,
"learning_rate": 1.2682226344382796e-05,
"loss": 1.5082,
"step": 262
},
{
"epoch": 0.17172706496898466,
"grad_norm": 2.1928627490997314,
"learning_rate": 1.208374898134883e-05,
"loss": 1.7859,
"step": 263
},
{
"epoch": 0.17238001958863858,
"grad_norm": 2.330217123031616,
"learning_rate": 1.1498827328742623e-05,
"loss": 1.9261,
"step": 264
},
{
"epoch": 0.17303297420829253,
"grad_norm": 2.532562732696533,
"learning_rate": 1.0927551574992967e-05,
"loss": 1.744,
"step": 265
},
{
"epoch": 0.17368592882794645,
"grad_norm": 2.453615188598633,
"learning_rate": 1.0370009804482483e-05,
"loss": 1.8084,
"step": 266
},
{
"epoch": 0.1743388834476004,
"grad_norm": 2.09717059135437,
"learning_rate": 9.82628798396592e-06,
"loss": 1.7786,
"step": 267
},
{
"epoch": 0.17499183806725432,
"grad_norm": 3.074509620666504,
"learning_rate": 9.296469949315156e-06,
"loss": 1.9725,
"step": 268
},
{
"epoch": 0.17564479268690825,
"grad_norm": 2.6540865898132324,
"learning_rate": 8.780637392592495e-06,
"loss": 1.6455,
"step": 269
},
{
"epoch": 0.1762977473065622,
"grad_norm": 3.0181193351745605,
"learning_rate": 8.278869849454718e-06,
"loss": 1.7383,
"step": 270
},
{
"epoch": 0.17695070192621612,
"grad_norm": 3.034897565841675,
"learning_rate": 7.791244686889588e-06,
"loss": 1.7912,
"step": 271
},
{
"epoch": 0.17760365654587007,
"grad_norm": 3.0905239582061768,
"learning_rate": 7.317837091286706e-06,
"loss": 1.5027,
"step": 272
},
{
"epoch": 0.178256611165524,
"grad_norm": 3.1407511234283447,
"learning_rate": 6.858720056844614e-06,
"loss": 1.2953,
"step": 273
},
{
"epoch": 0.17890956578517794,
"grad_norm": 2.7985000610351562,
"learning_rate": 6.413964374315851e-06,
"loss": 1.4958,
"step": 274
},
{
"epoch": 0.17956252040483187,
"grad_norm": 2.70698881149292,
"learning_rate": 5.983638620091858e-06,
"loss": 1.1345,
"step": 275
},
{
"epoch": 0.1802154750244858,
"grad_norm": 3.302675485610962,
"learning_rate": 5.567809145629244e-06,
"loss": 1.8954,
"step": 276
},
{
"epoch": 0.18086842964413974,
"grad_norm": 2.7587976455688477,
"learning_rate": 5.1665400672190725e-06,
"loss": 1.6744,
"step": 277
},
{
"epoch": 0.18152138426379366,
"grad_norm": 2.4605860710144043,
"learning_rate": 4.7798932561009865e-06,
"loss": 1.4846,
"step": 278
},
{
"epoch": 0.1821743388834476,
"grad_norm": 3.0369081497192383,
"learning_rate": 4.407928328923194e-06,
"loss": 1.5561,
"step": 279
},
{
"epoch": 0.18282729350310153,
"grad_norm": 2.6894164085388184,
"learning_rate": 4.050702638550275e-06,
"loss": 1.1031,
"step": 280
},
{
"epoch": 0.18348024812275546,
"grad_norm": 2.849666118621826,
"learning_rate": 3.7082712652200867e-06,
"loss": 1.366,
"step": 281
},
{
"epoch": 0.1841332027424094,
"grad_norm": 2.834463119506836,
"learning_rate": 3.380687008050909e-06,
"loss": 1.1118,
"step": 282
},
{
"epoch": 0.18478615736206333,
"grad_norm": 2.5449914932250977,
"learning_rate": 3.068000376900515e-06,
"loss": 1.0414,
"step": 283
},
{
"epoch": 0.18543911198171728,
"grad_norm": 2.6311261653900146,
"learning_rate": 2.770259584577972e-06,
"loss": 0.9975,
"step": 284
},
{
"epoch": 0.1860920666013712,
"grad_norm": 2.644763946533203,
"learning_rate": 2.4875105394098654e-06,
"loss": 1.1698,
"step": 285
},
{
"epoch": 0.18674502122102513,
"grad_norm": 2.4958488941192627,
"learning_rate": 2.219796838161681e-06,
"loss": 1.6067,
"step": 286
},
{
"epoch": 0.18739797584067908,
"grad_norm": 2.388773202896118,
"learning_rate": 1.967159759315751e-06,
"loss": 1.9297,
"step": 287
},
{
"epoch": 0.188050930460333,
"grad_norm": 2.2818784713745117,
"learning_rate": 1.7296382567064672e-06,
"loss": 1.7271,
"step": 288
},
{
"epoch": 0.18870388507998695,
"grad_norm": 2.581960439682007,
"learning_rate": 1.5072689535141072e-06,
"loss": 1.7268,
"step": 289
},
{
"epoch": 0.18935683969964087,
"grad_norm": 2.5180134773254395,
"learning_rate": 1.3000861366179062e-06,
"loss": 1.9191,
"step": 290
},
{
"epoch": 0.19000979431929482,
"grad_norm": 2.125568151473999,
"learning_rate": 1.1081217513094212e-06,
"loss": 1.7848,
"step": 291
},
{
"epoch": 0.19066274893894875,
"grad_norm": 2.7064099311828613,
"learning_rate": 9.314053963669245e-07,
"loss": 1.6782,
"step": 292
},
{
"epoch": 0.19131570355860267,
"grad_norm": 2.437058448791504,
"learning_rate": 7.699643194915784e-07,
"loss": 1.8157,
"step": 293
},
{
"epoch": 0.19196865817825662,
"grad_norm": 3.013352870941162,
"learning_rate": 6.238234131061616e-07,
"loss": 1.3813,
"step": 294
},
{
"epoch": 0.19262161279791054,
"grad_norm": 2.7739081382751465,
"learning_rate": 4.93005210516928e-07,
"loss": 1.7656,
"step": 295
},
{
"epoch": 0.1932745674175645,
"grad_norm": 3.043750286102295,
"learning_rate": 3.775298824391982e-07,
"loss": 1.5386,
"step": 296
},
{
"epoch": 0.1939275220372184,
"grad_norm": 2.856957197189331,
"learning_rate": 2.774152338873126e-07,
"loss": 1.4946,
"step": 297
},
{
"epoch": 0.19458047665687234,
"grad_norm": 2.9437901973724365,
"learning_rate": 1.9267670142926187e-07,
"loss": 1.3205,
"step": 298
},
{
"epoch": 0.1952334312765263,
"grad_norm": 2.6561126708984375,
"learning_rate": 1.2332735080651248e-07,
"loss": 1.1922,
"step": 299
},
{
"epoch": 0.1958863858961802,
"grad_norm": 2.5239169597625732,
"learning_rate": 6.9377874919474e-08,
"loss": 0.956,
"step": 300
},
{
"epoch": 0.19653934051583416,
"grad_norm": 2.8316755294799805,
"learning_rate": 3.0836592178717926e-08,
"loss": 1.8631,
"step": 301
},
{
"epoch": 0.19719229513548808,
"grad_norm": 2.5579283237457275,
"learning_rate": 7.709445222403577e-09,
"loss": 1.4522,
"step": 302
},
{
"epoch": 0.197845249755142,
"grad_norm": 2.6677441596984863,
"learning_rate": 0.0,
"loss": 1.7175,
"step": 303
}
],
"logging_steps": 1,
"max_steps": 303,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 76,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.294759255217275e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}