{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9977761304670127,
"eval_steps": 500,
"global_step": 2022,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014825796886582653,
"grad_norm": 1.3803142660082024,
"learning_rate": 5e-06,
"loss": 0.6862,
"step": 10
},
{
"epoch": 0.029651593773165306,
"grad_norm": 1.790122423429332,
"learning_rate": 5e-06,
"loss": 0.6305,
"step": 20
},
{
"epoch": 0.04447739065974796,
"grad_norm": 0.8497603342076308,
"learning_rate": 5e-06,
"loss": 0.5946,
"step": 30
},
{
"epoch": 0.05930318754633061,
"grad_norm": 0.7732974567839591,
"learning_rate": 5e-06,
"loss": 0.5858,
"step": 40
},
{
"epoch": 0.07412898443291327,
"grad_norm": 0.8204168744024732,
"learning_rate": 5e-06,
"loss": 0.5662,
"step": 50
},
{
"epoch": 0.08895478131949593,
"grad_norm": 0.7530005874020861,
"learning_rate": 5e-06,
"loss": 0.564,
"step": 60
},
{
"epoch": 0.10378057820607858,
"grad_norm": 0.6979206993642474,
"learning_rate": 5e-06,
"loss": 0.5536,
"step": 70
},
{
"epoch": 0.11860637509266123,
"grad_norm": 0.9504756484437652,
"learning_rate": 5e-06,
"loss": 0.5447,
"step": 80
},
{
"epoch": 0.1334321719792439,
"grad_norm": 0.8937944128313379,
"learning_rate": 5e-06,
"loss": 0.5466,
"step": 90
},
{
"epoch": 0.14825796886582654,
"grad_norm": 0.6670560083366487,
"learning_rate": 5e-06,
"loss": 0.5354,
"step": 100
},
{
"epoch": 0.16308376575240918,
"grad_norm": 0.7364925811772932,
"learning_rate": 5e-06,
"loss": 0.535,
"step": 110
},
{
"epoch": 0.17790956263899185,
"grad_norm": 0.7183776415658619,
"learning_rate": 5e-06,
"loss": 0.5338,
"step": 120
},
{
"epoch": 0.1927353595255745,
"grad_norm": 0.5967155900573925,
"learning_rate": 5e-06,
"loss": 0.5301,
"step": 130
},
{
"epoch": 0.20756115641215717,
"grad_norm": 0.5747353912092298,
"learning_rate": 5e-06,
"loss": 0.5263,
"step": 140
},
{
"epoch": 0.2223869532987398,
"grad_norm": 0.6159997079939371,
"learning_rate": 5e-06,
"loss": 0.5248,
"step": 150
},
{
"epoch": 0.23721275018532245,
"grad_norm": 0.5680164570830968,
"learning_rate": 5e-06,
"loss": 0.5279,
"step": 160
},
{
"epoch": 0.2520385470719051,
"grad_norm": 0.5992456838263306,
"learning_rate": 5e-06,
"loss": 0.526,
"step": 170
},
{
"epoch": 0.2668643439584878,
"grad_norm": 0.5930479122165856,
"learning_rate": 5e-06,
"loss": 0.5224,
"step": 180
},
{
"epoch": 0.28169014084507044,
"grad_norm": 0.5912410054870685,
"learning_rate": 5e-06,
"loss": 0.52,
"step": 190
},
{
"epoch": 0.2965159377316531,
"grad_norm": 0.5894696404162991,
"learning_rate": 5e-06,
"loss": 0.5179,
"step": 200
},
{
"epoch": 0.3113417346182357,
"grad_norm": 0.5643703980877148,
"learning_rate": 5e-06,
"loss": 0.5182,
"step": 210
},
{
"epoch": 0.32616753150481836,
"grad_norm": 0.5861129018729444,
"learning_rate": 5e-06,
"loss": 0.5104,
"step": 220
},
{
"epoch": 0.34099332839140106,
"grad_norm": 0.6236624375674511,
"learning_rate": 5e-06,
"loss": 0.5131,
"step": 230
},
{
"epoch": 0.3558191252779837,
"grad_norm": 0.5812434696304949,
"learning_rate": 5e-06,
"loss": 0.5191,
"step": 240
},
{
"epoch": 0.37064492216456635,
"grad_norm": 0.7181156036216183,
"learning_rate": 5e-06,
"loss": 0.5093,
"step": 250
},
{
"epoch": 0.385470719051149,
"grad_norm": 0.587369525728773,
"learning_rate": 5e-06,
"loss": 0.5161,
"step": 260
},
{
"epoch": 0.40029651593773163,
"grad_norm": 0.5460966546871288,
"learning_rate": 5e-06,
"loss": 0.5178,
"step": 270
},
{
"epoch": 0.41512231282431433,
"grad_norm": 0.5876447069011888,
"learning_rate": 5e-06,
"loss": 0.5142,
"step": 280
},
{
"epoch": 0.429948109710897,
"grad_norm": 0.816294324192534,
"learning_rate": 5e-06,
"loss": 0.5125,
"step": 290
},
{
"epoch": 0.4447739065974796,
"grad_norm": 0.5492628600148616,
"learning_rate": 5e-06,
"loss": 0.5078,
"step": 300
},
{
"epoch": 0.45959970348406226,
"grad_norm": 0.579955709352068,
"learning_rate": 5e-06,
"loss": 0.5045,
"step": 310
},
{
"epoch": 0.4744255003706449,
"grad_norm": 0.6279455964566482,
"learning_rate": 5e-06,
"loss": 0.5047,
"step": 320
},
{
"epoch": 0.4892512972572276,
"grad_norm": 0.6390417903003635,
"learning_rate": 5e-06,
"loss": 0.5067,
"step": 330
},
{
"epoch": 0.5040770941438102,
"grad_norm": 0.5816721190754486,
"learning_rate": 5e-06,
"loss": 0.5135,
"step": 340
},
{
"epoch": 0.5189028910303929,
"grad_norm": 0.6458696214772466,
"learning_rate": 5e-06,
"loss": 0.5034,
"step": 350
},
{
"epoch": 0.5337286879169756,
"grad_norm": 0.6957712465947605,
"learning_rate": 5e-06,
"loss": 0.5048,
"step": 360
},
{
"epoch": 0.5485544848035582,
"grad_norm": 0.543098433920077,
"learning_rate": 5e-06,
"loss": 0.4991,
"step": 370
},
{
"epoch": 0.5633802816901409,
"grad_norm": 0.7663819453316695,
"learning_rate": 5e-06,
"loss": 0.5025,
"step": 380
},
{
"epoch": 0.5782060785767235,
"grad_norm": 0.5194777261335899,
"learning_rate": 5e-06,
"loss": 0.5072,
"step": 390
},
{
"epoch": 0.5930318754633062,
"grad_norm": 0.8363946362832739,
"learning_rate": 5e-06,
"loss": 0.4948,
"step": 400
},
{
"epoch": 0.6078576723498889,
"grad_norm": 0.5430505650241441,
"learning_rate": 5e-06,
"loss": 0.5024,
"step": 410
},
{
"epoch": 0.6226834692364714,
"grad_norm": 0.5574129318838634,
"learning_rate": 5e-06,
"loss": 0.5034,
"step": 420
},
{
"epoch": 0.6375092661230541,
"grad_norm": 0.701668057920261,
"learning_rate": 5e-06,
"loss": 0.4912,
"step": 430
},
{
"epoch": 0.6523350630096367,
"grad_norm": 0.5952225478810055,
"learning_rate": 5e-06,
"loss": 0.4898,
"step": 440
},
{
"epoch": 0.6671608598962194,
"grad_norm": 0.585331803107272,
"learning_rate": 5e-06,
"loss": 0.5027,
"step": 450
},
{
"epoch": 0.6819866567828021,
"grad_norm": 0.5906175115867641,
"learning_rate": 5e-06,
"loss": 0.49,
"step": 460
},
{
"epoch": 0.6968124536693847,
"grad_norm": 0.5782512807193211,
"learning_rate": 5e-06,
"loss": 0.4962,
"step": 470
},
{
"epoch": 0.7116382505559674,
"grad_norm": 0.528401047427795,
"learning_rate": 5e-06,
"loss": 0.4979,
"step": 480
},
{
"epoch": 0.72646404744255,
"grad_norm": 0.7128769222059271,
"learning_rate": 5e-06,
"loss": 0.4982,
"step": 490
},
{
"epoch": 0.7412898443291327,
"grad_norm": 0.5767857437742224,
"learning_rate": 5e-06,
"loss": 0.4891,
"step": 500
},
{
"epoch": 0.7561156412157154,
"grad_norm": 0.6930280814580314,
"learning_rate": 5e-06,
"loss": 0.5006,
"step": 510
},
{
"epoch": 0.770941438102298,
"grad_norm": 0.6530314579756452,
"learning_rate": 5e-06,
"loss": 0.4966,
"step": 520
},
{
"epoch": 0.7857672349888807,
"grad_norm": 0.6639683423874188,
"learning_rate": 5e-06,
"loss": 0.4962,
"step": 530
},
{
"epoch": 0.8005930318754633,
"grad_norm": 0.5556537170587046,
"learning_rate": 5e-06,
"loss": 0.4947,
"step": 540
},
{
"epoch": 0.815418828762046,
"grad_norm": 0.5740824829726571,
"learning_rate": 5e-06,
"loss": 0.4919,
"step": 550
},
{
"epoch": 0.8302446256486287,
"grad_norm": 0.5307405473088794,
"learning_rate": 5e-06,
"loss": 0.4864,
"step": 560
},
{
"epoch": 0.8450704225352113,
"grad_norm": 0.5249601710096029,
"learning_rate": 5e-06,
"loss": 0.4926,
"step": 570
},
{
"epoch": 0.859896219421794,
"grad_norm": 0.5263542668413514,
"learning_rate": 5e-06,
"loss": 0.4877,
"step": 580
},
{
"epoch": 0.8747220163083765,
"grad_norm": 0.5027236096291062,
"learning_rate": 5e-06,
"loss": 0.4903,
"step": 590
},
{
"epoch": 0.8895478131949592,
"grad_norm": 0.687053549139694,
"learning_rate": 5e-06,
"loss": 0.4945,
"step": 600
},
{
"epoch": 0.9043736100815419,
"grad_norm": 0.6064169430923966,
"learning_rate": 5e-06,
"loss": 0.4898,
"step": 610
},
{
"epoch": 0.9191994069681245,
"grad_norm": 0.5127672913798731,
"learning_rate": 5e-06,
"loss": 0.4916,
"step": 620
},
{
"epoch": 0.9340252038547072,
"grad_norm": 0.5911308230908845,
"learning_rate": 5e-06,
"loss": 0.4876,
"step": 630
},
{
"epoch": 0.9488510007412898,
"grad_norm": 0.5318485612638623,
"learning_rate": 5e-06,
"loss": 0.4911,
"step": 640
},
{
"epoch": 0.9636767976278725,
"grad_norm": 0.599135799828323,
"learning_rate": 5e-06,
"loss": 0.4907,
"step": 650
},
{
"epoch": 0.9785025945144552,
"grad_norm": 0.5017158299240659,
"learning_rate": 5e-06,
"loss": 0.4864,
"step": 660
},
{
"epoch": 0.9933283914010378,
"grad_norm": 0.638905966589537,
"learning_rate": 5e-06,
"loss": 0.4886,
"step": 670
},
{
"epoch": 0.9992587101556709,
"eval_loss": 0.489400178194046,
"eval_runtime": 119.2986,
"eval_samples_per_second": 152.332,
"eval_steps_per_second": 0.595,
"step": 674
},
{
"epoch": 1.0081541882876204,
"grad_norm": 0.8019081623682851,
"learning_rate": 5e-06,
"loss": 0.4775,
"step": 680
},
{
"epoch": 1.0229799851742032,
"grad_norm": 0.5833948483649493,
"learning_rate": 5e-06,
"loss": 0.4344,
"step": 690
},
{
"epoch": 1.0378057820607858,
"grad_norm": 0.5717232078953663,
"learning_rate": 5e-06,
"loss": 0.4372,
"step": 700
},
{
"epoch": 1.0526315789473684,
"grad_norm": 0.5320074952877888,
"learning_rate": 5e-06,
"loss": 0.4376,
"step": 710
},
{
"epoch": 1.0674573758339512,
"grad_norm": 0.5726271946361784,
"learning_rate": 5e-06,
"loss": 0.4314,
"step": 720
},
{
"epoch": 1.0822831727205338,
"grad_norm": 0.596509516956136,
"learning_rate": 5e-06,
"loss": 0.4347,
"step": 730
},
{
"epoch": 1.0971089696071163,
"grad_norm": 0.5390515390075241,
"learning_rate": 5e-06,
"loss": 0.434,
"step": 740
},
{
"epoch": 1.111934766493699,
"grad_norm": 0.6129914776744078,
"learning_rate": 5e-06,
"loss": 0.4354,
"step": 750
},
{
"epoch": 1.1267605633802817,
"grad_norm": 0.5592764678336093,
"learning_rate": 5e-06,
"loss": 0.4368,
"step": 760
},
{
"epoch": 1.1415863602668643,
"grad_norm": 0.7060047642916599,
"learning_rate": 5e-06,
"loss": 0.443,
"step": 770
},
{
"epoch": 1.156412157153447,
"grad_norm": 0.5940229109864722,
"learning_rate": 5e-06,
"loss": 0.4433,
"step": 780
},
{
"epoch": 1.1712379540400297,
"grad_norm": 0.5491622519034774,
"learning_rate": 5e-06,
"loss": 0.4304,
"step": 790
},
{
"epoch": 1.1860637509266123,
"grad_norm": 0.6079761033641661,
"learning_rate": 5e-06,
"loss": 0.438,
"step": 800
},
{
"epoch": 1.200889547813195,
"grad_norm": 0.5693457065422755,
"learning_rate": 5e-06,
"loss": 0.4403,
"step": 810
},
{
"epoch": 1.2157153446997775,
"grad_norm": 0.6505910795677263,
"learning_rate": 5e-06,
"loss": 0.4375,
"step": 820
},
{
"epoch": 1.2305411415863603,
"grad_norm": 0.6268213064250889,
"learning_rate": 5e-06,
"loss": 0.4373,
"step": 830
},
{
"epoch": 1.2453669384729429,
"grad_norm": 0.5919676508645508,
"learning_rate": 5e-06,
"loss": 0.4306,
"step": 840
},
{
"epoch": 1.2601927353595257,
"grad_norm": 0.5398299551449609,
"learning_rate": 5e-06,
"loss": 0.4353,
"step": 850
},
{
"epoch": 1.2750185322461083,
"grad_norm": 0.5424618353666834,
"learning_rate": 5e-06,
"loss": 0.4376,
"step": 860
},
{
"epoch": 1.2898443291326909,
"grad_norm": 0.5634395154305121,
"learning_rate": 5e-06,
"loss": 0.4361,
"step": 870
},
{
"epoch": 1.3046701260192735,
"grad_norm": 0.5785455634852342,
"learning_rate": 5e-06,
"loss": 0.4367,
"step": 880
},
{
"epoch": 1.3194959229058563,
"grad_norm": 0.620601502244543,
"learning_rate": 5e-06,
"loss": 0.4367,
"step": 890
},
{
"epoch": 1.3343217197924389,
"grad_norm": 0.530395060539937,
"learning_rate": 5e-06,
"loss": 0.4347,
"step": 900
},
{
"epoch": 1.3491475166790214,
"grad_norm": 0.5770452107287607,
"learning_rate": 5e-06,
"loss": 0.4391,
"step": 910
},
{
"epoch": 1.3639733135656043,
"grad_norm": 0.5858170710249173,
"learning_rate": 5e-06,
"loss": 0.4343,
"step": 920
},
{
"epoch": 1.3787991104521868,
"grad_norm": 0.5500756623276715,
"learning_rate": 5e-06,
"loss": 0.4346,
"step": 930
},
{
"epoch": 1.3936249073387694,
"grad_norm": 0.6255449782390206,
"learning_rate": 5e-06,
"loss": 0.4389,
"step": 940
},
{
"epoch": 1.408450704225352,
"grad_norm": 0.5460524509865728,
"learning_rate": 5e-06,
"loss": 0.4369,
"step": 950
},
{
"epoch": 1.4232765011119348,
"grad_norm": 0.5924885663735421,
"learning_rate": 5e-06,
"loss": 0.4338,
"step": 960
},
{
"epoch": 1.4381022979985174,
"grad_norm": 0.5764645405905721,
"learning_rate": 5e-06,
"loss": 0.4367,
"step": 970
},
{
"epoch": 1.4529280948851,
"grad_norm": 0.5850117598054916,
"learning_rate": 5e-06,
"loss": 0.4362,
"step": 980
},
{
"epoch": 1.4677538917716828,
"grad_norm": 0.6081832575580323,
"learning_rate": 5e-06,
"loss": 0.4406,
"step": 990
},
{
"epoch": 1.4825796886582654,
"grad_norm": 0.543363234220218,
"learning_rate": 5e-06,
"loss": 0.4351,
"step": 1000
},
{
"epoch": 1.497405485544848,
"grad_norm": 0.5448664659996519,
"learning_rate": 5e-06,
"loss": 0.436,
"step": 1010
},
{
"epoch": 1.5122312824314306,
"grad_norm": 0.65441524183388,
"learning_rate": 5e-06,
"loss": 0.4372,
"step": 1020
},
{
"epoch": 1.5270570793180134,
"grad_norm": 0.5280772751509661,
"learning_rate": 5e-06,
"loss": 0.4357,
"step": 1030
},
{
"epoch": 1.541882876204596,
"grad_norm": 0.5723443909631686,
"learning_rate": 5e-06,
"loss": 0.4318,
"step": 1040
},
{
"epoch": 1.5567086730911788,
"grad_norm": 0.5085846011938958,
"learning_rate": 5e-06,
"loss": 0.4323,
"step": 1050
},
{
"epoch": 1.5715344699777614,
"grad_norm": 0.638285906027132,
"learning_rate": 5e-06,
"loss": 0.4415,
"step": 1060
},
{
"epoch": 1.586360266864344,
"grad_norm": 0.571218693857694,
"learning_rate": 5e-06,
"loss": 0.4346,
"step": 1070
},
{
"epoch": 1.6011860637509265,
"grad_norm": 0.517292661892654,
"learning_rate": 5e-06,
"loss": 0.4382,
"step": 1080
},
{
"epoch": 1.6160118606375091,
"grad_norm": 0.5102023179424974,
"learning_rate": 5e-06,
"loss": 0.4323,
"step": 1090
},
{
"epoch": 1.630837657524092,
"grad_norm": 0.5721112226520383,
"learning_rate": 5e-06,
"loss": 0.4378,
"step": 1100
},
{
"epoch": 1.6456634544106745,
"grad_norm": 0.5551329559644929,
"learning_rate": 5e-06,
"loss": 0.4348,
"step": 1110
},
{
"epoch": 1.6604892512972573,
"grad_norm": 0.6119221260699137,
"learning_rate": 5e-06,
"loss": 0.4274,
"step": 1120
},
{
"epoch": 1.67531504818384,
"grad_norm": 0.5333241050316515,
"learning_rate": 5e-06,
"loss": 0.432,
"step": 1130
},
{
"epoch": 1.6901408450704225,
"grad_norm": 0.564328179152952,
"learning_rate": 5e-06,
"loss": 0.4391,
"step": 1140
},
{
"epoch": 1.704966641957005,
"grad_norm": 0.5254085440281446,
"learning_rate": 5e-06,
"loss": 0.4359,
"step": 1150
},
{
"epoch": 1.7197924388435877,
"grad_norm": 0.5996709499991413,
"learning_rate": 5e-06,
"loss": 0.4355,
"step": 1160
},
{
"epoch": 1.7346182357301705,
"grad_norm": 0.5493385200435505,
"learning_rate": 5e-06,
"loss": 0.432,
"step": 1170
},
{
"epoch": 1.7494440326167533,
"grad_norm": 0.5906833738744632,
"learning_rate": 5e-06,
"loss": 0.4367,
"step": 1180
},
{
"epoch": 1.7642698295033359,
"grad_norm": 0.5341688856567031,
"learning_rate": 5e-06,
"loss": 0.4298,
"step": 1190
},
{
"epoch": 1.7790956263899185,
"grad_norm": 0.533463029816806,
"learning_rate": 5e-06,
"loss": 0.4344,
"step": 1200
},
{
"epoch": 1.793921423276501,
"grad_norm": 0.5606747279800302,
"learning_rate": 5e-06,
"loss": 0.4362,
"step": 1210
},
{
"epoch": 1.8087472201630836,
"grad_norm": 0.5183566067624092,
"learning_rate": 5e-06,
"loss": 0.4271,
"step": 1220
},
{
"epoch": 1.8235730170496665,
"grad_norm": 0.5326764319923035,
"learning_rate": 5e-06,
"loss": 0.4306,
"step": 1230
},
{
"epoch": 1.838398813936249,
"grad_norm": 0.5967407273470395,
"learning_rate": 5e-06,
"loss": 0.4344,
"step": 1240
},
{
"epoch": 1.8532246108228319,
"grad_norm": 0.569200724691502,
"learning_rate": 5e-06,
"loss": 0.4353,
"step": 1250
},
{
"epoch": 1.8680504077094144,
"grad_norm": 0.5364248806496852,
"learning_rate": 5e-06,
"loss": 0.4364,
"step": 1260
},
{
"epoch": 1.882876204595997,
"grad_norm": 0.5385402357985632,
"learning_rate": 5e-06,
"loss": 0.4379,
"step": 1270
},
{
"epoch": 1.8977020014825796,
"grad_norm": 0.5223344068630751,
"learning_rate": 5e-06,
"loss": 0.4347,
"step": 1280
},
{
"epoch": 1.9125277983691622,
"grad_norm": 0.5617283138858877,
"learning_rate": 5e-06,
"loss": 0.4319,
"step": 1290
},
{
"epoch": 1.927353595255745,
"grad_norm": 0.5893151487107228,
"learning_rate": 5e-06,
"loss": 0.4313,
"step": 1300
},
{
"epoch": 1.9421793921423276,
"grad_norm": 0.5366729507399915,
"learning_rate": 5e-06,
"loss": 0.4399,
"step": 1310
},
{
"epoch": 1.9570051890289104,
"grad_norm": 0.5521187635061144,
"learning_rate": 5e-06,
"loss": 0.4346,
"step": 1320
},
{
"epoch": 1.971830985915493,
"grad_norm": 0.5736814101430955,
"learning_rate": 5e-06,
"loss": 0.4344,
"step": 1330
},
{
"epoch": 1.9866567828020756,
"grad_norm": 0.6153323220981107,
"learning_rate": 5e-06,
"loss": 0.4288,
"step": 1340
},
{
"epoch": 2.0,
"eval_loss": 0.47887247800827026,
"eval_runtime": 117.4023,
"eval_samples_per_second": 154.793,
"eval_steps_per_second": 0.605,
"step": 1349
},
{
"epoch": 2.001482579688658,
"grad_norm": 0.8057512126070844,
"learning_rate": 5e-06,
"loss": 0.4465,
"step": 1350
},
{
"epoch": 2.0163083765752408,
"grad_norm": 0.6985686766657238,
"learning_rate": 5e-06,
"loss": 0.3758,
"step": 1360
},
{
"epoch": 2.031134173461824,
"grad_norm": 0.7516592382333935,
"learning_rate": 5e-06,
"loss": 0.3739,
"step": 1370
},
{
"epoch": 2.0459599703484064,
"grad_norm": 0.6560426338450871,
"learning_rate": 5e-06,
"loss": 0.3733,
"step": 1380
},
{
"epoch": 2.060785767234989,
"grad_norm": 0.6554981025712345,
"learning_rate": 5e-06,
"loss": 0.3738,
"step": 1390
},
{
"epoch": 2.0756115641215716,
"grad_norm": 0.5777290379829684,
"learning_rate": 5e-06,
"loss": 0.373,
"step": 1400
},
{
"epoch": 2.090437361008154,
"grad_norm": 0.6039864607065941,
"learning_rate": 5e-06,
"loss": 0.3726,
"step": 1410
},
{
"epoch": 2.1052631578947367,
"grad_norm": 0.5737027873462299,
"learning_rate": 5e-06,
"loss": 0.3745,
"step": 1420
},
{
"epoch": 2.1200889547813193,
"grad_norm": 0.5962503173170325,
"learning_rate": 5e-06,
"loss": 0.3793,
"step": 1430
},
{
"epoch": 2.1349147516679023,
"grad_norm": 0.5896368959935617,
"learning_rate": 5e-06,
"loss": 0.3783,
"step": 1440
},
{
"epoch": 2.149740548554485,
"grad_norm": 0.5817443097816994,
"learning_rate": 5e-06,
"loss": 0.3775,
"step": 1450
},
{
"epoch": 2.1645663454410675,
"grad_norm": 0.5749673449125087,
"learning_rate": 5e-06,
"loss": 0.378,
"step": 1460
},
{
"epoch": 2.17939214232765,
"grad_norm": 0.581781908254146,
"learning_rate": 5e-06,
"loss": 0.3694,
"step": 1470
},
{
"epoch": 2.1942179392142327,
"grad_norm": 0.5991853455621899,
"learning_rate": 5e-06,
"loss": 0.3821,
"step": 1480
},
{
"epoch": 2.2090437361008153,
"grad_norm": 0.5786028266566305,
"learning_rate": 5e-06,
"loss": 0.3761,
"step": 1490
},
{
"epoch": 2.223869532987398,
"grad_norm": 0.6524631416486719,
"learning_rate": 5e-06,
"loss": 0.3733,
"step": 1500
},
{
"epoch": 2.238695329873981,
"grad_norm": 0.66274701181455,
"learning_rate": 5e-06,
"loss": 0.3812,
"step": 1510
},
{
"epoch": 2.2535211267605635,
"grad_norm": 0.5890654308630106,
"learning_rate": 5e-06,
"loss": 0.3785,
"step": 1520
},
{
"epoch": 2.268346923647146,
"grad_norm": 0.5995580394178166,
"learning_rate": 5e-06,
"loss": 0.3766,
"step": 1530
},
{
"epoch": 2.2831727205337287,
"grad_norm": 0.5621523912385117,
"learning_rate": 5e-06,
"loss": 0.3819,
"step": 1540
},
{
"epoch": 2.2979985174203112,
"grad_norm": 0.5631391589331654,
"learning_rate": 5e-06,
"loss": 0.3831,
"step": 1550
},
{
"epoch": 2.312824314306894,
"grad_norm": 0.598394877682484,
"learning_rate": 5e-06,
"loss": 0.3776,
"step": 1560
},
{
"epoch": 2.327650111193477,
"grad_norm": 0.5751635376845493,
"learning_rate": 5e-06,
"loss": 0.3743,
"step": 1570
},
{
"epoch": 2.3424759080800595,
"grad_norm": 0.7431168749670549,
"learning_rate": 5e-06,
"loss": 0.3806,
"step": 1580
},
{
"epoch": 2.357301704966642,
"grad_norm": 0.6148778429363163,
"learning_rate": 5e-06,
"loss": 0.3792,
"step": 1590
},
{
"epoch": 2.3721275018532246,
"grad_norm": 0.5904751763294604,
"learning_rate": 5e-06,
"loss": 0.3779,
"step": 1600
},
{
"epoch": 2.386953298739807,
"grad_norm": 0.6160494878206139,
"learning_rate": 5e-06,
"loss": 0.3795,
"step": 1610
},
{
"epoch": 2.40177909562639,
"grad_norm": 0.6601509703827891,
"learning_rate": 5e-06,
"loss": 0.381,
"step": 1620
},
{
"epoch": 2.4166048925129724,
"grad_norm": 0.5557483892389672,
"learning_rate": 5e-06,
"loss": 0.38,
"step": 1630
},
{
"epoch": 2.431430689399555,
"grad_norm": 0.5514157650195483,
"learning_rate": 5e-06,
"loss": 0.3785,
"step": 1640
},
{
"epoch": 2.446256486286138,
"grad_norm": 0.5896166088948557,
"learning_rate": 5e-06,
"loss": 0.3785,
"step": 1650
},
{
"epoch": 2.4610822831727206,
"grad_norm": 0.6187824694109818,
"learning_rate": 5e-06,
"loss": 0.3866,
"step": 1660
},
{
"epoch": 2.475908080059303,
"grad_norm": 0.6196885650354601,
"learning_rate": 5e-06,
"loss": 0.3773,
"step": 1670
},
{
"epoch": 2.4907338769458858,
"grad_norm": 0.6497541102453935,
"learning_rate": 5e-06,
"loss": 0.3792,
"step": 1680
},
{
"epoch": 2.5055596738324684,
"grad_norm": 0.6221830439300937,
"learning_rate": 5e-06,
"loss": 0.3814,
"step": 1690
},
{
"epoch": 2.5203854707190514,
"grad_norm": 0.5575748670805571,
"learning_rate": 5e-06,
"loss": 0.3788,
"step": 1700
},
{
"epoch": 2.535211267605634,
"grad_norm": 0.5820255267213036,
"learning_rate": 5e-06,
"loss": 0.3797,
"step": 1710
},
{
"epoch": 2.5500370644922166,
"grad_norm": 0.5778840920723647,
"learning_rate": 5e-06,
"loss": 0.3836,
"step": 1720
},
{
"epoch": 2.564862861378799,
"grad_norm": 0.6369372874220104,
"learning_rate": 5e-06,
"loss": 0.3822,
"step": 1730
},
{
"epoch": 2.5796886582653817,
"grad_norm": 0.5836272270823646,
"learning_rate": 5e-06,
"loss": 0.3825,
"step": 1740
},
{
"epoch": 2.5945144551519643,
"grad_norm": 0.5893118830611698,
"learning_rate": 5e-06,
"loss": 0.382,
"step": 1750
},
{
"epoch": 2.609340252038547,
"grad_norm": 0.691740548026418,
"learning_rate": 5e-06,
"loss": 0.3828,
"step": 1760
},
{
"epoch": 2.6241660489251295,
"grad_norm": 0.6016891230542475,
"learning_rate": 5e-06,
"loss": 0.3841,
"step": 1770
},
{
"epoch": 2.6389918458117125,
"grad_norm": 0.5709726000324739,
"learning_rate": 5e-06,
"loss": 0.38,
"step": 1780
},
{
"epoch": 2.653817642698295,
"grad_norm": 0.5707418370568915,
"learning_rate": 5e-06,
"loss": 0.3819,
"step": 1790
},
{
"epoch": 2.6686434395848777,
"grad_norm": 0.6121781418312778,
"learning_rate": 5e-06,
"loss": 0.3775,
"step": 1800
},
{
"epoch": 2.6834692364714603,
"grad_norm": 0.5845205444282813,
"learning_rate": 5e-06,
"loss": 0.3794,
"step": 1810
},
{
"epoch": 2.698295033358043,
"grad_norm": 0.6090204118467554,
"learning_rate": 5e-06,
"loss": 0.3749,
"step": 1820
},
{
"epoch": 2.713120830244626,
"grad_norm": 0.6311725348745423,
"learning_rate": 5e-06,
"loss": 0.3875,
"step": 1830
},
{
"epoch": 2.7279466271312085,
"grad_norm": 0.5855235496792751,
"learning_rate": 5e-06,
"loss": 0.3798,
"step": 1840
},
{
"epoch": 2.742772424017791,
"grad_norm": 0.6360580241341309,
"learning_rate": 5e-06,
"loss": 0.3795,
"step": 1850
},
{
"epoch": 2.7575982209043737,
"grad_norm": 0.6949332357026632,
"learning_rate": 5e-06,
"loss": 0.3833,
"step": 1860
},
{
"epoch": 2.7724240177909563,
"grad_norm": 0.5661995725449918,
"learning_rate": 5e-06,
"loss": 0.3815,
"step": 1870
},
{
"epoch": 2.787249814677539,
"grad_norm": 0.6181579391717933,
"learning_rate": 5e-06,
"loss": 0.3814,
"step": 1880
},
{
"epoch": 2.8020756115641214,
"grad_norm": 0.621591189180585,
"learning_rate": 5e-06,
"loss": 0.3839,
"step": 1890
},
{
"epoch": 2.816901408450704,
"grad_norm": 0.6102808235648141,
"learning_rate": 5e-06,
"loss": 0.386,
"step": 1900
},
{
"epoch": 2.8317272053372866,
"grad_norm": 0.5570401466362744,
"learning_rate": 5e-06,
"loss": 0.3829,
"step": 1910
},
{
"epoch": 2.8465530022238696,
"grad_norm": 0.605864801280795,
"learning_rate": 5e-06,
"loss": 0.3791,
"step": 1920
},
{
"epoch": 2.8613787991104522,
"grad_norm": 0.5737361891170868,
"learning_rate": 5e-06,
"loss": 0.3831,
"step": 1930
},
{
"epoch": 2.876204595997035,
"grad_norm": 0.5867020788912005,
"learning_rate": 5e-06,
"loss": 0.38,
"step": 1940
},
{
"epoch": 2.8910303928836174,
"grad_norm": 0.557534237372008,
"learning_rate": 5e-06,
"loss": 0.3825,
"step": 1950
},
{
"epoch": 2.9058561897702,
"grad_norm": 0.5964701402385094,
"learning_rate": 5e-06,
"loss": 0.3852,
"step": 1960
},
{
"epoch": 2.920681986656783,
"grad_norm": 0.600588952238815,
"learning_rate": 5e-06,
"loss": 0.3876,
"step": 1970
},
{
"epoch": 2.9355077835433656,
"grad_norm": 0.5765877577022089,
"learning_rate": 5e-06,
"loss": 0.3851,
"step": 1980
},
{
"epoch": 2.950333580429948,
"grad_norm": 0.6187803453792526,
"learning_rate": 5e-06,
"loss": 0.3818,
"step": 1990
},
{
"epoch": 2.965159377316531,
"grad_norm": 0.5518073320285379,
"learning_rate": 5e-06,
"loss": 0.3834,
"step": 2000
},
{
"epoch": 2.9799851742031134,
"grad_norm": 0.5343617840090896,
"learning_rate": 5e-06,
"loss": 0.3772,
"step": 2010
},
{
"epoch": 2.994810971089696,
"grad_norm": 0.5629835017462416,
"learning_rate": 5e-06,
"loss": 0.382,
"step": 2020
},
{
"epoch": 2.9977761304670127,
"eval_loss": 0.4834764897823334,
"eval_runtime": 117.0978,
"eval_samples_per_second": 155.195,
"eval_steps_per_second": 0.606,
"step": 2022
},
{
"epoch": 2.9977761304670127,
"step": 2022,
"total_flos": 3386087791656960.0,
"train_loss": 0.4438282892800698,
"train_runtime": 17391.1323,
"train_samples_per_second": 59.561,
"train_steps_per_second": 0.116
}
],
"logging_steps": 10,
"max_steps": 2022,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3386087791656960.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}