feedback_p0.1_seed42_level2_style / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9992217898832685,
"eval_steps": 500,
"global_step": 963,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0010376134889753567,
"grad_norm": 23.969658904297845,
"learning_rate": 1.0309278350515465e-07,
"loss": 1.3725,
"step": 1
},
{
"epoch": 0.005188067444876783,
"grad_norm": 22.449071975220065,
"learning_rate": 5.154639175257732e-07,
"loss": 1.3719,
"step": 5
},
{
"epoch": 0.010376134889753566,
"grad_norm": 8.548339530816044,
"learning_rate": 1.0309278350515464e-06,
"loss": 1.2558,
"step": 10
},
{
"epoch": 0.01556420233463035,
"grad_norm": 8.215121326664015,
"learning_rate": 1.5463917525773197e-06,
"loss": 1.081,
"step": 15
},
{
"epoch": 0.020752269779507133,
"grad_norm": 3.0841636846785283,
"learning_rate": 2.061855670103093e-06,
"loss": 0.9504,
"step": 20
},
{
"epoch": 0.02594033722438392,
"grad_norm": 2.3540131831672575,
"learning_rate": 2.577319587628866e-06,
"loss": 0.9092,
"step": 25
},
{
"epoch": 0.0311284046692607,
"grad_norm": 2.217217553056043,
"learning_rate": 3.0927835051546395e-06,
"loss": 0.8692,
"step": 30
},
{
"epoch": 0.03631647211413749,
"grad_norm": 2.2563290398206615,
"learning_rate": 3.6082474226804126e-06,
"loss": 0.8445,
"step": 35
},
{
"epoch": 0.041504539559014265,
"grad_norm": 2.2874485501473907,
"learning_rate": 4.123711340206186e-06,
"loss": 0.8336,
"step": 40
},
{
"epoch": 0.04669260700389105,
"grad_norm": 2.3330889165134967,
"learning_rate": 4.639175257731959e-06,
"loss": 0.8218,
"step": 45
},
{
"epoch": 0.05188067444876784,
"grad_norm": 2.16633956379982,
"learning_rate": 5.154639175257732e-06,
"loss": 0.8255,
"step": 50
},
{
"epoch": 0.057068741893644616,
"grad_norm": 2.355679025636223,
"learning_rate": 5.670103092783505e-06,
"loss": 0.7891,
"step": 55
},
{
"epoch": 0.0622568093385214,
"grad_norm": 2.4839432117685036,
"learning_rate": 6.185567010309279e-06,
"loss": 0.7814,
"step": 60
},
{
"epoch": 0.06744487678339818,
"grad_norm": 2.480013950899919,
"learning_rate": 6.701030927835052e-06,
"loss": 0.7674,
"step": 65
},
{
"epoch": 0.07263294422827497,
"grad_norm": 2.3378408946103284,
"learning_rate": 7.216494845360825e-06,
"loss": 0.766,
"step": 70
},
{
"epoch": 0.07782101167315175,
"grad_norm": 2.2751461973205482,
"learning_rate": 7.731958762886599e-06,
"loss": 0.7433,
"step": 75
},
{
"epoch": 0.08300907911802853,
"grad_norm": 2.428511002623931,
"learning_rate": 8.247422680412371e-06,
"loss": 0.7414,
"step": 80
},
{
"epoch": 0.08819714656290532,
"grad_norm": 2.4298836600324045,
"learning_rate": 8.762886597938146e-06,
"loss": 0.7358,
"step": 85
},
{
"epoch": 0.0933852140077821,
"grad_norm": 2.41313947506571,
"learning_rate": 9.278350515463918e-06,
"loss": 0.7319,
"step": 90
},
{
"epoch": 0.09857328145265888,
"grad_norm": 2.4451429150679274,
"learning_rate": 9.793814432989691e-06,
"loss": 0.7323,
"step": 95
},
{
"epoch": 0.10376134889753567,
"grad_norm": 2.450193589248992,
"learning_rate": 9.999703897419048e-06,
"loss": 0.7231,
"step": 100
},
{
"epoch": 0.10894941634241245,
"grad_norm": 2.271786014084883,
"learning_rate": 9.997894508649995e-06,
"loss": 0.7149,
"step": 105
},
{
"epoch": 0.11413748378728923,
"grad_norm": 2.354564055245926,
"learning_rate": 9.99444082710777e-06,
"loss": 0.708,
"step": 110
},
{
"epoch": 0.11932555123216602,
"grad_norm": 2.220428698962425,
"learning_rate": 9.989343989043563e-06,
"loss": 0.7216,
"step": 115
},
{
"epoch": 0.1245136186770428,
"grad_norm": 2.3141712328751396,
"learning_rate": 9.982605671302293e-06,
"loss": 0.7091,
"step": 120
},
{
"epoch": 0.1297016861219196,
"grad_norm": 2.100396054783955,
"learning_rate": 9.97422809077092e-06,
"loss": 0.7066,
"step": 125
},
{
"epoch": 0.13488975356679636,
"grad_norm": 2.2484885982675413,
"learning_rate": 9.9642140036491e-06,
"loss": 0.7085,
"step": 130
},
{
"epoch": 0.14007782101167315,
"grad_norm": 2.1795476193729413,
"learning_rate": 9.9525667045424e-06,
"loss": 0.6889,
"step": 135
},
{
"epoch": 0.14526588845654995,
"grad_norm": 2.1757051871338593,
"learning_rate": 9.93929002537839e-06,
"loss": 0.6921,
"step": 140
},
{
"epoch": 0.1504539559014267,
"grad_norm": 2.143005235580036,
"learning_rate": 9.924388334145943e-06,
"loss": 0.6907,
"step": 145
},
{
"epoch": 0.1556420233463035,
"grad_norm": 2.1989760690420157,
"learning_rate": 9.90786653345818e-06,
"loss": 0.6912,
"step": 150
},
{
"epoch": 0.1608300907911803,
"grad_norm": 2.004571277860471,
"learning_rate": 9.889730058939529e-06,
"loss": 0.6859,
"step": 155
},
{
"epoch": 0.16601815823605706,
"grad_norm": 2.05691987455993,
"learning_rate": 9.869984877437413e-06,
"loss": 0.6894,
"step": 160
},
{
"epoch": 0.17120622568093385,
"grad_norm": 2.230053895792029,
"learning_rate": 9.848637485059183e-06,
"loss": 0.6814,
"step": 165
},
{
"epoch": 0.17639429312581065,
"grad_norm": 1.9493958638517837,
"learning_rate": 9.82569490503491e-06,
"loss": 0.6731,
"step": 170
},
{
"epoch": 0.1815823605706874,
"grad_norm": 2.133120594361784,
"learning_rate": 9.80116468540677e-06,
"loss": 0.6594,
"step": 175
},
{
"epoch": 0.1867704280155642,
"grad_norm": 2.01624934264464,
"learning_rate": 9.775054896545755e-06,
"loss": 0.6751,
"step": 180
},
{
"epoch": 0.191958495460441,
"grad_norm": 2.1502691215852527,
"learning_rate": 9.747374128496541e-06,
"loss": 0.6789,
"step": 185
},
{
"epoch": 0.19714656290531776,
"grad_norm": 2.0484102083185194,
"learning_rate": 9.718131488151399e-06,
"loss": 0.6676,
"step": 190
},
{
"epoch": 0.20233463035019456,
"grad_norm": 2.0715841424222337,
"learning_rate": 9.687336596254045e-06,
"loss": 0.6616,
"step": 195
},
{
"epoch": 0.20752269779507135,
"grad_norm": 2.012157328183036,
"learning_rate": 9.654999584234444e-06,
"loss": 0.652,
"step": 200
},
{
"epoch": 0.2127107652399481,
"grad_norm": 2.0669739212271923,
"learning_rate": 9.621131090875603e-06,
"loss": 0.6426,
"step": 205
},
{
"epoch": 0.2178988326848249,
"grad_norm": 2.0105636015375143,
"learning_rate": 9.585742258813447e-06,
"loss": 0.6445,
"step": 210
},
{
"epoch": 0.2230869001297017,
"grad_norm": 2.1108266544110688,
"learning_rate": 9.548844730870903e-06,
"loss": 0.6438,
"step": 215
},
{
"epoch": 0.22827496757457846,
"grad_norm": 2.072355913378756,
"learning_rate": 9.51045064622747e-06,
"loss": 0.6565,
"step": 220
},
{
"epoch": 0.23346303501945526,
"grad_norm": 2.166007360772802,
"learning_rate": 9.470572636425451e-06,
"loss": 0.647,
"step": 225
},
{
"epoch": 0.23865110246433205,
"grad_norm": 2.022875957881762,
"learning_rate": 9.429223821214213e-06,
"loss": 0.6325,
"step": 230
},
{
"epoch": 0.2438391699092088,
"grad_norm": 2.006861087987301,
"learning_rate": 9.386417804233836e-06,
"loss": 0.6477,
"step": 235
},
{
"epoch": 0.2490272373540856,
"grad_norm": 2.0140489204477645,
"learning_rate": 9.34216866853954e-06,
"loss": 0.6391,
"step": 240
},
{
"epoch": 0.25421530479896237,
"grad_norm": 1.9489606047213677,
"learning_rate": 9.296490971968416e-06,
"loss": 0.6283,
"step": 245
},
{
"epoch": 0.2594033722438392,
"grad_norm": 2.072486707132733,
"learning_rate": 9.249399742349928e-06,
"loss": 0.6377,
"step": 250
},
{
"epoch": 0.26459143968871596,
"grad_norm": 1.9650189580925839,
"learning_rate": 9.20091047256181e-06,
"loss": 0.6261,
"step": 255
},
{
"epoch": 0.2697795071335927,
"grad_norm": 1.9241991797476943,
"learning_rate": 9.151039115432946e-06,
"loss": 0.6184,
"step": 260
},
{
"epoch": 0.27496757457846954,
"grad_norm": 1.9743470888532664,
"learning_rate": 9.099802078494947e-06,
"loss": 0.6142,
"step": 265
},
{
"epoch": 0.2801556420233463,
"grad_norm": 2.160988187935936,
"learning_rate": 9.047216218584105e-06,
"loss": 0.6094,
"step": 270
},
{
"epoch": 0.2853437094682231,
"grad_norm": 1.9697508480614465,
"learning_rate": 8.993298836295556e-06,
"loss": 0.6196,
"step": 275
},
{
"epoch": 0.2905317769130999,
"grad_norm": 1.8771524751425768,
"learning_rate": 8.93806767029143e-06,
"loss": 0.6163,
"step": 280
},
{
"epoch": 0.29571984435797666,
"grad_norm": 2.125863779805947,
"learning_rate": 8.88154089146488e-06,
"loss": 0.6167,
"step": 285
},
{
"epoch": 0.3009079118028534,
"grad_norm": 2.1188493077731514,
"learning_rate": 8.823737096961916e-06,
"loss": 0.5992,
"step": 290
},
{
"epoch": 0.30609597924773024,
"grad_norm": 2.1335267497592807,
"learning_rate": 8.764675304062992e-06,
"loss": 0.6071,
"step": 295
},
{
"epoch": 0.311284046692607,
"grad_norm": 2.036189297244598,
"learning_rate": 8.704374943926386e-06,
"loss": 0.609,
"step": 300
},
{
"epoch": 0.3164721141374838,
"grad_norm": 1.915927299304865,
"learning_rate": 8.642855855195394e-06,
"loss": 0.5945,
"step": 305
},
{
"epoch": 0.3216601815823606,
"grad_norm": 2.005194485630929,
"learning_rate": 8.580138277471476e-06,
"loss": 0.5959,
"step": 310
},
{
"epoch": 0.32684824902723736,
"grad_norm": 2.1368034472887527,
"learning_rate": 8.516242844655498e-06,
"loss": 0.5941,
"step": 315
},
{
"epoch": 0.3320363164721141,
"grad_norm": 1.9360804934529585,
"learning_rate": 8.45119057815922e-06,
"loss": 0.5915,
"step": 320
},
{
"epoch": 0.33722438391699094,
"grad_norm": 1.9356101875463727,
"learning_rate": 8.385002879989328e-06,
"loss": 0.5838,
"step": 325
},
{
"epoch": 0.3424124513618677,
"grad_norm": 2.4311425501079023,
"learning_rate": 8.317701525706226e-06,
"loss": 0.5946,
"step": 330
},
{
"epoch": 0.3476005188067445,
"grad_norm": 2.356263841306792,
"learning_rate": 8.249308657259943e-06,
"loss": 0.567,
"step": 335
},
{
"epoch": 0.3527885862516213,
"grad_norm": 2.048334150791661,
"learning_rate": 8.179846775705504e-06,
"loss": 0.5795,
"step": 340
},
{
"epoch": 0.35797665369649806,
"grad_norm": 1.9977511587812506,
"learning_rate": 8.109338733800132e-06,
"loss": 0.5751,
"step": 345
},
{
"epoch": 0.3631647211413748,
"grad_norm": 1.8688618314869894,
"learning_rate": 8.03780772848477e-06,
"loss": 0.568,
"step": 350
},
{
"epoch": 0.36835278858625164,
"grad_norm": 1.93022130905715,
"learning_rate": 7.965277293252354e-06,
"loss": 0.5682,
"step": 355
},
{
"epoch": 0.3735408560311284,
"grad_norm": 2.0382225242835528,
"learning_rate": 7.891771290405351e-06,
"loss": 0.5617,
"step": 360
},
{
"epoch": 0.3787289234760052,
"grad_norm": 1.9924209327442368,
"learning_rate": 7.817313903205148e-06,
"loss": 0.5577,
"step": 365
},
{
"epoch": 0.383916990920882,
"grad_norm": 1.9678458173326334,
"learning_rate": 7.741929627915814e-06,
"loss": 0.56,
"step": 370
},
{
"epoch": 0.38910505836575876,
"grad_norm": 2.2405618654805215,
"learning_rate": 7.66564326574491e-06,
"loss": 0.5513,
"step": 375
},
{
"epoch": 0.3942931258106355,
"grad_norm": 1.9971872990885233,
"learning_rate": 7.588479914683954e-06,
"loss": 0.5445,
"step": 380
},
{
"epoch": 0.39948119325551235,
"grad_norm": 2.06807252227761,
"learning_rate": 7.510464961251271e-06,
"loss": 0.5674,
"step": 385
},
{
"epoch": 0.4046692607003891,
"grad_norm": 1.9627368535332135,
"learning_rate": 7.431624072139884e-06,
"loss": 0.5435,
"step": 390
},
{
"epoch": 0.4098573281452659,
"grad_norm": 1.9716804464407136,
"learning_rate": 7.351983185773259e-06,
"loss": 0.5552,
"step": 395
},
{
"epoch": 0.4150453955901427,
"grad_norm": 1.9693396583392846,
"learning_rate": 7.271568503771632e-06,
"loss": 0.5343,
"step": 400
},
{
"epoch": 0.42023346303501946,
"grad_norm": 1.9432949161104107,
"learning_rate": 7.190406482331757e-06,
"loss": 0.5475,
"step": 405
},
{
"epoch": 0.4254215304798962,
"grad_norm": 2.0194917717314045,
"learning_rate": 7.108523823522891e-06,
"loss": 0.5477,
"step": 410
},
{
"epoch": 0.43060959792477305,
"grad_norm": 2.206404974952941,
"learning_rate": 7.0259474665018915e-06,
"loss": 0.5425,
"step": 415
},
{
"epoch": 0.4357976653696498,
"grad_norm": 1.9526533277899327,
"learning_rate": 6.942704578650312e-06,
"loss": 0.5161,
"step": 420
},
{
"epoch": 0.4409857328145266,
"grad_norm": 2.0097466124913117,
"learning_rate": 6.858822546636417e-06,
"loss": 0.5331,
"step": 425
},
{
"epoch": 0.4461738002594034,
"grad_norm": 1.8348649689633039,
"learning_rate": 6.774328967405035e-06,
"loss": 0.523,
"step": 430
},
{
"epoch": 0.45136186770428016,
"grad_norm": 2.139084532722164,
"learning_rate": 6.689251639098261e-06,
"loss": 0.5251,
"step": 435
},
{
"epoch": 0.4565499351491569,
"grad_norm": 1.9708479629081865,
"learning_rate": 6.603618551909935e-06,
"loss": 0.5232,
"step": 440
},
{
"epoch": 0.46173800259403375,
"grad_norm": 1.9331722289768318,
"learning_rate": 6.517457878876958e-06,
"loss": 0.5305,
"step": 445
},
{
"epoch": 0.4669260700389105,
"grad_norm": 1.859009250403284,
"learning_rate": 6.430797966610436e-06,
"loss": 0.5159,
"step": 450
},
{
"epoch": 0.4721141374837873,
"grad_norm": 1.986527066309499,
"learning_rate": 6.343667325969736e-06,
"loss": 0.5367,
"step": 455
},
{
"epoch": 0.4773022049286641,
"grad_norm": 1.9771277544299588,
"learning_rate": 6.256094622682493e-06,
"loss": 0.5123,
"step": 460
},
{
"epoch": 0.48249027237354086,
"grad_norm": 2.0022259730400904,
"learning_rate": 6.168108667913666e-06,
"loss": 0.5166,
"step": 465
},
{
"epoch": 0.4876783398184176,
"grad_norm": 1.9991961519932744,
"learning_rate": 6.079738408786753e-06,
"loss": 0.5161,
"step": 470
},
{
"epoch": 0.49286640726329445,
"grad_norm": 2.0805595238898307,
"learning_rate": 5.9910129188602665e-06,
"loss": 0.5179,
"step": 475
},
{
"epoch": 0.4980544747081712,
"grad_norm": 1.929253006230254,
"learning_rate": 5.9019613885626235e-06,
"loss": 0.5097,
"step": 480
},
{
"epoch": 0.503242542153048,
"grad_norm": 2.25129632838715,
"learning_rate": 5.812613115588575e-06,
"loss": 0.4971,
"step": 485
},
{
"epoch": 0.5084306095979247,
"grad_norm": 1.9119339241166262,
"learning_rate": 5.722997495260348e-06,
"loss": 0.4988,
"step": 490
},
{
"epoch": 0.5136186770428015,
"grad_norm": 1.8300200112998326,
"learning_rate": 5.6331440108566735e-06,
"loss": 0.4941,
"step": 495
},
{
"epoch": 0.5188067444876784,
"grad_norm": 1.9591247994452368,
"learning_rate": 5.543082223912875e-06,
"loss": 0.492,
"step": 500
},
{
"epoch": 0.5239948119325551,
"grad_norm": 1.99136453982626,
"learning_rate": 5.452841764495203e-06,
"loss": 0.5002,
"step": 505
},
{
"epoch": 0.5291828793774319,
"grad_norm": 1.9961024804052654,
"learning_rate": 5.362452321452636e-06,
"loss": 0.4772,
"step": 510
},
{
"epoch": 0.5343709468223087,
"grad_norm": 1.9607124098040063,
"learning_rate": 5.2719436326493255e-06,
"loss": 0.4908,
"step": 515
},
{
"epoch": 0.5395590142671854,
"grad_norm": 1.9303906010446525,
"learning_rate": 5.181345475180941e-06,
"loss": 0.4866,
"step": 520
},
{
"epoch": 0.5447470817120622,
"grad_norm": 2.0420688559734503,
"learning_rate": 5.090687655578078e-06,
"loss": 0.4769,
"step": 525
},
{
"epoch": 0.5499351491569391,
"grad_norm": 1.9908642175713687,
"learning_rate": 5e-06,
"loss": 0.4742,
"step": 530
},
{
"epoch": 0.5551232166018158,
"grad_norm": 1.9960779934532675,
"learning_rate": 4.909312344421923e-06,
"loss": 0.4666,
"step": 535
},
{
"epoch": 0.5603112840466926,
"grad_norm": 1.9274839933909422,
"learning_rate": 4.8186545248190604e-06,
"loss": 0.4866,
"step": 540
},
{
"epoch": 0.5654993514915694,
"grad_norm": 1.9162466337096817,
"learning_rate": 4.7280563673506745e-06,
"loss": 0.4692,
"step": 545
},
{
"epoch": 0.5706874189364461,
"grad_norm": 2.07386431606307,
"learning_rate": 4.637547678547366e-06,
"loss": 0.4859,
"step": 550
},
{
"epoch": 0.5758754863813229,
"grad_norm": 2.0201984812958385,
"learning_rate": 4.547158235504797e-06,
"loss": 0.4718,
"step": 555
},
{
"epoch": 0.5810635538261998,
"grad_norm": 1.95015272613481,
"learning_rate": 4.4569177760871255e-06,
"loss": 0.475,
"step": 560
},
{
"epoch": 0.5862516212710766,
"grad_norm": 1.944586565605588,
"learning_rate": 4.366855989143326e-06,
"loss": 0.4551,
"step": 565
},
{
"epoch": 0.5914396887159533,
"grad_norm": 1.9208589567145171,
"learning_rate": 4.277002504739653e-06,
"loss": 0.4686,
"step": 570
},
{
"epoch": 0.5966277561608301,
"grad_norm": 1.8639671285460482,
"learning_rate": 4.187386884411426e-06,
"loss": 0.4557,
"step": 575
},
{
"epoch": 0.6018158236057068,
"grad_norm": 1.9975578797091653,
"learning_rate": 4.098038611437377e-06,
"loss": 0.4651,
"step": 580
},
{
"epoch": 0.6070038910505836,
"grad_norm": 1.961651938542185,
"learning_rate": 4.008987081139734e-06,
"loss": 0.4643,
"step": 585
},
{
"epoch": 0.6121919584954605,
"grad_norm": 1.9374158302120401,
"learning_rate": 3.920261591213249e-06,
"loss": 0.4556,
"step": 590
},
{
"epoch": 0.6173800259403373,
"grad_norm": 1.9090835435895448,
"learning_rate": 3.8318913320863355e-06,
"loss": 0.4536,
"step": 595
},
{
"epoch": 0.622568093385214,
"grad_norm": 1.8975263865890188,
"learning_rate": 3.7439053773175092e-06,
"loss": 0.4615,
"step": 600
},
{
"epoch": 0.6277561608300908,
"grad_norm": 1.9060390294655216,
"learning_rate": 3.6563326740302664e-06,
"loss": 0.4459,
"step": 605
},
{
"epoch": 0.6329442282749675,
"grad_norm": 1.9725006931962796,
"learning_rate": 3.569202033389565e-06,
"loss": 0.4451,
"step": 610
},
{
"epoch": 0.6381322957198443,
"grad_norm": 1.9621067476956515,
"learning_rate": 3.4825421211230437e-06,
"loss": 0.4419,
"step": 615
},
{
"epoch": 0.6433203631647212,
"grad_norm": 2.098443239659209,
"learning_rate": 3.3963814480900665e-06,
"loss": 0.4415,
"step": 620
},
{
"epoch": 0.648508430609598,
"grad_norm": 1.8981208726840302,
"learning_rate": 3.310748360901741e-06,
"loss": 0.4456,
"step": 625
},
{
"epoch": 0.6536964980544747,
"grad_norm": 1.8947168989269416,
"learning_rate": 3.225671032594966e-06,
"loss": 0.4229,
"step": 630
},
{
"epoch": 0.6588845654993515,
"grad_norm": 2.0138652650509288,
"learning_rate": 3.1411774533635854e-06,
"loss": 0.437,
"step": 635
},
{
"epoch": 0.6640726329442282,
"grad_norm": 1.8903378440015823,
"learning_rate": 3.0572954213496897e-06,
"loss": 0.4454,
"step": 640
},
{
"epoch": 0.669260700389105,
"grad_norm": 1.8448484960177367,
"learning_rate": 2.9740525334981105e-06,
"loss": 0.4398,
"step": 645
},
{
"epoch": 0.6744487678339819,
"grad_norm": 1.9976530631786225,
"learning_rate": 2.8914761764771093e-06,
"loss": 0.429,
"step": 650
},
{
"epoch": 0.6796368352788587,
"grad_norm": 1.9155018572353837,
"learning_rate": 2.809593517668243e-06,
"loss": 0.4309,
"step": 655
},
{
"epoch": 0.6848249027237354,
"grad_norm": 1.942714148946629,
"learning_rate": 2.728431496228369e-06,
"loss": 0.4248,
"step": 660
},
{
"epoch": 0.6900129701686122,
"grad_norm": 2.013023734418392,
"learning_rate": 2.648016814226742e-06,
"loss": 0.4304,
"step": 665
},
{
"epoch": 0.695201037613489,
"grad_norm": 1.9023117871214554,
"learning_rate": 2.5683759278601174e-06,
"loss": 0.4338,
"step": 670
},
{
"epoch": 0.7003891050583657,
"grad_norm": 1.8911448184302957,
"learning_rate": 2.4895350387487304e-06,
"loss": 0.4245,
"step": 675
},
{
"epoch": 0.7055771725032426,
"grad_norm": 2.0358392917626813,
"learning_rate": 2.4115200853160475e-06,
"loss": 0.4194,
"step": 680
},
{
"epoch": 0.7107652399481194,
"grad_norm": 1.9510576677492195,
"learning_rate": 2.3343567342550933e-06,
"loss": 0.4267,
"step": 685
},
{
"epoch": 0.7159533073929961,
"grad_norm": 1.8690267408594539,
"learning_rate": 2.258070372084188e-06,
"loss": 0.4312,
"step": 690
},
{
"epoch": 0.7211413748378729,
"grad_norm": 1.8322122073891454,
"learning_rate": 2.182686096794852e-06,
"loss": 0.4207,
"step": 695
},
{
"epoch": 0.7263294422827496,
"grad_norm": 2.0311002524177253,
"learning_rate": 2.108228709594649e-06,
"loss": 0.4227,
"step": 700
},
{
"epoch": 0.7315175097276264,
"grad_norm": 1.8678394687630775,
"learning_rate": 2.0347227067476478e-06,
"loss": 0.4149,
"step": 705
},
{
"epoch": 0.7367055771725033,
"grad_norm": 1.8521301731665931,
"learning_rate": 1.962192271515232e-06,
"loss": 0.4192,
"step": 710
},
{
"epoch": 0.74189364461738,
"grad_norm": 1.9291143236144128,
"learning_rate": 1.8906612661998698e-06,
"loss": 0.4128,
"step": 715
},
{
"epoch": 0.7470817120622568,
"grad_norm": 1.991134829662921,
"learning_rate": 1.820153224294498e-06,
"loss": 0.4102,
"step": 720
},
{
"epoch": 0.7522697795071336,
"grad_norm": 1.8597303553848081,
"learning_rate": 1.750691342740058e-06,
"loss": 0.4104,
"step": 725
},
{
"epoch": 0.7574578469520103,
"grad_norm": 1.8334844899907363,
"learning_rate": 1.6822984742937764e-06,
"loss": 0.4049,
"step": 730
},
{
"epoch": 0.7626459143968871,
"grad_norm": 1.839241095874111,
"learning_rate": 1.6149971200106723e-06,
"loss": 0.4009,
"step": 735
},
{
"epoch": 0.767833981841764,
"grad_norm": 1.8614288971061537,
"learning_rate": 1.548809421840779e-06,
"loss": 0.4029,
"step": 740
},
{
"epoch": 0.7730220492866408,
"grad_norm": 1.9712640153496117,
"learning_rate": 1.483757155344503e-06,
"loss": 0.4056,
"step": 745
},
{
"epoch": 0.7782101167315175,
"grad_norm": 1.7973975593361922,
"learning_rate": 1.4198617225285244e-06,
"loss": 0.409,
"step": 750
},
{
"epoch": 0.7833981841763943,
"grad_norm": 1.8870139707940816,
"learning_rate": 1.3571441448046086e-06,
"loss": 0.4117,
"step": 755
},
{
"epoch": 0.788586251621271,
"grad_norm": 1.9255920717839368,
"learning_rate": 1.2956250560736143e-06,
"loss": 0.4097,
"step": 760
},
{
"epoch": 0.7937743190661478,
"grad_norm": 1.8605570734597534,
"learning_rate": 1.2353246959370086e-06,
"loss": 0.3885,
"step": 765
},
{
"epoch": 0.7989623865110247,
"grad_norm": 1.8678208345700735,
"learning_rate": 1.1762629030380867e-06,
"loss": 0.4044,
"step": 770
},
{
"epoch": 0.8041504539559015,
"grad_norm": 1.8406724398818959,
"learning_rate": 1.118459108535122e-06,
"loss": 0.3991,
"step": 775
},
{
"epoch": 0.8093385214007782,
"grad_norm": 1.9447178497450672,
"learning_rate": 1.061932329708572e-06,
"loss": 0.3878,
"step": 780
},
{
"epoch": 0.814526588845655,
"grad_norm": 1.903347183666585,
"learning_rate": 1.006701163704445e-06,
"loss": 0.3994,
"step": 785
},
{
"epoch": 0.8197146562905318,
"grad_norm": 1.7817648380438804,
"learning_rate": 9.527837814158963e-07,
"loss": 0.3943,
"step": 790
},
{
"epoch": 0.8249027237354085,
"grad_norm": 1.89718875917406,
"learning_rate": 9.001979215050544e-07,
"loss": 0.3929,
"step": 795
},
{
"epoch": 0.8300907911802854,
"grad_norm": 1.8825895138353903,
"learning_rate": 8.489608845670527e-07,
"loss": 0.3924,
"step": 800
},
{
"epoch": 0.8352788586251622,
"grad_norm": 1.7908515887362904,
"learning_rate": 7.99089527438191e-07,
"loss": 0.3919,
"step": 805
},
{
"epoch": 0.8404669260700389,
"grad_norm": 2.0314129121613034,
"learning_rate": 7.506002576500732e-07,
"loss": 0.3941,
"step": 810
},
{
"epoch": 0.8456549935149157,
"grad_norm": 1.838737045068825,
"learning_rate": 7.035090280315854e-07,
"loss": 0.398,
"step": 815
},
{
"epoch": 0.8508430609597925,
"grad_norm": 1.8701463175206698,
"learning_rate": 6.578313314604612e-07,
"loss": 0.395,
"step": 820
},
{
"epoch": 0.8560311284046692,
"grad_norm": 1.8734176088672492,
"learning_rate": 6.135821957661658e-07,
"loss": 0.3945,
"step": 825
},
{
"epoch": 0.8612191958495461,
"grad_norm": 1.8454123160341045,
"learning_rate": 5.707761787857879e-07,
"loss": 0.3855,
"step": 830
},
{
"epoch": 0.8664072632944229,
"grad_norm": 1.7755466173110739,
"learning_rate": 5.294273635745517e-07,
"loss": 0.3971,
"step": 835
},
{
"epoch": 0.8715953307392996,
"grad_norm": 1.8394864397787671,
"learning_rate": 4.895493537725326e-07,
"loss": 0.3966,
"step": 840
},
{
"epoch": 0.8767833981841764,
"grad_norm": 1.7915928948304078,
"learning_rate": 4.511552691290988e-07,
"loss": 0.3979,
"step": 845
},
{
"epoch": 0.8819714656290532,
"grad_norm": 1.7863787006400424,
"learning_rate": 4.1425774118655505e-07,
"loss": 0.3826,
"step": 850
},
{
"epoch": 0.8871595330739299,
"grad_norm": 1.8024141112662704,
"learning_rate": 3.7886890912439633e-07,
"loss": 0.3862,
"step": 855
},
{
"epoch": 0.8923476005188068,
"grad_norm": 1.8180972720099156,
"learning_rate": 3.4500041576555733e-07,
"loss": 0.3859,
"step": 860
},
{
"epoch": 0.8975356679636836,
"grad_norm": 1.7622051589037506,
"learning_rate": 3.1266340374595693e-07,
"loss": 0.3831,
"step": 865
},
{
"epoch": 0.9027237354085603,
"grad_norm": 1.7926800043760007,
"learning_rate": 2.818685118486025e-07,
"loss": 0.3927,
"step": 870
},
{
"epoch": 0.9079118028534371,
"grad_norm": 1.8515815235983688,
"learning_rate": 2.526258715034602e-07,
"loss": 0.3787,
"step": 875
},
{
"epoch": 0.9130998702983139,
"grad_norm": 1.761786866550431,
"learning_rate": 2.2494510345424657e-07,
"loss": 0.3881,
"step": 880
},
{
"epoch": 0.9182879377431906,
"grad_norm": 1.731506703869926,
"learning_rate": 1.988353145932298e-07,
"loss": 0.3762,
"step": 885
},
{
"epoch": 0.9234760051880675,
"grad_norm": 1.8427166106595052,
"learning_rate": 1.7430509496508985e-07,
"loss": 0.3975,
"step": 890
},
{
"epoch": 0.9286640726329443,
"grad_norm": 1.761769698023775,
"learning_rate": 1.5136251494081822e-07,
"loss": 0.3842,
"step": 895
},
{
"epoch": 0.933852140077821,
"grad_norm": 1.8297504100937483,
"learning_rate": 1.3001512256258841e-07,
"loss": 0.3916,
"step": 900
},
{
"epoch": 0.9390402075226978,
"grad_norm": 1.8143369848190358,
"learning_rate": 1.1026994106047296e-07,
"loss": 0.3911,
"step": 905
},
{
"epoch": 0.9442282749675746,
"grad_norm": 1.7462314691918333,
"learning_rate": 9.213346654182054e-08,
"loss": 0.3888,
"step": 910
},
{
"epoch": 0.9494163424124513,
"grad_norm": 1.842285372864709,
"learning_rate": 7.561166585405789e-08,
"loss": 0.3823,
"step": 915
},
{
"epoch": 0.9546044098573282,
"grad_norm": 1.798454935332072,
"learning_rate": 6.070997462161055e-08,
"loss": 0.4032,
"step": 920
},
{
"epoch": 0.959792477302205,
"grad_norm": 1.8579672164577692,
"learning_rate": 4.743329545760122e-08,
"loss": 0.3811,
"step": 925
},
{
"epoch": 0.9649805447470817,
"grad_norm": 1.764976690651984,
"learning_rate": 3.578599635090163e-08,
"loss": 0.3806,
"step": 930
},
{
"epoch": 0.9701686121919585,
"grad_norm": 1.7085373084916373,
"learning_rate": 2.577190922908035e-08,
"loss": 0.3888,
"step": 935
},
{
"epoch": 0.9753566796368353,
"grad_norm": 1.7431684765639506,
"learning_rate": 1.7394328697707407e-08,
"loss": 0.3901,
"step": 940
},
{
"epoch": 0.980544747081712,
"grad_norm": 1.8495600056895127,
"learning_rate": 1.0656010956437979e-08,
"loss": 0.3918,
"step": 945
},
{
"epoch": 0.9857328145265889,
"grad_norm": 1.8616847493274582,
"learning_rate": 5.5591728922316235e-09,
"loss": 0.3895,
"step": 950
},
{
"epoch": 0.9909208819714657,
"grad_norm": 1.8274058784400706,
"learning_rate": 2.1054913500051512e-09,
"loss": 0.3831,
"step": 955
},
{
"epoch": 0.9961089494163424,
"grad_norm": 1.7888916632814764,
"learning_rate": 2.9610258095169596e-10,
"loss": 0.3863,
"step": 960
},
{
"epoch": 0.9992217898832685,
"eval_loss": 0.35284245014190674,
"eval_runtime": 0.9437,
"eval_samples_per_second": 2.119,
"eval_steps_per_second": 1.06,
"step": 963
},
{
"epoch": 0.9992217898832685,
"step": 963,
"total_flos": 201580263505920.0,
"train_loss": 0.5411187405403034,
"train_runtime": 23935.6127,
"train_samples_per_second": 1.288,
"train_steps_per_second": 0.04
}
],
"logging_steps": 5,
"max_steps": 963,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 201580263505920.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
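
The JSON above follows the standard 🤗 Transformers `trainer_state.json` layout: `log_history` holds one entry per logging step (keyed by `loss`), one eval entry (keyed by `eval_loss`), and a final run summary. A minimal sketch of reading it back, assuming the file is saved locally as `trainer_state.json` and using only the Python standard library:

```python
# Sketch only: assumes a local copy of this file named "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries that carry a "loss" key are per-step training logs.
train_logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]

print(f"{len(train_logs)} training points, "
      f"loss {losses[0]:.4f} -> {losses[-1]:.4f} "
      f"over steps {steps[0]}..{steps[-1]}")

# The last entry is the run summary (train_loss, total_flos, runtime, ...).
summary = state["log_history"][-1]
print("train_loss:", summary.get("train_loss"),
      "total_flos:", summary.get("total_flos"))
```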