{
"best_metric": 0.782258064516129,
"best_model_checkpoint": "distilbert-base-multilingual-cased-hyper-matt/run-b96vw1xk/checkpoint-1600",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 1600,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.025,
"grad_norm": 5.98245096206665,
"learning_rate": 6.86725095664584e-05,
"loss": 0.7926,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 3.779205083847046,
"learning_rate": 6.838517689044811e-05,
"loss": 0.5616,
"step": 20
},
{
"epoch": 0.075,
"grad_norm": 2.2702927589416504,
"learning_rate": 6.809784421443783e-05,
"loss": 0.5017,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 3.7793309688568115,
"learning_rate": 6.781051153842753e-05,
"loss": 0.5253,
"step": 40
},
{
"epoch": 0.125,
"grad_norm": 3.156172037124634,
"learning_rate": 6.752317886241725e-05,
"loss": 0.5188,
"step": 50
},
{
"epoch": 0.15,
"grad_norm": 5.016664028167725,
"learning_rate": 6.723584618640696e-05,
"loss": 0.6648,
"step": 60
},
{
"epoch": 0.175,
"grad_norm": 1.7784695625305176,
"learning_rate": 6.694851351039668e-05,
"loss": 0.5801,
"step": 70
},
{
"epoch": 0.2,
"grad_norm": 11.989581108093262,
"learning_rate": 6.66611808343864e-05,
"loss": 0.496,
"step": 80
},
{
"epoch": 0.225,
"grad_norm": 5.740185260772705,
"learning_rate": 6.63738481583761e-05,
"loss": 0.3496,
"step": 90
},
{
"epoch": 0.25,
"grad_norm": 11.86522388458252,
"learning_rate": 6.608651548236582e-05,
"loss": 0.66,
"step": 100
},
{
"epoch": 0.275,
"grad_norm": 9.92679500579834,
"learning_rate": 6.579918280635554e-05,
"loss": 0.5828,
"step": 110
},
{
"epoch": 0.3,
"grad_norm": 8.171853065490723,
"learning_rate": 6.551185013034525e-05,
"loss": 0.3788,
"step": 120
},
{
"epoch": 0.325,
"grad_norm": 39.43735122680664,
"learning_rate": 6.522451745433495e-05,
"loss": 0.3466,
"step": 130
},
{
"epoch": 0.35,
"grad_norm": 1.7751743793487549,
"learning_rate": 6.493718477832467e-05,
"loss": 0.6856,
"step": 140
},
{
"epoch": 0.375,
"grad_norm": 7.556205749511719,
"learning_rate": 6.464985210231439e-05,
"loss": 0.4796,
"step": 150
},
{
"epoch": 0.4,
"grad_norm": 0.3989664614200592,
"learning_rate": 6.43625194263041e-05,
"loss": 0.4682,
"step": 160
},
{
"epoch": 0.425,
"grad_norm": 0.15143923461437225,
"learning_rate": 6.407518675029382e-05,
"loss": 0.1345,
"step": 170
},
{
"epoch": 0.45,
"grad_norm": 0.18106195330619812,
"learning_rate": 6.378785407428354e-05,
"loss": 0.8303,
"step": 180
},
{
"epoch": 0.475,
"grad_norm": 3.5019495487213135,
"learning_rate": 6.350052139827324e-05,
"loss": 0.8657,
"step": 190
},
{
"epoch": 0.5,
"grad_norm": 1.2987595796585083,
"learning_rate": 6.321318872226296e-05,
"loss": 0.4105,
"step": 200
},
{
"epoch": 0.525,
"grad_norm": 1.5586305856704712,
"learning_rate": 6.292585604625267e-05,
"loss": 0.4088,
"step": 210
},
{
"epoch": 0.55,
"grad_norm": 2.058013677597046,
"learning_rate": 6.263852337024239e-05,
"loss": 0.6887,
"step": 220
},
{
"epoch": 0.575,
"grad_norm": 7.093258857727051,
"learning_rate": 6.23511906942321e-05,
"loss": 0.5411,
"step": 230
},
{
"epoch": 0.6,
"grad_norm": 0.5164862275123596,
"learning_rate": 6.206385801822181e-05,
"loss": 0.1666,
"step": 240
},
{
"epoch": 0.625,
"grad_norm": 17.518468856811523,
"learning_rate": 6.177652534221153e-05,
"loss": 0.6055,
"step": 250
},
{
"epoch": 0.65,
"grad_norm": 0.831436276435852,
"learning_rate": 6.148919266620125e-05,
"loss": 0.6117,
"step": 260
},
{
"epoch": 0.675,
"grad_norm": 0.5291246771812439,
"learning_rate": 6.120185999019096e-05,
"loss": 0.4182,
"step": 270
},
{
"epoch": 0.7,
"grad_norm": 6.783484935760498,
"learning_rate": 6.091452731418067e-05,
"loss": 1.0706,
"step": 280
},
{
"epoch": 0.725,
"grad_norm": 3.2058608531951904,
"learning_rate": 6.062719463817038e-05,
"loss": 0.5444,
"step": 290
},
{
"epoch": 0.75,
"grad_norm": 0.7039594650268555,
"learning_rate": 6.0339861962160095e-05,
"loss": 0.7653,
"step": 300
},
{
"epoch": 0.775,
"grad_norm": 3.093994379043579,
"learning_rate": 6.0052529286149814e-05,
"loss": 0.3543,
"step": 310
},
{
"epoch": 0.8,
"grad_norm": 5.90542459487915,
"learning_rate": 5.976519661013953e-05,
"loss": 0.5,
"step": 320
},
{
"epoch": 0.825,
"grad_norm": 99.38902282714844,
"learning_rate": 5.947786393412924e-05,
"loss": 0.647,
"step": 330
},
{
"epoch": 0.85,
"grad_norm": 2.3223488330841064,
"learning_rate": 5.9190531258118945e-05,
"loss": 0.5445,
"step": 340
},
{
"epoch": 0.875,
"grad_norm": 9.170907974243164,
"learning_rate": 5.8903198582108665e-05,
"loss": 0.5613,
"step": 350
},
{
"epoch": 0.9,
"grad_norm": 13.541004180908203,
"learning_rate": 5.861586590609838e-05,
"loss": 0.6044,
"step": 360
},
{
"epoch": 0.925,
"grad_norm": 5.797645568847656,
"learning_rate": 5.832853323008809e-05,
"loss": 0.269,
"step": 370
},
{
"epoch": 0.95,
"grad_norm": 1.2870230674743652,
"learning_rate": 5.804120055407781e-05,
"loss": 0.3386,
"step": 380
},
{
"epoch": 0.975,
"grad_norm": 33.73625564575195,
"learning_rate": 5.775386787806752e-05,
"loss": 0.5606,
"step": 390
},
{
"epoch": 1.0,
"grad_norm": 0.2897490859031677,
"learning_rate": 5.7466535202057235e-05,
"loss": 0.1254,
"step": 400
},
{
"epoch": 1.0,
"eval_accuracy": 0.8075,
"eval_f1": 0.7116104868913857,
"eval_loss": 0.8214923143386841,
"eval_precision": 0.6597222222222222,
"eval_recall": 0.7723577235772358,
"eval_runtime": 1.5237,
"eval_samples_per_second": 262.523,
"eval_steps_per_second": 16.408,
"step": 400
},
{
"epoch": 1.025,
"grad_norm": 52.07731628417969,
"learning_rate": 5.7179202526046954e-05,
"loss": 0.3581,
"step": 410
},
{
"epoch": 1.05,
"grad_norm": 94.3672103881836,
"learning_rate": 5.689186985003666e-05,
"loss": 0.3194,
"step": 420
},
{
"epoch": 1.075,
"grad_norm": 14.673309326171875,
"learning_rate": 5.660453717402637e-05,
"loss": 0.7354,
"step": 430
},
{
"epoch": 1.1,
"grad_norm": 0.20094430446624756,
"learning_rate": 5.631720449801609e-05,
"loss": 0.1143,
"step": 440
},
{
"epoch": 1.125,
"grad_norm": 0.20857423543930054,
"learning_rate": 5.6029871822005805e-05,
"loss": 0.1254,
"step": 450
},
{
"epoch": 1.15,
"grad_norm": 40.71461868286133,
"learning_rate": 5.574253914599552e-05,
"loss": 0.5379,
"step": 460
},
{
"epoch": 1.175,
"grad_norm": 0.15622340142726898,
"learning_rate": 5.545520646998524e-05,
"loss": 0.3862,
"step": 470
},
{
"epoch": 1.2,
"grad_norm": 0.10756005346775055,
"learning_rate": 5.516787379397495e-05,
"loss": 0.3003,
"step": 480
},
{
"epoch": 1.225,
"grad_norm": 39.451805114746094,
"learning_rate": 5.4880541117964655e-05,
"loss": 0.3146,
"step": 490
},
{
"epoch": 1.25,
"grad_norm": 12.270801544189453,
"learning_rate": 5.459320844195437e-05,
"loss": 0.1625,
"step": 500
},
{
"epoch": 1.275,
"grad_norm": 0.09382086992263794,
"learning_rate": 5.430587576594409e-05,
"loss": 0.217,
"step": 510
},
{
"epoch": 1.3,
"grad_norm": 22.632944107055664,
"learning_rate": 5.40185430899338e-05,
"loss": 0.7097,
"step": 520
},
{
"epoch": 1.325,
"grad_norm": 157.30760192871094,
"learning_rate": 5.373121041392351e-05,
"loss": 0.5452,
"step": 530
},
{
"epoch": 1.35,
"grad_norm": 0.40543922781944275,
"learning_rate": 5.344387773791323e-05,
"loss": 0.2768,
"step": 540
},
{
"epoch": 1.375,
"grad_norm": 35.09124755859375,
"learning_rate": 5.3156545061902945e-05,
"loss": 0.3595,
"step": 550
},
{
"epoch": 1.4,
"grad_norm": 7.142502307891846,
"learning_rate": 5.286921238589266e-05,
"loss": 0.8111,
"step": 560
},
{
"epoch": 1.425,
"grad_norm": 0.29879313707351685,
"learning_rate": 5.258187970988237e-05,
"loss": 0.4086,
"step": 570
},
{
"epoch": 1.45,
"grad_norm": 0.5238781571388245,
"learning_rate": 5.229454703387208e-05,
"loss": 0.326,
"step": 580
},
{
"epoch": 1.475,
"grad_norm": 0.3895421326160431,
"learning_rate": 5.2007214357861795e-05,
"loss": 0.596,
"step": 590
},
{
"epoch": 1.5,
"grad_norm": 0.38695767521858215,
"learning_rate": 5.171988168185151e-05,
"loss": 0.3458,
"step": 600
},
{
"epoch": 1.525,
"grad_norm": 5.45625638961792,
"learning_rate": 5.143254900584123e-05,
"loss": 0.5599,
"step": 610
},
{
"epoch": 1.55,
"grad_norm": 0.23768211901187897,
"learning_rate": 5.114521632983094e-05,
"loss": 0.1361,
"step": 620
},
{
"epoch": 1.575,
"grad_norm": 5.877163887023926,
"learning_rate": 5.085788365382065e-05,
"loss": 0.508,
"step": 630
},
{
"epoch": 1.6,
"grad_norm": 0.17268578708171844,
"learning_rate": 5.0570550977810365e-05,
"loss": 0.3897,
"step": 640
},
{
"epoch": 1.625,
"grad_norm": 0.21864362061023712,
"learning_rate": 5.028321830180008e-05,
"loss": 0.1208,
"step": 650
},
{
"epoch": 1.65,
"grad_norm": 0.19524432718753815,
"learning_rate": 4.999588562578979e-05,
"loss": 0.5982,
"step": 660
},
{
"epoch": 1.675,
"grad_norm": 11.239200592041016,
"learning_rate": 4.970855294977951e-05,
"loss": 1.0051,
"step": 670
},
{
"epoch": 1.7,
"grad_norm": 35.140869140625,
"learning_rate": 4.942122027376922e-05,
"loss": 0.3072,
"step": 680
},
{
"epoch": 1.725,
"grad_norm": 0.23546864092350006,
"learning_rate": 4.9133887597758935e-05,
"loss": 0.3724,
"step": 690
},
{
"epoch": 1.75,
"grad_norm": 0.29642027616500854,
"learning_rate": 4.8846554921748655e-05,
"loss": 0.4596,
"step": 700
},
{
"epoch": 1.775,
"grad_norm": 0.23364444077014923,
"learning_rate": 4.855922224573837e-05,
"loss": 0.5389,
"step": 710
},
{
"epoch": 1.8,
"grad_norm": 0.3376547694206238,
"learning_rate": 4.827188956972807e-05,
"loss": 0.4458,
"step": 720
},
{
"epoch": 1.825,
"grad_norm": 0.7468113303184509,
"learning_rate": 4.7984556893717786e-05,
"loss": 0.319,
"step": 730
},
{
"epoch": 1.85,
"grad_norm": 17.83005714416504,
"learning_rate": 4.7697224217707505e-05,
"loss": 0.5851,
"step": 740
},
{
"epoch": 1.875,
"grad_norm": 16.729862213134766,
"learning_rate": 4.740989154169722e-05,
"loss": 0.3673,
"step": 750
},
{
"epoch": 1.9,
"grad_norm": 0.49153897166252136,
"learning_rate": 4.712255886568693e-05,
"loss": 0.5251,
"step": 760
},
{
"epoch": 1.925,
"grad_norm": 0.2441895753145218,
"learning_rate": 4.683522618967665e-05,
"loss": 0.0277,
"step": 770
},
{
"epoch": 1.95,
"grad_norm": 0.25980064272880554,
"learning_rate": 4.654789351366636e-05,
"loss": 0.3944,
"step": 780
},
{
"epoch": 1.975,
"grad_norm": 32.66210174560547,
"learning_rate": 4.626056083765607e-05,
"loss": 0.4535,
"step": 790
},
{
"epoch": 2.0,
"grad_norm": 0.25817617774009705,
"learning_rate": 4.597322816164579e-05,
"loss": 0.9337,
"step": 800
},
{
"epoch": 2.0,
"eval_accuracy": 0.8475,
"eval_f1": 0.7530364372469636,
"eval_loss": 0.5323335528373718,
"eval_precision": 0.75,
"eval_recall": 0.7560975609756098,
"eval_runtime": 1.5178,
"eval_samples_per_second": 263.536,
"eval_steps_per_second": 16.471,
"step": 800
},
{
"epoch": 2.025,
"grad_norm": 0.13575178384780884,
"learning_rate": 4.56858954856355e-05,
"loss": 0.2043,
"step": 810
},
{
"epoch": 2.05,
"grad_norm": 7.373598575592041,
"learning_rate": 4.539856280962521e-05,
"loss": 0.1924,
"step": 820
},
{
"epoch": 2.075,
"grad_norm": 0.44483116269111633,
"learning_rate": 4.511123013361493e-05,
"loss": 0.4519,
"step": 830
},
{
"epoch": 2.1,
"grad_norm": 0.13262860476970673,
"learning_rate": 4.4823897457604645e-05,
"loss": 0.3771,
"step": 840
},
{
"epoch": 2.125,
"grad_norm": 0.11602489650249481,
"learning_rate": 4.453656478159436e-05,
"loss": 0.2159,
"step": 850
},
{
"epoch": 2.15,
"grad_norm": 0.2182077318429947,
"learning_rate": 4.424923210558408e-05,
"loss": 0.1634,
"step": 860
},
{
"epoch": 2.175,
"grad_norm": 0.06706108897924423,
"learning_rate": 4.396189942957378e-05,
"loss": 0.5848,
"step": 870
},
{
"epoch": 2.2,
"grad_norm": 3.63909649848938,
"learning_rate": 4.3674566753563496e-05,
"loss": 0.3396,
"step": 880
},
{
"epoch": 2.225,
"grad_norm": 0.23763427138328552,
"learning_rate": 4.338723407755321e-05,
"loss": 0.3802,
"step": 890
},
{
"epoch": 2.25,
"grad_norm": 0.18135060369968414,
"learning_rate": 4.309990140154293e-05,
"loss": 0.302,
"step": 900
},
{
"epoch": 2.275,
"grad_norm": 1.8485811948776245,
"learning_rate": 4.281256872553264e-05,
"loss": 0.2594,
"step": 910
},
{
"epoch": 2.3,
"grad_norm": 0.19517390429973602,
"learning_rate": 4.252523604952235e-05,
"loss": 0.2704,
"step": 920
},
{
"epoch": 2.325,
"grad_norm": 6.400895595550537,
"learning_rate": 4.223790337351207e-05,
"loss": 0.2955,
"step": 930
},
{
"epoch": 2.35,
"grad_norm": 0.06735412031412125,
"learning_rate": 4.195057069750178e-05,
"loss": 0.3132,
"step": 940
},
{
"epoch": 2.375,
"grad_norm": 38.814788818359375,
"learning_rate": 4.166323802149149e-05,
"loss": 0.711,
"step": 950
},
{
"epoch": 2.4,
"grad_norm": 22.11007308959961,
"learning_rate": 4.137590534548121e-05,
"loss": 0.1424,
"step": 960
},
{
"epoch": 2.425,
"grad_norm": 10.919371604919434,
"learning_rate": 4.108857266947092e-05,
"loss": 0.5337,
"step": 970
},
{
"epoch": 2.45,
"grad_norm": 0.7433808445930481,
"learning_rate": 4.0801239993460636e-05,
"loss": 0.3412,
"step": 980
},
{
"epoch": 2.475,
"grad_norm": 0.2389543056488037,
"learning_rate": 4.0513907317450355e-05,
"loss": 0.6012,
"step": 990
},
{
"epoch": 2.5,
"grad_norm": 0.2541622221469879,
"learning_rate": 4.022657464144007e-05,
"loss": 0.1928,
"step": 1000
},
{
"epoch": 2.525,
"grad_norm": 12.578326225280762,
"learning_rate": 3.993924196542978e-05,
"loss": 0.4368,
"step": 1010
},
{
"epoch": 2.55,
"grad_norm": 30.986812591552734,
"learning_rate": 3.9651909289419486e-05,
"loss": 0.3239,
"step": 1020
},
{
"epoch": 2.575,
"grad_norm": 21.057029724121094,
"learning_rate": 3.9364576613409206e-05,
"loss": 0.2249,
"step": 1030
},
{
"epoch": 2.6,
"grad_norm": 0.1368197500705719,
"learning_rate": 3.907724393739892e-05,
"loss": 0.0661,
"step": 1040
},
{
"epoch": 2.625,
"grad_norm": 1.004150629043579,
"learning_rate": 3.878991126138863e-05,
"loss": 0.3798,
"step": 1050
},
{
"epoch": 2.65,
"grad_norm": 0.07571277022361755,
"learning_rate": 3.850257858537835e-05,
"loss": 0.1526,
"step": 1060
},
{
"epoch": 2.675,
"grad_norm": 0.07642961293458939,
"learning_rate": 3.821524590936806e-05,
"loss": 0.1443,
"step": 1070
},
{
"epoch": 2.7,
"grad_norm": 0.21973338723182678,
"learning_rate": 3.7927913233357776e-05,
"loss": 0.3005,
"step": 1080
},
{
"epoch": 2.725,
"grad_norm": 0.155073881149292,
"learning_rate": 3.764058055734749e-05,
"loss": 0.5231,
"step": 1090
},
{
"epoch": 2.75,
"grad_norm": 43.628150939941406,
"learning_rate": 3.73532478813372e-05,
"loss": 0.4226,
"step": 1100
},
{
"epoch": 2.775,
"grad_norm": 130.1124267578125,
"learning_rate": 3.7065915205326914e-05,
"loss": 0.3133,
"step": 1110
},
{
"epoch": 2.8,
"grad_norm": 0.1950320452451706,
"learning_rate": 3.677858252931663e-05,
"loss": 0.4493,
"step": 1120
},
{
"epoch": 2.825,
"grad_norm": 0.6290574073791504,
"learning_rate": 3.6491249853306346e-05,
"loss": 0.4953,
"step": 1130
},
{
"epoch": 2.85,
"grad_norm": 0.25695496797561646,
"learning_rate": 3.620391717729606e-05,
"loss": 0.0153,
"step": 1140
},
{
"epoch": 2.875,
"grad_norm": 0.1715123951435089,
"learning_rate": 3.591658450128578e-05,
"loss": 0.0058,
"step": 1150
},
{
"epoch": 2.9,
"grad_norm": 6.9658203125,
"learning_rate": 3.562925182527549e-05,
"loss": 0.3366,
"step": 1160
},
{
"epoch": 2.925,
"grad_norm": 0.6045228242874146,
"learning_rate": 3.5341919149265196e-05,
"loss": 0.136,
"step": 1170
},
{
"epoch": 2.95,
"grad_norm": 30.85106658935547,
"learning_rate": 3.505458647325491e-05,
"loss": 0.5502,
"step": 1180
},
{
"epoch": 2.975,
"grad_norm": 98.63839721679688,
"learning_rate": 3.476725379724463e-05,
"loss": 0.1225,
"step": 1190
},
{
"epoch": 3.0,
"grad_norm": 18.41563606262207,
"learning_rate": 3.447992112123434e-05,
"loss": 0.3212,
"step": 1200
},
{
"epoch": 3.0,
"eval_accuracy": 0.855,
"eval_f1": 0.7387387387387387,
"eval_loss": 0.6354687213897705,
"eval_precision": 0.8282828282828283,
"eval_recall": 0.6666666666666666,
"eval_runtime": 1.5216,
"eval_samples_per_second": 262.886,
"eval_steps_per_second": 16.43,
"step": 1200
},
{
"epoch": 3.025,
"grad_norm": 0.07740258425474167,
"learning_rate": 3.4192588445224053e-05,
"loss": 0.0831,
"step": 1210
},
{
"epoch": 3.05,
"grad_norm": 0.039124973118305206,
"learning_rate": 3.3905255769213766e-05,
"loss": 0.0094,
"step": 1220
},
{
"epoch": 3.075,
"grad_norm": 0.262672483921051,
"learning_rate": 3.361792309320348e-05,
"loss": 0.0112,
"step": 1230
},
{
"epoch": 3.1,
"grad_norm": 0.05633651092648506,
"learning_rate": 3.33305904171932e-05,
"loss": 0.5297,
"step": 1240
},
{
"epoch": 3.125,
"grad_norm": 0.2261894792318344,
"learning_rate": 3.304325774118291e-05,
"loss": 0.0071,
"step": 1250
},
{
"epoch": 3.15,
"grad_norm": 0.0650223046541214,
"learning_rate": 3.2755925065172623e-05,
"loss": 0.4392,
"step": 1260
},
{
"epoch": 3.175,
"grad_norm": 0.189041867852211,
"learning_rate": 3.2468592389162336e-05,
"loss": 0.2144,
"step": 1270
},
{
"epoch": 3.2,
"grad_norm": 0.08814287185668945,
"learning_rate": 3.218125971315205e-05,
"loss": 0.111,
"step": 1280
},
{
"epoch": 3.225,
"grad_norm": 0.0703461691737175,
"learning_rate": 3.189392703714177e-05,
"loss": 0.1813,
"step": 1290
},
{
"epoch": 3.25,
"grad_norm": 0.10721047967672348,
"learning_rate": 3.160659436113148e-05,
"loss": 0.1438,
"step": 1300
},
{
"epoch": 3.275,
"grad_norm": 37.45782470703125,
"learning_rate": 3.1319261685121193e-05,
"loss": 0.2178,
"step": 1310
},
{
"epoch": 3.3,
"grad_norm": 0.05508900806307793,
"learning_rate": 3.1031929009110906e-05,
"loss": 0.0099,
"step": 1320
},
{
"epoch": 3.325,
"grad_norm": 0.15349701046943665,
"learning_rate": 3.0744596333100625e-05,
"loss": 0.1482,
"step": 1330
},
{
"epoch": 3.35,
"grad_norm": 0.027314379811286926,
"learning_rate": 3.0457263657090335e-05,
"loss": 0.1671,
"step": 1340
},
{
"epoch": 3.375,
"grad_norm": 0.04863934591412544,
"learning_rate": 3.0169930981080047e-05,
"loss": 0.084,
"step": 1350
},
{
"epoch": 3.4,
"grad_norm": 0.3029322028160095,
"learning_rate": 2.9882598305069763e-05,
"loss": 0.3081,
"step": 1360
},
{
"epoch": 3.425,
"grad_norm": 15.118460655212402,
"learning_rate": 2.9595265629059473e-05,
"loss": 0.6964,
"step": 1370
},
{
"epoch": 3.45,
"grad_norm": 0.695101797580719,
"learning_rate": 2.930793295304919e-05,
"loss": 0.2271,
"step": 1380
},
{
"epoch": 3.475,
"grad_norm": 0.11346480995416641,
"learning_rate": 2.9020600277038905e-05,
"loss": 0.0131,
"step": 1390
},
{
"epoch": 3.5,
"grad_norm": 0.31482186913490295,
"learning_rate": 2.8733267601028617e-05,
"loss": 0.113,
"step": 1400
},
{
"epoch": 3.525,
"grad_norm": 6.88314962387085,
"learning_rate": 2.844593492501833e-05,
"loss": 0.2534,
"step": 1410
},
{
"epoch": 3.55,
"grad_norm": 0.08088881522417068,
"learning_rate": 2.8158602249008046e-05,
"loss": 0.3451,
"step": 1420
},
{
"epoch": 3.575,
"grad_norm": 0.0832696482539177,
"learning_rate": 2.787126957299776e-05,
"loss": 0.1342,
"step": 1430
},
{
"epoch": 3.6,
"grad_norm": 3.7705042362213135,
"learning_rate": 2.7583936896987475e-05,
"loss": 0.117,
"step": 1440
},
{
"epoch": 3.625,
"grad_norm": 0.1864766925573349,
"learning_rate": 2.7296604220977184e-05,
"loss": 0.1679,
"step": 1450
},
{
"epoch": 3.65,
"grad_norm": 0.06913910061120987,
"learning_rate": 2.70092715449669e-05,
"loss": 0.0648,
"step": 1460
},
{
"epoch": 3.675,
"grad_norm": 0.08957911282777786,
"learning_rate": 2.6721938868956616e-05,
"loss": 0.1713,
"step": 1470
},
{
"epoch": 3.7,
"grad_norm": 0.07312174141407013,
"learning_rate": 2.643460619294633e-05,
"loss": 0.0859,
"step": 1480
},
{
"epoch": 3.725,
"grad_norm": 0.062311191111803055,
"learning_rate": 2.614727351693604e-05,
"loss": 0.4953,
"step": 1490
},
{
"epoch": 3.75,
"grad_norm": 2.242743492126465,
"learning_rate": 2.5859940840925754e-05,
"loss": 0.3032,
"step": 1500
},
{
"epoch": 3.775,
"grad_norm": 7.94291877746582,
"learning_rate": 2.557260816491547e-05,
"loss": 0.2461,
"step": 1510
},
{
"epoch": 3.8,
"grad_norm": 0.12085115909576416,
"learning_rate": 2.5285275488905183e-05,
"loss": 0.2241,
"step": 1520
},
{
"epoch": 3.825,
"grad_norm": 9.449928283691406,
"learning_rate": 2.4997942812894895e-05,
"loss": 0.4273,
"step": 1530
},
{
"epoch": 3.85,
"grad_norm": 0.29892095923423767,
"learning_rate": 2.471061013688461e-05,
"loss": 0.1848,
"step": 1540
},
{
"epoch": 3.875,
"grad_norm": 0.09345678985118866,
"learning_rate": 2.4423277460874327e-05,
"loss": 0.2347,
"step": 1550
},
{
"epoch": 3.9,
"grad_norm": 0.1047554761171341,
"learning_rate": 2.4135944784864037e-05,
"loss": 0.2526,
"step": 1560
},
{
"epoch": 3.925,
"grad_norm": 0.08117042481899261,
"learning_rate": 2.3848612108853753e-05,
"loss": 0.0051,
"step": 1570
},
{
"epoch": 3.95,
"grad_norm": 0.0665692463517189,
"learning_rate": 2.3561279432843465e-05,
"loss": 0.0721,
"step": 1580
},
{
"epoch": 3.975,
"grad_norm": 0.12216989696025848,
"learning_rate": 2.327394675683318e-05,
"loss": 0.488,
"step": 1590
},
{
"epoch": 4.0,
"grad_norm": 0.2229517549276352,
"learning_rate": 2.2986614080822894e-05,
"loss": 0.3735,
"step": 1600
},
{
"epoch": 4.0,
"eval_accuracy": 0.865,
"eval_f1": 0.782258064516129,
"eval_loss": 0.6418908834457397,
"eval_precision": 0.776,
"eval_recall": 0.7886178861788617,
"eval_runtime": 1.5486,
"eval_samples_per_second": 258.306,
"eval_steps_per_second": 16.144,
"step": 1600
}
],
"logging_steps": 10,
"max_steps": 2400,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 847261481803776.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": {
"_wandb": {},
"assignments": {},
"learning_rate": 6.895984224246868e-05,
"metric": "eval/loss",
"num_train_epochs": 6,
"per_device_train_batch_size": 4,
"seed": 31
}
}