{
"best_metric": 2.07828688621521,
"best_model_checkpoint": "fr-mina/checkpoint-4745",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 4745,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.026343519494204427,
"grad_norm": 53.76327133178711,
"learning_rate": 4.390586582367405e-07,
"loss": 8.0275,
"step": 25
},
{
"epoch": 0.05268703898840885,
"grad_norm": 31.924686431884766,
"learning_rate": 8.78117316473481e-07,
"loss": 8.0678,
"step": 50
},
{
"epoch": 0.07903055848261328,
"grad_norm": 39.08976745605469,
"learning_rate": 1.3171759747102214e-06,
"loss": 7.683,
"step": 75
},
{
"epoch": 0.1053740779768177,
"grad_norm": 35.28089904785156,
"learning_rate": 1.756234632946962e-06,
"loss": 7.5655,
"step": 100
},
{
"epoch": 0.13171759747102213,
"grad_norm": 26.91787338256836,
"learning_rate": 2.1952932911837023e-06,
"loss": 7.4298,
"step": 125
},
{
"epoch": 0.15806111696522657,
"grad_norm": 36.86225128173828,
"learning_rate": 2.634351949420443e-06,
"loss": 7.1249,
"step": 150
},
{
"epoch": 0.18440463645943098,
"grad_norm": 35.9243278503418,
"learning_rate": 3.073410607657183e-06,
"loss": 6.6859,
"step": 175
},
{
"epoch": 0.2107481559536354,
"grad_norm": 31.12093734741211,
"learning_rate": 3.512469265893924e-06,
"loss": 6.5787,
"step": 200
},
{
"epoch": 0.23709167544783982,
"grad_norm": 21.474767684936523,
"learning_rate": 3.951527924130664e-06,
"loss": 6.3458,
"step": 225
},
{
"epoch": 0.26343519494204426,
"grad_norm": 29.76708984375,
"learning_rate": 4.3905865823674045e-06,
"loss": 6.2686,
"step": 250
},
{
"epoch": 0.2897787144362487,
"grad_norm": 26.34191131591797,
"learning_rate": 4.829645240604145e-06,
"loss": 5.9099,
"step": 275
},
{
"epoch": 0.31612223393045313,
"grad_norm": 25.640949249267578,
"learning_rate": 5.268703898840886e-06,
"loss": 5.1909,
"step": 300
},
{
"epoch": 0.3424657534246575,
"grad_norm": 28.177034378051758,
"learning_rate": 5.707762557077626e-06,
"loss": 5.6095,
"step": 325
},
{
"epoch": 0.36880927291886195,
"grad_norm": 31.351806640625,
"learning_rate": 6.146821215314366e-06,
"loss": 5.356,
"step": 350
},
{
"epoch": 0.3951527924130664,
"grad_norm": 31.123451232910156,
"learning_rate": 6.585879873551107e-06,
"loss": 5.3215,
"step": 375
},
{
"epoch": 0.4214963119072708,
"grad_norm": 25.988691329956055,
"learning_rate": 7.024938531787848e-06,
"loss": 4.9161,
"step": 400
},
{
"epoch": 0.44783983140147526,
"grad_norm": 20.850446701049805,
"learning_rate": 7.463997190024588e-06,
"loss": 5.0717,
"step": 425
},
{
"epoch": 0.47418335089567965,
"grad_norm": 28.87261962890625,
"learning_rate": 7.903055848261329e-06,
"loss": 4.7932,
"step": 450
},
{
"epoch": 0.5005268703898841,
"grad_norm": 39.34148406982422,
"learning_rate": 8.342114506498068e-06,
"loss": 4.9443,
"step": 475
},
{
"epoch": 0.5268703898840885,
"grad_norm": 24.568782806396484,
"learning_rate": 8.781173164734809e-06,
"loss": 4.6028,
"step": 500
},
{
"epoch": 0.553213909378293,
"grad_norm": 38.465816497802734,
"learning_rate": 9.22023182297155e-06,
"loss": 4.2893,
"step": 525
},
{
"epoch": 0.5795574288724974,
"grad_norm": 22.170591354370117,
"learning_rate": 9.65929048120829e-06,
"loss": 4.3665,
"step": 550
},
{
"epoch": 0.6059009483667018,
"grad_norm": 25.342151641845703,
"learning_rate": 1.009834913944503e-05,
"loss": 4.4401,
"step": 575
},
{
"epoch": 0.6322444678609063,
"grad_norm": 29.228736877441406,
"learning_rate": 1.0537407797681771e-05,
"loss": 4.0221,
"step": 600
},
{
"epoch": 0.6585879873551106,
"grad_norm": 26.896635055541992,
"learning_rate": 1.0976466455918512e-05,
"loss": 3.9198,
"step": 625
},
{
"epoch": 0.684931506849315,
"grad_norm": 22.59519386291504,
"learning_rate": 1.1415525114155251e-05,
"loss": 3.9569,
"step": 650
},
{
"epoch": 0.7112750263435195,
"grad_norm": 27.328128814697266,
"learning_rate": 1.1854583772391992e-05,
"loss": 3.7594,
"step": 675
},
{
"epoch": 0.7376185458377239,
"grad_norm": 28.952634811401367,
"learning_rate": 1.2293642430628732e-05,
"loss": 3.6405,
"step": 700
},
{
"epoch": 0.7639620653319283,
"grad_norm": 34.70003890991211,
"learning_rate": 1.2732701088865473e-05,
"loss": 3.5767,
"step": 725
},
{
"epoch": 0.7903055848261328,
"grad_norm": 31.843168258666992,
"learning_rate": 1.3171759747102214e-05,
"loss": 3.424,
"step": 750
},
{
"epoch": 0.8166491043203372,
"grad_norm": 43.431888580322266,
"learning_rate": 1.3610818405338955e-05,
"loss": 3.6013,
"step": 775
},
{
"epoch": 0.8429926238145417,
"grad_norm": 24.29863929748535,
"learning_rate": 1.4049877063575696e-05,
"loss": 3.6026,
"step": 800
},
{
"epoch": 0.8693361433087461,
"grad_norm": 18.842683792114258,
"learning_rate": 1.4488935721812435e-05,
"loss": 3.2906,
"step": 825
},
{
"epoch": 0.8956796628029505,
"grad_norm": 24.327327728271484,
"learning_rate": 1.4927994380049176e-05,
"loss": 3.3199,
"step": 850
},
{
"epoch": 0.9220231822971549,
"grad_norm": 34.937644958496094,
"learning_rate": 1.536705303828592e-05,
"loss": 3.3293,
"step": 875
},
{
"epoch": 0.9483667017913593,
"grad_norm": 20.94723129272461,
"learning_rate": 1.5806111696522658e-05,
"loss": 3.061,
"step": 900
},
{
"epoch": 0.9747102212855637,
"grad_norm": 36.616371154785156,
"learning_rate": 1.6245170354759397e-05,
"loss": 3.3562,
"step": 925
},
{
"epoch": 1.0,
"eval_gen_len": 29.7495,
"eval_loss": 3.1300747394561768,
"eval_rouge1": 14.3649,
"eval_rouge2": 3.2092,
"eval_rougeL": 14.1859,
"eval_rougeLsum": 14.1403,
"eval_runtime": 1240.6646,
"eval_samples_per_second": 0.383,
"eval_steps_per_second": 0.096,
"step": 949
},
{
"epoch": 1.0010537407797682,
"grad_norm": 26.46944236755371,
"learning_rate": 1.6684229012996136e-05,
"loss": 3.2056,
"step": 950
},
{
"epoch": 1.0273972602739727,
"grad_norm": 24.677677154541016,
"learning_rate": 1.7123287671232875e-05,
"loss": 3.0972,
"step": 975
},
{
"epoch": 1.053740779768177,
"grad_norm": 44.66987609863281,
"learning_rate": 1.7562346329469618e-05,
"loss": 3.1316,
"step": 1000
},
{
"epoch": 1.0800842992623814,
"grad_norm": 22.583946228027344,
"learning_rate": 1.8001404987706357e-05,
"loss": 2.9408,
"step": 1025
},
{
"epoch": 1.106427818756586,
"grad_norm": 37.008148193359375,
"learning_rate": 1.84404636459431e-05,
"loss": 3.3225,
"step": 1050
},
{
"epoch": 1.1327713382507902,
"grad_norm": 31.299516677856445,
"learning_rate": 1.887952230417984e-05,
"loss": 2.8608,
"step": 1075
},
{
"epoch": 1.1591148577449948,
"grad_norm": 41.19516372680664,
"learning_rate": 1.931858096241658e-05,
"loss": 2.9428,
"step": 1100
},
{
"epoch": 1.1854583772391991,
"grad_norm": 36.30160903930664,
"learning_rate": 1.975763962065332e-05,
"loss": 3.0541,
"step": 1125
},
{
"epoch": 1.2118018967334037,
"grad_norm": 29.6497859954834,
"learning_rate": 2.019669827889006e-05,
"loss": 3.0771,
"step": 1150
},
{
"epoch": 1.238145416227608,
"grad_norm": 28.226837158203125,
"learning_rate": 2.0635756937126803e-05,
"loss": 2.8116,
"step": 1175
},
{
"epoch": 1.2644889357218125,
"grad_norm": 37.17683029174805,
"learning_rate": 2.1074815595363542e-05,
"loss": 2.709,
"step": 1200
},
{
"epoch": 1.2908324552160169,
"grad_norm": 24.86113929748535,
"learning_rate": 2.1513874253600282e-05,
"loss": 2.8454,
"step": 1225
},
{
"epoch": 1.3171759747102212,
"grad_norm": 20.32333755493164,
"learning_rate": 2.1952932911837024e-05,
"loss": 3.0137,
"step": 1250
},
{
"epoch": 1.3435194942044257,
"grad_norm": 20.579383850097656,
"learning_rate": 2.2391991570073764e-05,
"loss": 3.064,
"step": 1275
},
{
"epoch": 1.36986301369863,
"grad_norm": 34.56845474243164,
"learning_rate": 2.2831050228310503e-05,
"loss": 2.7602,
"step": 1300
},
{
"epoch": 1.3962065331928346,
"grad_norm": 26.004846572875977,
"learning_rate": 2.3270108886547242e-05,
"loss": 2.6513,
"step": 1325
},
{
"epoch": 1.422550052687039,
"grad_norm": 27.322162628173828,
"learning_rate": 2.3709167544783985e-05,
"loss": 3.0455,
"step": 1350
},
{
"epoch": 1.4488935721812435,
"grad_norm": 25.812335968017578,
"learning_rate": 2.4148226203020724e-05,
"loss": 2.6421,
"step": 1375
},
{
"epoch": 1.4752370916754478,
"grad_norm": 22.53229522705078,
"learning_rate": 2.4587284861257463e-05,
"loss": 2.6565,
"step": 1400
},
{
"epoch": 1.5015806111696524,
"grad_norm": 21.36598777770996,
"learning_rate": 2.5026343519494206e-05,
"loss": 2.8773,
"step": 1425
},
{
"epoch": 1.5279241306638567,
"grad_norm": 21.852357864379883,
"learning_rate": 2.5465402177730945e-05,
"loss": 2.5911,
"step": 1450
},
{
"epoch": 1.554267650158061,
"grad_norm": 24.827373504638672,
"learning_rate": 2.5904460835967688e-05,
"loss": 2.8661,
"step": 1475
},
{
"epoch": 1.5806111696522656,
"grad_norm": 26.9720458984375,
"learning_rate": 2.6343519494204427e-05,
"loss": 2.6055,
"step": 1500
},
{
"epoch": 1.60695468914647,
"grad_norm": 23.86491584777832,
"learning_rate": 2.6782578152441166e-05,
"loss": 2.4583,
"step": 1525
},
{
"epoch": 1.6332982086406744,
"grad_norm": 35.060543060302734,
"learning_rate": 2.722163681067791e-05,
"loss": 2.7086,
"step": 1550
},
{
"epoch": 1.6596417281348788,
"grad_norm": 33.904869079589844,
"learning_rate": 2.766069546891465e-05,
"loss": 3.0039,
"step": 1575
},
{
"epoch": 1.685985247629083,
"grad_norm": 16.747236251831055,
"learning_rate": 2.809975412715139e-05,
"loss": 2.7121,
"step": 1600
},
{
"epoch": 1.7123287671232876,
"grad_norm": 29.914405822753906,
"learning_rate": 2.853881278538813e-05,
"loss": 2.4394,
"step": 1625
},
{
"epoch": 1.7386722866174922,
"grad_norm": 34.94831848144531,
"learning_rate": 2.897787144362487e-05,
"loss": 2.2323,
"step": 1650
},
{
"epoch": 1.7650158061116965,
"grad_norm": 23.3820743560791,
"learning_rate": 2.9416930101861612e-05,
"loss": 2.2026,
"step": 1675
},
{
"epoch": 1.7913593256059008,
"grad_norm": 23.368724822998047,
"learning_rate": 2.985598876009835e-05,
"loss": 2.3719,
"step": 1700
},
{
"epoch": 1.8177028451001054,
"grad_norm": 23.903703689575195,
"learning_rate": 3.0295047418335094e-05,
"loss": 2.6685,
"step": 1725
},
{
"epoch": 1.84404636459431,
"grad_norm": 28.905014038085938,
"learning_rate": 3.073410607657184e-05,
"loss": 2.798,
"step": 1750
},
{
"epoch": 1.8703898840885143,
"grad_norm": 37.410953521728516,
"learning_rate": 3.117316473480857e-05,
"loss": 2.8655,
"step": 1775
},
{
"epoch": 1.8967334035827186,
"grad_norm": 30.735639572143555,
"learning_rate": 3.1612223393045315e-05,
"loss": 2.4698,
"step": 1800
},
{
"epoch": 1.9230769230769231,
"grad_norm": 17.455341339111328,
"learning_rate": 3.205128205128206e-05,
"loss": 2.5119,
"step": 1825
},
{
"epoch": 1.9494204425711275,
"grad_norm": 34.35090255737305,
"learning_rate": 3.2490340709518794e-05,
"loss": 2.6294,
"step": 1850
},
{
"epoch": 1.975763962065332,
"grad_norm": 22.377056121826172,
"learning_rate": 3.292939936775553e-05,
"loss": 2.5728,
"step": 1875
},
{
"epoch": 2.0,
"eval_gen_len": 8.5832,
"eval_loss": 2.4636101722717285,
"eval_rouge1": 25.9213,
"eval_rouge2": 9.8991,
"eval_rougeL": 25.4651,
"eval_rougeLsum": 25.4305,
"eval_runtime": 181.2174,
"eval_samples_per_second": 2.621,
"eval_steps_per_second": 0.657,
"step": 1898
},
{
"epoch": 2.0021074815595363,
"grad_norm": 24.795652389526367,
"learning_rate": 3.336845802599227e-05,
"loss": 2.0971,
"step": 1900
},
{
"epoch": 2.0284510010537407,
"grad_norm": 42.50971984863281,
"learning_rate": 3.3807516684229015e-05,
"loss": 2.2645,
"step": 1925
},
{
"epoch": 2.0547945205479454,
"grad_norm": 28.93120765686035,
"learning_rate": 3.424657534246575e-05,
"loss": 2.2754,
"step": 1950
},
{
"epoch": 2.0811380400421498,
"grad_norm": 32.35176467895508,
"learning_rate": 3.4685634000702494e-05,
"loss": 1.9605,
"step": 1975
},
{
"epoch": 2.107481559536354,
"grad_norm": 20.049243927001953,
"learning_rate": 3.5124692658939236e-05,
"loss": 1.985,
"step": 2000
},
{
"epoch": 2.1338250790305584,
"grad_norm": 21.458831787109375,
"learning_rate": 3.556375131717597e-05,
"loss": 1.9294,
"step": 2025
},
{
"epoch": 2.1601685985247627,
"grad_norm": 29.076791763305664,
"learning_rate": 3.6002809975412715e-05,
"loss": 2.5018,
"step": 2050
},
{
"epoch": 2.1865121180189675,
"grad_norm": 17.459365844726562,
"learning_rate": 3.644186863364946e-05,
"loss": 2.2102,
"step": 2075
},
{
"epoch": 2.212855637513172,
"grad_norm": 35.115997314453125,
"learning_rate": 3.68809272918862e-05,
"loss": 2.2814,
"step": 2100
},
{
"epoch": 2.239199157007376,
"grad_norm": 20.998336791992188,
"learning_rate": 3.7319985950122936e-05,
"loss": 2.1817,
"step": 2125
},
{
"epoch": 2.2655426765015805,
"grad_norm": 24.086315155029297,
"learning_rate": 3.775904460835968e-05,
"loss": 2.3293,
"step": 2150
},
{
"epoch": 2.291886195995785,
"grad_norm": 15.898116111755371,
"learning_rate": 3.819810326659642e-05,
"loss": 2.3658,
"step": 2175
},
{
"epoch": 2.3182297154899896,
"grad_norm": 44.00349807739258,
"learning_rate": 3.863716192483316e-05,
"loss": 2.2536,
"step": 2200
},
{
"epoch": 2.344573234984194,
"grad_norm": 30.016952514648438,
"learning_rate": 3.90762205830699e-05,
"loss": 2.1975,
"step": 2225
},
{
"epoch": 2.3709167544783982,
"grad_norm": 29.138978958129883,
"learning_rate": 3.951527924130664e-05,
"loss": 2.1086,
"step": 2250
},
{
"epoch": 2.3972602739726026,
"grad_norm": 22.48296546936035,
"learning_rate": 3.995433789954338e-05,
"loss": 1.8731,
"step": 2275
},
{
"epoch": 2.4236037934668073,
"grad_norm": 38.969642639160156,
"learning_rate": 4.039339655778012e-05,
"loss": 2.4843,
"step": 2300
},
{
"epoch": 2.4499473129610116,
"grad_norm": 46.12045669555664,
"learning_rate": 4.0832455216016864e-05,
"loss": 2.0695,
"step": 2325
},
{
"epoch": 2.476290832455216,
"grad_norm": 16.667869567871094,
"learning_rate": 4.1271513874253606e-05,
"loss": 2.0357,
"step": 2350
},
{
"epoch": 2.5026343519494203,
"grad_norm": 28.602094650268555,
"learning_rate": 4.171057253249034e-05,
"loss": 1.9143,
"step": 2375
},
{
"epoch": 2.528977871443625,
"grad_norm": 21.20049476623535,
"learning_rate": 4.2149631190727085e-05,
"loss": 2.329,
"step": 2400
},
{
"epoch": 2.5553213909378294,
"grad_norm": 24.602462768554688,
"learning_rate": 4.258868984896383e-05,
"loss": 1.9732,
"step": 2425
},
{
"epoch": 2.5816649104320337,
"grad_norm": 41.749755859375,
"learning_rate": 4.3027748507200563e-05,
"loss": 2.3922,
"step": 2450
},
{
"epoch": 2.608008429926238,
"grad_norm": 29.759376525878906,
"learning_rate": 4.3466807165437306e-05,
"loss": 2.1928,
"step": 2475
},
{
"epoch": 2.6343519494204424,
"grad_norm": 27.628883361816406,
"learning_rate": 4.390586582367405e-05,
"loss": 2.6254,
"step": 2500
},
{
"epoch": 2.660695468914647,
"grad_norm": 20.805973052978516,
"learning_rate": 4.4344924481910785e-05,
"loss": 1.937,
"step": 2525
},
{
"epoch": 2.6870389884088515,
"grad_norm": 24.72698974609375,
"learning_rate": 4.478398314014753e-05,
"loss": 1.9697,
"step": 2550
},
{
"epoch": 2.713382507903056,
"grad_norm": 32.659202575683594,
"learning_rate": 4.522304179838426e-05,
"loss": 1.945,
"step": 2575
},
{
"epoch": 2.73972602739726,
"grad_norm": 30.338882446289062,
"learning_rate": 4.5662100456621006e-05,
"loss": 2.3917,
"step": 2600
},
{
"epoch": 2.7660695468914644,
"grad_norm": 24.97830581665039,
"learning_rate": 4.610115911485774e-05,
"loss": 2.3004,
"step": 2625
},
{
"epoch": 2.792413066385669,
"grad_norm": 20.519182205200195,
"learning_rate": 4.6540217773094484e-05,
"loss": 2.0284,
"step": 2650
},
{
"epoch": 2.8187565858798735,
"grad_norm": 18.327741622924805,
"learning_rate": 4.697927643133123e-05,
"loss": 1.8822,
"step": 2675
},
{
"epoch": 2.845100105374078,
"grad_norm": 28.89052963256836,
"learning_rate": 4.741833508956797e-05,
"loss": 2.3916,
"step": 2700
},
{
"epoch": 2.8714436248682826,
"grad_norm": 17.761756896972656,
"learning_rate": 4.7857393747804706e-05,
"loss": 2.1041,
"step": 2725
},
{
"epoch": 2.897787144362487,
"grad_norm": 16.94314193725586,
"learning_rate": 4.829645240604145e-05,
"loss": 2.0099,
"step": 2750
},
{
"epoch": 2.9241306638566913,
"grad_norm": 28.35548973083496,
"learning_rate": 4.873551106427819e-05,
"loss": 1.9854,
"step": 2775
},
{
"epoch": 2.9504741833508956,
"grad_norm": 20.59808921813965,
"learning_rate": 4.917456972251493e-05,
"loss": 2.0036,
"step": 2800
},
{
"epoch": 2.9768177028451,
"grad_norm": 24.746854782104492,
"learning_rate": 4.961362838075167e-05,
"loss": 2.0894,
"step": 2825
},
{
"epoch": 3.0,
"eval_gen_len": 18.1389,
"eval_loss": 2.2465784549713135,
"eval_rouge1": 27.1383,
"eval_rouge2": 12.4631,
"eval_rougeL": 26.7601,
"eval_rougeLsum": 26.8419,
"eval_runtime": 963.3991,
"eval_samples_per_second": 0.493,
"eval_steps_per_second": 0.124,
"step": 2847
},
{
"epoch": 3.0031612223393047,
"grad_norm": 15.351967811584473,
"learning_rate": 4.9994145884556845e-05,
"loss": 1.7484,
"step": 2850
},
{
"epoch": 3.029504741833509,
"grad_norm": 12.023371696472168,
"learning_rate": 4.994536158919721e-05,
"loss": 1.6976,
"step": 2875
},
{
"epoch": 3.0558482613277134,
"grad_norm": 16.859472274780273,
"learning_rate": 4.989657729383757e-05,
"loss": 1.5859,
"step": 2900
},
{
"epoch": 3.0821917808219177,
"grad_norm": 11.666887283325195,
"learning_rate": 4.984779299847793e-05,
"loss": 1.518,
"step": 2925
},
{
"epoch": 3.108535300316122,
"grad_norm": 16.126272201538086,
"learning_rate": 4.979900870311829e-05,
"loss": 1.6562,
"step": 2950
},
{
"epoch": 3.134878819810327,
"grad_norm": 16.743412017822266,
"learning_rate": 4.9750224407758655e-05,
"loss": 1.7747,
"step": 2975
},
{
"epoch": 3.161222339304531,
"grad_norm": 21.94359588623047,
"learning_rate": 4.9701440112399017e-05,
"loss": 1.3415,
"step": 3000
},
{
"epoch": 3.1875658587987354,
"grad_norm": 20.92635154724121,
"learning_rate": 4.965265581703938e-05,
"loss": 1.5768,
"step": 3025
},
{
"epoch": 3.2139093782929398,
"grad_norm": 19.112150192260742,
"learning_rate": 4.960387152167975e-05,
"loss": 1.4553,
"step": 3050
},
{
"epoch": 3.2402528977871445,
"grad_norm": 14.94166088104248,
"learning_rate": 4.955508722632011e-05,
"loss": 1.5237,
"step": 3075
},
{
"epoch": 3.266596417281349,
"grad_norm": 32.23541259765625,
"learning_rate": 4.950630293096047e-05,
"loss": 1.7323,
"step": 3100
},
{
"epoch": 3.292939936775553,
"grad_norm": 15.371477127075195,
"learning_rate": 4.945751863560083e-05,
"loss": 1.661,
"step": 3125
},
{
"epoch": 3.3192834562697575,
"grad_norm": 17.414264678955078,
"learning_rate": 4.9408734340241195e-05,
"loss": 1.8292,
"step": 3150
},
{
"epoch": 3.3456269757639623,
"grad_norm": 24.73505401611328,
"learning_rate": 4.935995004488155e-05,
"loss": 1.4615,
"step": 3175
},
{
"epoch": 3.3719704952581666,
"grad_norm": 33.94083023071289,
"learning_rate": 4.931116574952191e-05,
"loss": 1.7071,
"step": 3200
},
{
"epoch": 3.398314014752371,
"grad_norm": 38.488319396972656,
"learning_rate": 4.9262381454162274e-05,
"loss": 1.5914,
"step": 3225
},
{
"epoch": 3.4246575342465753,
"grad_norm": 15.29854965209961,
"learning_rate": 4.9213597158802636e-05,
"loss": 1.529,
"step": 3250
},
{
"epoch": 3.4510010537407796,
"grad_norm": 39.15888977050781,
"learning_rate": 4.9164812863443005e-05,
"loss": 1.6042,
"step": 3275
},
{
"epoch": 3.4773445732349844,
"grad_norm": 37.293724060058594,
"learning_rate": 4.9116028568083366e-05,
"loss": 1.4374,
"step": 3300
},
{
"epoch": 3.5036880927291887,
"grad_norm": 27.48360824584961,
"learning_rate": 4.906724427272373e-05,
"loss": 1.5287,
"step": 3325
},
{
"epoch": 3.530031612223393,
"grad_norm": 13.632729530334473,
"learning_rate": 4.901845997736409e-05,
"loss": 1.563,
"step": 3350
},
{
"epoch": 3.5563751317175973,
"grad_norm": 20.885683059692383,
"learning_rate": 4.896967568200445e-05,
"loss": 1.638,
"step": 3375
},
{
"epoch": 3.5827186512118017,
"grad_norm": 46.394649505615234,
"learning_rate": 4.8920891386644814e-05,
"loss": 1.6575,
"step": 3400
},
{
"epoch": 3.6090621707060064,
"grad_norm": 37.50140380859375,
"learning_rate": 4.8872107091285176e-05,
"loss": 1.7124,
"step": 3425
},
{
"epoch": 3.6354056902002108,
"grad_norm": 20.98285675048828,
"learning_rate": 4.882332279592554e-05,
"loss": 1.6254,
"step": 3450
},
{
"epoch": 3.661749209694415,
"grad_norm": 11.166362762451172,
"learning_rate": 4.87745385005659e-05,
"loss": 1.69,
"step": 3475
},
{
"epoch": 3.68809272918862,
"grad_norm": 29.99367904663086,
"learning_rate": 4.872575420520626e-05,
"loss": 1.5022,
"step": 3500
},
{
"epoch": 3.714436248682824,
"grad_norm": 24.22369384765625,
"learning_rate": 4.8676969909846624e-05,
"loss": 1.8366,
"step": 3525
},
{
"epoch": 3.7407797681770285,
"grad_norm": 18.417123794555664,
"learning_rate": 4.8628185614486986e-05,
"loss": 1.5953,
"step": 3550
},
{
"epoch": 3.767123287671233,
"grad_norm": 31.92102813720703,
"learning_rate": 4.857940131912735e-05,
"loss": 1.8851,
"step": 3575
},
{
"epoch": 3.793466807165437,
"grad_norm": 19.559165954589844,
"learning_rate": 4.853061702376771e-05,
"loss": 1.3543,
"step": 3600
},
{
"epoch": 3.819810326659642,
"grad_norm": 27.526994705200195,
"learning_rate": 4.848183272840807e-05,
"loss": 1.4503,
"step": 3625
},
{
"epoch": 3.8461538461538463,
"grad_norm": 21.375267028808594,
"learning_rate": 4.8433048433048433e-05,
"loss": 1.7254,
"step": 3650
},
{
"epoch": 3.8724973656480506,
"grad_norm": 23.377002716064453,
"learning_rate": 4.8384264137688795e-05,
"loss": 1.6948,
"step": 3675
},
{
"epoch": 3.898840885142255,
"grad_norm": 32.76591110229492,
"learning_rate": 4.8335479842329164e-05,
"loss": 1.5097,
"step": 3700
},
{
"epoch": 3.9251844046364592,
"grad_norm": 28.73731803894043,
"learning_rate": 4.8286695546969526e-05,
"loss": 1.3383,
"step": 3725
},
{
"epoch": 3.951527924130664,
"grad_norm": 33.73754119873047,
"learning_rate": 4.823791125160989e-05,
"loss": 1.4893,
"step": 3750
},
{
"epoch": 3.9778714436248683,
"grad_norm": 16.65566635131836,
"learning_rate": 4.818912695625025e-05,
"loss": 1.6426,
"step": 3775
},
{
"epoch": 4.0,
"eval_gen_len": 8.9979,
"eval_loss": 2.1172335147857666,
"eval_rouge1": 33.7585,
"eval_rouge2": 17.8059,
"eval_rougeL": 33.2997,
"eval_rougeLsum": 33.2581,
"eval_runtime": 180.3517,
"eval_samples_per_second": 2.634,
"eval_steps_per_second": 0.66,
"step": 3796
},
{
"epoch": 4.004214963119073,
"grad_norm": 26.248737335205078,
"learning_rate": 4.8140342660890605e-05,
"loss": 1.4139,
"step": 3800
},
{
"epoch": 4.030558482613277,
"grad_norm": 40.171348571777344,
"learning_rate": 4.809155836553097e-05,
"loss": 0.9346,
"step": 3825
},
{
"epoch": 4.056902002107481,
"grad_norm": 18.67420768737793,
"learning_rate": 4.804277407017133e-05,
"loss": 1.0417,
"step": 3850
},
{
"epoch": 4.083245521601686,
"grad_norm": 15.394213676452637,
"learning_rate": 4.799398977481169e-05,
"loss": 1.1243,
"step": 3875
},
{
"epoch": 4.109589041095891,
"grad_norm": 14.950016021728516,
"learning_rate": 4.794520547945205e-05,
"loss": 0.9402,
"step": 3900
},
{
"epoch": 4.135932560590095,
"grad_norm": 33.47651290893555,
"learning_rate": 4.789642118409242e-05,
"loss": 1.053,
"step": 3925
},
{
"epoch": 4.1622760800842995,
"grad_norm": 23.766538619995117,
"learning_rate": 4.784763688873278e-05,
"loss": 0.9098,
"step": 3950
},
{
"epoch": 4.188619599578503,
"grad_norm": 24.7260684967041,
"learning_rate": 4.7798852593373145e-05,
"loss": 0.8711,
"step": 3975
},
{
"epoch": 4.214963119072708,
"grad_norm": 14.13185977935791,
"learning_rate": 4.775006829801351e-05,
"loss": 0.9576,
"step": 4000
},
{
"epoch": 4.241306638566913,
"grad_norm": 26.87774085998535,
"learning_rate": 4.770128400265387e-05,
"loss": 1.1116,
"step": 4025
},
{
"epoch": 4.267650158061117,
"grad_norm": 24.789899826049805,
"learning_rate": 4.765249970729423e-05,
"loss": 1.2492,
"step": 4050
},
{
"epoch": 4.293993677555322,
"grad_norm": 35.61201858520508,
"learning_rate": 4.760371541193459e-05,
"loss": 1.0259,
"step": 4075
},
{
"epoch": 4.3203371970495255,
"grad_norm": 18.178863525390625,
"learning_rate": 4.7554931116574955e-05,
"loss": 1.0043,
"step": 4100
},
{
"epoch": 4.34668071654373,
"grad_norm": 18.80617904663086,
"learning_rate": 4.750614682121532e-05,
"loss": 1.254,
"step": 4125
},
{
"epoch": 4.373024236037935,
"grad_norm": 23.337120056152344,
"learning_rate": 4.745736252585568e-05,
"loss": 1.3335,
"step": 4150
},
{
"epoch": 4.399367755532139,
"grad_norm": 27.5556640625,
"learning_rate": 4.740857823049604e-05,
"loss": 1.1739,
"step": 4175
},
{
"epoch": 4.425711275026344,
"grad_norm": 14.718132019042969,
"learning_rate": 4.73597939351364e-05,
"loss": 1.232,
"step": 4200
},
{
"epoch": 4.4520547945205475,
"grad_norm": 16.896610260009766,
"learning_rate": 4.7311009639776765e-05,
"loss": 0.9822,
"step": 4225
},
{
"epoch": 4.478398314014752,
"grad_norm": 32.641151428222656,
"learning_rate": 4.7262225344417126e-05,
"loss": 1.1221,
"step": 4250
},
{
"epoch": 4.504741833508957,
"grad_norm": 8.409883499145508,
"learning_rate": 4.721344104905749e-05,
"loss": 1.0836,
"step": 4275
},
{
"epoch": 4.531085353003161,
"grad_norm": 24.261465072631836,
"learning_rate": 4.716465675369785e-05,
"loss": 1.0266,
"step": 4300
},
{
"epoch": 4.557428872497366,
"grad_norm": 27.14542579650879,
"learning_rate": 4.711587245833821e-05,
"loss": 0.9686,
"step": 4325
},
{
"epoch": 4.58377239199157,
"grad_norm": 27.667455673217773,
"learning_rate": 4.706708816297858e-05,
"loss": 1.0164,
"step": 4350
},
{
"epoch": 4.610115911485774,
"grad_norm": 22.17734718322754,
"learning_rate": 4.701830386761894e-05,
"loss": 1.1826,
"step": 4375
},
{
"epoch": 4.636459430979979,
"grad_norm": 19.197708129882812,
"learning_rate": 4.6969519572259305e-05,
"loss": 0.8725,
"step": 4400
},
{
"epoch": 4.662802950474183,
"grad_norm": 18.602115631103516,
"learning_rate": 4.692073527689966e-05,
"loss": 1.3157,
"step": 4425
},
{
"epoch": 4.689146469968388,
"grad_norm": 19.589008331298828,
"learning_rate": 4.687195098154002e-05,
"loss": 1.2225,
"step": 4450
},
{
"epoch": 4.715489989462593,
"grad_norm": 24.026233673095703,
"learning_rate": 4.6823166686180384e-05,
"loss": 1.1642,
"step": 4475
},
{
"epoch": 4.7418335089567965,
"grad_norm": 16.99645233154297,
"learning_rate": 4.6774382390820746e-05,
"loss": 1.0141,
"step": 4500
},
{
"epoch": 4.768177028451001,
"grad_norm": 15.595638275146484,
"learning_rate": 4.672559809546111e-05,
"loss": 1.1805,
"step": 4525
},
{
"epoch": 4.794520547945205,
"grad_norm": 16.28839683532715,
"learning_rate": 4.667681380010147e-05,
"loss": 1.3,
"step": 4550
},
{
"epoch": 4.82086406743941,
"grad_norm": 27.51399040222168,
"learning_rate": 4.662802950474184e-05,
"loss": 1.1732,
"step": 4575
},
{
"epoch": 4.847207586933615,
"grad_norm": 30.363340377807617,
"learning_rate": 4.65792452093822e-05,
"loss": 1.2096,
"step": 4600
},
{
"epoch": 4.8735511064278185,
"grad_norm": 27.562767028808594,
"learning_rate": 4.653046091402256e-05,
"loss": 1.0491,
"step": 4625
},
{
"epoch": 4.899894625922023,
"grad_norm": 15.739079475402832,
"learning_rate": 4.6481676618662924e-05,
"loss": 1.0191,
"step": 4650
},
{
"epoch": 4.926238145416228,
"grad_norm": 30.40022850036621,
"learning_rate": 4.6432892323303286e-05,
"loss": 1.2202,
"step": 4675
},
{
"epoch": 4.952581664910432,
"grad_norm": 14.871630668640137,
"learning_rate": 4.638410802794365e-05,
"loss": 1.0677,
"step": 4700
},
{
"epoch": 4.978925184404637,
"grad_norm": 25.979448318481445,
"learning_rate": 4.633532373258401e-05,
"loss": 1.2033,
"step": 4725
},
{
"epoch": 5.0,
"eval_gen_len": 11.7958,
"eval_loss": 2.07828688621521,
"eval_rouge1": 37.8388,
"eval_rouge2": 19.8532,
"eval_rougeL": 37.3344,
"eval_rougeLsum": 37.4253,
"eval_runtime": 401.1496,
"eval_samples_per_second": 1.184,
"eval_steps_per_second": 0.297,
"step": 4745
}
],
"logging_steps": 25,
"max_steps": 28470,
"num_input_tokens_seen": 0,
"num_train_epochs": 30,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 15600663134208.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}