bart-large-fleece2instructions-r1 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"global_step": 724,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 8.181818181818181e-06,
"loss": 4.2239,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 1.6363636363636363e-05,
"loss": 3.4957,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 2.454545454545455e-05,
"loss": 2.2751,
"step": 9
},
{
"epoch": 0.03,
"learning_rate": 3.2727272727272725e-05,
"loss": 1.892,
"step": 12
},
{
"epoch": 0.04,
"learning_rate": 4.090909090909091e-05,
"loss": 1.6932,
"step": 15
},
{
"epoch": 0.05,
"learning_rate": 4.90909090909091e-05,
"loss": 1.5781,
"step": 18
},
{
"epoch": 0.06,
"learning_rate": 5.7272727272727274e-05,
"loss": 1.4815,
"step": 21
},
{
"epoch": 0.07,
"learning_rate": 5.9998798361395565e-05,
"loss": 1.6128,
"step": 24
},
{
"epoch": 0.07,
"learning_rate": 5.9992490021938124e-05,
"loss": 1.4573,
"step": 27
},
{
"epoch": 0.08,
"learning_rate": 5.998077570751456e-05,
"loss": 1.6366,
"step": 30
},
{
"epoch": 0.09,
"learning_rate": 5.996365752956315e-05,
"loss": 1.6786,
"step": 33
},
{
"epoch": 0.1,
"learning_rate": 5.9941138573537655e-05,
"loss": 1.4698,
"step": 36
},
{
"epoch": 0.11,
"learning_rate": 5.991322289835123e-05,
"loss": 1.4493,
"step": 39
},
{
"epoch": 0.12,
"learning_rate": 5.987991553564485e-05,
"loss": 1.5291,
"step": 42
},
{
"epoch": 0.12,
"learning_rate": 5.984122248888033e-05,
"loss": 1.525,
"step": 45
},
{
"epoch": 0.13,
"learning_rate": 5.979715073225829e-05,
"loss": 1.6919,
"step": 48
},
{
"epoch": 0.14,
"learning_rate": 5.974770820946105e-05,
"loss": 1.4581,
"step": 51
},
{
"epoch": 0.15,
"learning_rate": 5.969290383222086e-05,
"loss": 1.4961,
"step": 54
},
{
"epoch": 0.16,
"learning_rate": 5.96327474787136e-05,
"loss": 1.6307,
"step": 57
},
{
"epoch": 0.17,
"learning_rate": 5.956724999177828e-05,
"loss": 1.5315,
"step": 60
},
{
"epoch": 0.17,
"learning_rate": 5.949642317696271e-05,
"loss": 1.4427,
"step": 63
},
{
"epoch": 0.18,
"learning_rate": 5.94202798003956e-05,
"loss": 1.4304,
"step": 66
},
{
"epoch": 0.19,
"learning_rate": 5.933883358648552e-05,
"loss": 1.394,
"step": 69
},
{
"epoch": 0.2,
"learning_rate": 5.9252099215447206e-05,
"loss": 1.4573,
"step": 72
},
{
"epoch": 0.21,
"learning_rate": 5.916009232065549e-05,
"loss": 1.4003,
"step": 75
},
{
"epoch": 0.22,
"learning_rate": 5.906282948582746e-05,
"loss": 1.3632,
"step": 78
},
{
"epoch": 0.22,
"learning_rate": 5.8960328242033405e-05,
"loss": 1.368,
"step": 81
},
{
"epoch": 0.23,
"learning_rate": 5.885260706453688e-05,
"loss": 1.4453,
"step": 84
},
{
"epoch": 0.24,
"learning_rate": 5.873968536946467e-05,
"loss": 1.444,
"step": 87
},
{
"epoch": 0.25,
"learning_rate": 5.862158351030714e-05,
"loss": 1.487,
"step": 90
},
{
"epoch": 0.26,
"learning_rate": 5.849832277424963e-05,
"loss": 1.2537,
"step": 93
},
{
"epoch": 0.27,
"learning_rate": 5.8369925378335574e-05,
"loss": 1.2746,
"step": 96
},
{
"epoch": 0.27,
"learning_rate": 5.8236414465462e-05,
"loss": 1.3658,
"step": 99
},
{
"epoch": 0.28,
"learning_rate": 5.809781410020814e-05,
"loss": 1.2756,
"step": 102
},
{
"epoch": 0.29,
"learning_rate": 5.795414926449796e-05,
"loss": 1.3328,
"step": 105
},
{
"epoch": 0.3,
"learning_rate": 5.780544585309725e-05,
"loss": 1.2045,
"step": 108
},
{
"epoch": 0.31,
"learning_rate": 5.7651730668946335e-05,
"loss": 1.2833,
"step": 111
},
{
"epoch": 0.31,
"learning_rate": 5.749303141832889e-05,
"loss": 1.4133,
"step": 114
},
{
"epoch": 0.32,
"learning_rate": 5.732937670587814e-05,
"loss": 1.2067,
"step": 117
},
{
"epoch": 0.33,
"learning_rate": 5.716079602942096e-05,
"loss": 1.312,
"step": 120
},
{
"epoch": 0.34,
"learning_rate": 5.698731977466112e-05,
"loss": 1.2986,
"step": 123
},
{
"epoch": 0.35,
"learning_rate": 5.680897920970237e-05,
"loss": 1.3776,
"step": 126
},
{
"epoch": 0.36,
"learning_rate": 5.662580647941262e-05,
"loss": 1.1815,
"step": 129
},
{
"epoch": 0.36,
"learning_rate": 5.643783459962997e-05,
"loss": 1.2364,
"step": 132
},
{
"epoch": 0.37,
"learning_rate": 5.6245097451211754e-05,
"loss": 1.2424,
"step": 135
},
{
"epoch": 0.38,
"learning_rate": 5.604762977392781e-05,
"loss": 1.2353,
"step": 138
},
{
"epoch": 0.39,
"learning_rate": 5.584546716019874e-05,
"loss": 1.2967,
"step": 141
},
{
"epoch": 0.4,
"learning_rate": 5.563864604868061e-05,
"loss": 1.3166,
"step": 144
},
{
"epoch": 0.41,
"learning_rate": 5.5427203717697134e-05,
"loss": 1.2915,
"step": 147
},
{
"epoch": 0.41,
"learning_rate": 5.521117827852039e-05,
"loss": 1.2045,
"step": 150
},
{
"epoch": 0.42,
"learning_rate": 5.499060866850155e-05,
"loss": 1.2928,
"step": 153
},
{
"epoch": 0.43,
"learning_rate": 5.4765534644052603e-05,
"loss": 1.4558,
"step": 156
},
{
"epoch": 0.44,
"learning_rate": 5.453599677348051e-05,
"loss": 1.2275,
"step": 159
},
{
"epoch": 0.45,
"learning_rate": 5.4302036429675e-05,
"loss": 1.1509,
"step": 162
},
{
"epoch": 0.46,
"learning_rate": 5.4063695782651316e-05,
"loss": 1.1652,
"step": 165
},
{
"epoch": 0.46,
"learning_rate": 5.3821017791949336e-05,
"loss": 1.2678,
"step": 168
},
{
"epoch": 0.47,
"learning_rate": 5.3574046198890354e-05,
"loss": 1.1852,
"step": 171
},
{
"epoch": 0.48,
"learning_rate": 5.3322825518692984e-05,
"loss": 1.1094,
"step": 174
},
{
"epoch": 0.49,
"learning_rate": 5.306740103244947e-05,
"loss": 1.2142,
"step": 177
},
{
"epoch": 0.5,
"learning_rate": 5.280781877896411e-05,
"loss": 1.2198,
"step": 180
},
{
"epoch": 0.51,
"learning_rate": 5.2544125546455004e-05,
"loss": 1.3378,
"step": 183
},
{
"epoch": 0.51,
"learning_rate": 5.22763688641207e-05,
"loss": 1.3045,
"step": 186
},
{
"epoch": 0.52,
"learning_rate": 5.200459699357337e-05,
"loss": 1.2095,
"step": 189
},
{
"epoch": 0.53,
"learning_rate": 5.1728858920139945e-05,
"loss": 1.1443,
"step": 192
},
{
"epoch": 0.54,
"learning_rate": 5.144920434403274e-05,
"loss": 1.1227,
"step": 195
},
{
"epoch": 0.55,
"learning_rate": 5.1165683671391305e-05,
"loss": 1.2854,
"step": 198
},
{
"epoch": 0.56,
"learning_rate": 5.087834800519701e-05,
"loss": 1.1764,
"step": 201
},
{
"epoch": 0.56,
"learning_rate": 5.0587249136062016e-05,
"loss": 1.2407,
"step": 204
},
{
"epoch": 0.57,
"learning_rate": 5.0292439532894285e-05,
"loss": 1.178,
"step": 207
},
{
"epoch": 0.58,
"learning_rate": 4.9993972333440435e-05,
"loss": 1.1965,
"step": 210
},
{
"epoch": 0.59,
"learning_rate": 4.969190133470789e-05,
"loss": 1.1649,
"step": 213
},
{
"epoch": 0.6,
"learning_rate": 4.9386280983268294e-05,
"loss": 1.2004,
"step": 216
},
{
"epoch": 0.6,
"learning_rate": 4.9077166365443846e-05,
"loss": 1.1254,
"step": 219
},
{
"epoch": 0.61,
"learning_rate": 4.876461319737833e-05,
"loss": 1.2298,
"step": 222
},
{
"epoch": 0.62,
"learning_rate": 4.84486778149945e-05,
"loss": 1.1731,
"step": 225
},
{
"epoch": 0.63,
"learning_rate": 4.812941716383998e-05,
"loss": 1.215,
"step": 228
},
{
"epoch": 0.64,
"learning_rate": 4.780688878882304e-05,
"loss": 1.2084,
"step": 231
},
{
"epoch": 0.65,
"learning_rate": 4.748115082384054e-05,
"loss": 1.2414,
"step": 234
},
{
"epoch": 0.65,
"learning_rate": 4.7152261981299595e-05,
"loss": 1.1482,
"step": 237
},
{
"epoch": 0.66,
"learning_rate": 4.682028154153497e-05,
"loss": 1.2993,
"step": 240
},
{
"epoch": 0.67,
"learning_rate": 4.648526934212418e-05,
"loss": 1.1218,
"step": 243
},
{
"epoch": 0.68,
"learning_rate": 4.614728576710212e-05,
"loss": 1.1394,
"step": 246
},
{
"epoch": 0.69,
"learning_rate": 4.5806391736077104e-05,
"loss": 1.2554,
"step": 249
},
{
"epoch": 0.7,
"learning_rate": 4.5462648693250564e-05,
"loss": 1.2632,
"step": 252
},
{
"epoch": 0.7,
"learning_rate": 4.5116118596342016e-05,
"loss": 1.1506,
"step": 255
},
{
"epoch": 0.71,
"learning_rate": 4.476686390542155e-05,
"loss": 1.1322,
"step": 258
},
{
"epoch": 0.72,
"learning_rate": 4.4414947571651744e-05,
"loss": 1.1961,
"step": 261
},
{
"epoch": 0.73,
"learning_rate": 4.406043302594111e-05,
"loss": 1.0792,
"step": 264
},
{
"epoch": 0.74,
"learning_rate": 4.370338416751103e-05,
"loss": 1.1068,
"step": 267
},
{
"epoch": 0.75,
"learning_rate": 4.3343865352378236e-05,
"loss": 1.1363,
"step": 270
},
{
"epoch": 0.75,
"learning_rate": 4.298194138175509e-05,
"loss": 1.1092,
"step": 273
},
{
"epoch": 0.76,
"learning_rate": 4.261767749036945e-05,
"loss": 1.1427,
"step": 276
},
{
"epoch": 0.77,
"learning_rate": 4.2251139334706525e-05,
"loss": 1.0705,
"step": 279
},
{
"epoch": 0.78,
"learning_rate": 4.1882392981174704e-05,
"loss": 1.2325,
"step": 282
},
{
"epoch": 0.79,
"learning_rate": 4.151150489419739e-05,
"loss": 1.2151,
"step": 285
},
{
"epoch": 0.8,
"learning_rate": 4.113854192423321e-05,
"loss": 1.2182,
"step": 288
},
{
"epoch": 0.8,
"learning_rate": 4.07635712957266e-05,
"loss": 1.121,
"step": 291
},
{
"epoch": 0.81,
"learning_rate": 4.0386660594990984e-05,
"loss": 1.0753,
"step": 294
},
{
"epoch": 0.82,
"learning_rate": 4.0007877758026695e-05,
"loss": 1.0541,
"step": 297
},
{
"epoch": 0.83,
"learning_rate": 3.962729105827595e-05,
"loss": 1.199,
"step": 300
},
{
"epoch": 0.84,
"learning_rate": 3.9244969094316925e-05,
"loss": 1.1103,
"step": 303
},
{
"epoch": 0.85,
"learning_rate": 3.886098077749924e-05,
"loss": 1.2349,
"step": 306
},
{
"epoch": 0.85,
"learning_rate": 3.847539531952311e-05,
"loss": 1.1671,
"step": 309
},
{
"epoch": 0.86,
"learning_rate": 3.808828221996432e-05,
"loss": 1.0252,
"step": 312
},
{
"epoch": 0.87,
"learning_rate": 3.7699711253747304e-05,
"loss": 1.0853,
"step": 315
},
{
"epoch": 0.88,
"learning_rate": 3.730975245856866e-05,
"loss": 1.109,
"step": 318
},
{
"epoch": 0.89,
"learning_rate": 3.691847612227321e-05,
"loss": 1.0942,
"step": 321
},
{
"epoch": 0.9,
"learning_rate": 3.652595277018502e-05,
"loss": 1.0104,
"step": 324
},
{
"epoch": 0.9,
"learning_rate": 3.613225315239567e-05,
"loss": 1.0905,
"step": 327
},
{
"epoch": 0.91,
"learning_rate": 3.573744823101187e-05,
"loss": 1.2126,
"step": 330
},
{
"epoch": 0.92,
"learning_rate": 3.5341609167365e-05,
"loss": 1.1056,
"step": 333
},
{
"epoch": 0.93,
"learning_rate": 3.49448073091847e-05,
"loss": 0.999,
"step": 336
},
{
"epoch": 0.94,
"learning_rate": 3.4547114177738776e-05,
"loss": 1.2254,
"step": 339
},
{
"epoch": 0.94,
"learning_rate": 3.4148601454941995e-05,
"loss": 1.1343,
"step": 342
},
{
"epoch": 0.95,
"learning_rate": 3.3749340970435756e-05,
"loss": 1.1502,
"step": 345
},
{
"epoch": 0.96,
"learning_rate": 3.3349404688641236e-05,
"loss": 1.1669,
"step": 348
},
{
"epoch": 0.97,
"learning_rate": 3.2948864695788215e-05,
"loss": 1.1723,
"step": 351
},
{
"epoch": 0.98,
"learning_rate": 3.2547793186921944e-05,
"loss": 1.1517,
"step": 354
},
{
"epoch": 0.99,
"learning_rate": 3.2146262452890414e-05,
"loss": 1.1205,
"step": 357
},
{
"epoch": 0.99,
"learning_rate": 3.174434486731428e-05,
"loss": 1.0379,
"step": 360
},
{
"epoch": 1.0,
"eval_gen_len": 14.561409630146546,
"eval_loss": 1.0931541919708252,
"eval_rouge1": 62.4953,
"eval_rouge2": 46.0277,
"eval_rougeL": 60.6748,
"eval_rougeLsum": 60.7667,
"eval_runtime": 234.2502,
"eval_samples_per_second": 12.235,
"eval_steps_per_second": 3.061,
"step": 362
},
{
"epoch": 1.0,
"learning_rate": 3.134211287354203e-05,
"loss": 1.112,
"step": 363
},
{
"epoch": 1.01,
"learning_rate": 3.093963897159241e-05,
"loss": 0.8889,
"step": 366
},
{
"epoch": 1.02,
"learning_rate": 3.053699570508673e-05,
"loss": 0.9652,
"step": 369
},
{
"epoch": 1.03,
"learning_rate": 3.0134255648173302e-05,
"loss": 0.8661,
"step": 372
},
{
"epoch": 1.04,
"learning_rate": 2.9731491392446363e-05,
"loss": 0.9428,
"step": 375
},
{
"epoch": 1.04,
"learning_rate": 2.932877553386181e-05,
"loss": 0.9765,
"step": 378
},
{
"epoch": 1.05,
"learning_rate": 2.8926180659652284e-05,
"loss": 0.9826,
"step": 381
},
{
"epoch": 1.06,
"learning_rate": 2.8523779335243655e-05,
"loss": 0.9222,
"step": 384
},
{
"epoch": 1.07,
"learning_rate": 2.8121644091175544e-05,
"loss": 0.9965,
"step": 387
},
{
"epoch": 1.08,
"learning_rate": 2.7719847410028125e-05,
"loss": 0.9529,
"step": 390
},
{
"epoch": 1.09,
"learning_rate": 2.731846171335753e-05,
"loss": 0.9828,
"step": 393
},
{
"epoch": 1.09,
"learning_rate": 2.691755934864228e-05,
"loss": 0.9637,
"step": 396
},
{
"epoch": 1.1,
"learning_rate": 2.651721257624309e-05,
"loss": 0.9401,
"step": 399
},
{
"epoch": 1.11,
"learning_rate": 2.6117493556378334e-05,
"loss": 0.8848,
"step": 402
},
{
"epoch": 1.12,
"learning_rate": 2.5718474336117575e-05,
"loss": 0.8801,
"step": 405
},
{
"epoch": 1.13,
"learning_rate": 2.5320226836395467e-05,
"loss": 1.0124,
"step": 408
},
{
"epoch": 1.14,
"learning_rate": 2.4922822839048498e-05,
"loss": 0.9359,
"step": 411
},
{
"epoch": 1.14,
"learning_rate": 2.4526333973876625e-05,
"loss": 0.9309,
"step": 414
},
{
"epoch": 1.15,
"learning_rate": 2.413083170573249e-05,
"loss": 0.8749,
"step": 417
},
{
"epoch": 1.16,
"learning_rate": 2.373638732164025e-05,
"loss": 0.9061,
"step": 420
},
{
"epoch": 1.17,
"learning_rate": 2.334307191794648e-05,
"loss": 0.9862,
"step": 423
},
{
"epoch": 1.18,
"learning_rate": 2.2950956387505536e-05,
"loss": 0.8481,
"step": 426
},
{
"epoch": 1.19,
"learning_rate": 2.256011140690145e-05,
"loss": 0.8782,
"step": 429
},
{
"epoch": 1.19,
"learning_rate": 2.217060742370889e-05,
"loss": 0.921,
"step": 432
},
{
"epoch": 1.2,
"learning_rate": 2.1782514643795427e-05,
"loss": 1.007,
"step": 435
},
{
"epoch": 1.21,
"learning_rate": 2.13959030186673e-05,
"loss": 0.9062,
"step": 438
},
{
"epoch": 1.22,
"learning_rate": 2.1010842232861043e-05,
"loss": 0.9572,
"step": 441
},
{
"epoch": 1.23,
"learning_rate": 2.0627401691383272e-05,
"loss": 0.9011,
"step": 444
},
{
"epoch": 1.23,
"learning_rate": 2.0245650507200847e-05,
"loss": 0.9206,
"step": 447
},
{
"epoch": 1.24,
"learning_rate": 1.986565748878359e-05,
"loss": 0.8911,
"step": 450
},
{
"epoch": 1.25,
"learning_rate": 1.9487491127701992e-05,
"loss": 1.0226,
"step": 453
},
{
"epoch": 1.26,
"learning_rate": 1.9111219586282026e-05,
"loss": 0.8751,
"step": 456
},
{
"epoch": 1.27,
"learning_rate": 1.8736910685319207e-05,
"loss": 0.919,
"step": 459
},
{
"epoch": 1.28,
"learning_rate": 1.8364631891854358e-05,
"loss": 0.9712,
"step": 462
},
{
"epoch": 1.28,
"learning_rate": 1.7994450307012992e-05,
"loss": 1.0513,
"step": 465
},
{
"epoch": 1.29,
"learning_rate": 1.762643265391079e-05,
"loss": 0.9719,
"step": 468
},
{
"epoch": 1.3,
"learning_rate": 1.7260645265627054e-05,
"loss": 0.9033,
"step": 471
},
{
"epoch": 1.31,
"learning_rate": 1.689715407324862e-05,
"loss": 0.9133,
"step": 474
},
{
"epoch": 1.32,
"learning_rate": 1.6536024593986135e-05,
"loss": 0.8444,
"step": 477
},
{
"epoch": 1.33,
"learning_rate": 1.6177321919364952e-05,
"loss": 0.9352,
"step": 480
},
{
"epoch": 1.33,
"learning_rate": 1.5821110703492722e-05,
"loss": 0.8796,
"step": 483
},
{
"epoch": 1.34,
"learning_rate": 1.5467455151405927e-05,
"loss": 0.9359,
"step": 486
},
{
"epoch": 1.35,
"learning_rate": 1.511641900749724e-05,
"loss": 0.9757,
"step": 489
},
{
"epoch": 1.36,
"learning_rate": 1.4768065544025973e-05,
"loss": 0.8956,
"step": 492
},
{
"epoch": 1.37,
"learning_rate": 1.442245754971362e-05,
"loss": 0.891,
"step": 495
},
{
"epoch": 1.38,
"learning_rate": 1.4079657318426557e-05,
"loss": 0.9023,
"step": 498
},
{
"epoch": 1.38,
"learning_rate": 1.3739726637947885e-05,
"loss": 0.9007,
"step": 501
},
{
"epoch": 1.39,
"learning_rate": 1.3402726778840592e-05,
"loss": 0.9241,
"step": 504
},
{
"epoch": 1.4,
"learning_rate": 1.3068718483403856e-05,
"loss": 0.9136,
"step": 507
},
{
"epoch": 1.41,
"learning_rate": 1.2737761954724591e-05,
"loss": 0.9191,
"step": 510
},
{
"epoch": 1.42,
"learning_rate": 1.240991684582619e-05,
"loss": 0.876,
"step": 513
},
{
"epoch": 1.43,
"learning_rate": 1.2085242248916421e-05,
"loss": 0.8866,
"step": 516
},
{
"epoch": 1.43,
"learning_rate": 1.1763796684736342e-05,
"loss": 0.8984,
"step": 519
},
{
"epoch": 1.44,
"learning_rate": 1.1445638092012354e-05,
"loss": 0.9028,
"step": 522
},
{
"epoch": 1.45,
"learning_rate": 1.1130823817013007e-05,
"loss": 0.8911,
"step": 525
},
{
"epoch": 1.46,
"learning_rate": 1.0819410603212712e-05,
"loss": 0.8475,
"step": 528
},
{
"epoch": 1.47,
"learning_rate": 1.051145458106398e-05,
"loss": 0.9157,
"step": 531
},
{
"epoch": 1.48,
"learning_rate": 1.0207011257880357e-05,
"loss": 0.8596,
"step": 534
},
{
"epoch": 1.48,
"learning_rate": 9.906135507831401e-06,
"loss": 0.9352,
"step": 537
},
{
"epoch": 1.49,
"learning_rate": 9.608881562052026e-06,
"loss": 0.972,
"step": 540
},
{
"epoch": 1.5,
"learning_rate": 9.315302998867629e-06,
"loss": 0.9492,
"step": 543
},
{
"epoch": 1.51,
"learning_rate": 9.02545273413686e-06,
"loss": 0.9214,
"step": 546
},
{
"epoch": 1.52,
"learning_rate": 8.739383011713901e-06,
"loss": 0.8892,
"step": 549
},
{
"epoch": 1.52,
"learning_rate": 8.457145394031782e-06,
"loss": 0.8614,
"step": 552
},
{
"epoch": 1.53,
"learning_rate": 8.178790752808538e-06,
"loss": 0.9187,
"step": 555
},
{
"epoch": 1.54,
"learning_rate": 7.904369259877887e-06,
"loss": 0.9423,
"step": 558
},
{
"epoch": 1.55,
"learning_rate": 7.633930378146047e-06,
"loss": 0.967,
"step": 561
},
{
"epoch": 1.56,
"learning_rate": 7.367522852676296e-06,
"loss": 0.8578,
"step": 564
},
{
"epoch": 1.57,
"learning_rate": 7.1051947019030035e-06,
"loss": 0.9503,
"step": 567
},
{
"epoch": 1.57,
"learning_rate": 6.846993208976548e-06,
"loss": 0.9362,
"step": 570
},
{
"epoch": 1.58,
"learning_rate": 6.592964913240825e-06,
"loss": 0.9468,
"step": 573
},
{
"epoch": 1.59,
"learning_rate": 6.3431556018447365e-06,
"loss": 0.9301,
"step": 576
},
{
"epoch": 1.6,
"learning_rate": 6.097610301489424e-06,
"loss": 0.8458,
"step": 579
},
{
"epoch": 1.61,
"learning_rate": 5.856373270312341e-06,
"loss": 0.8667,
"step": 582
},
{
"epoch": 1.62,
"learning_rate": 5.619487989910071e-06,
"loss": 0.8545,
"step": 585
},
{
"epoch": 1.62,
"learning_rate": 5.38699715750099e-06,
"loss": 0.967,
"step": 588
},
{
"epoch": 1.63,
"learning_rate": 5.158942678229295e-06,
"loss": 0.8883,
"step": 591
},
{
"epoch": 1.64,
"learning_rate": 4.935365657611912e-06,
"loss": 0.8734,
"step": 594
},
{
"epoch": 1.65,
"learning_rate": 4.716306394129405e-06,
"loss": 0.8855,
"step": 597
},
{
"epoch": 1.66,
"learning_rate": 4.501804371962442e-06,
"loss": 0.8343,
"step": 600
},
{
"epoch": 1.67,
"learning_rate": 4.291898253874972e-06,
"loss": 0.9347,
"step": 603
},
{
"epoch": 1.67,
"learning_rate": 4.086625874245497e-06,
"loss": 0.8844,
"step": 606
},
{
"epoch": 1.68,
"learning_rate": 3.886024232247624e-06,
"loss": 0.9906,
"step": 609
},
{
"epoch": 1.69,
"learning_rate": 3.690129485181201e-06,
"loss": 0.8875,
"step": 612
},
{
"epoch": 1.7,
"learning_rate": 3.4989769419551575e-06,
"loss": 0.8691,
"step": 615
},
{
"epoch": 1.71,
"learning_rate": 3.3126010567232644e-06,
"loss": 0.8623,
"step": 618
},
{
"epoch": 1.72,
"learning_rate": 3.1310354226739957e-06,
"loss": 0.9039,
"step": 621
},
{
"epoch": 1.72,
"learning_rate": 2.9543127659755197e-06,
"loss": 0.8618,
"step": 624
},
{
"epoch": 1.73,
"learning_rate": 2.7824649398770086e-06,
"loss": 0.8591,
"step": 627
},
{
"epoch": 1.74,
"learning_rate": 2.6155229189672757e-06,
"loss": 0.9535,
"step": 630
},
{
"epoch": 1.75,
"learning_rate": 2.4535167935917835e-06,
"loss": 0.9183,
"step": 633
},
{
"epoch": 1.76,
"learning_rate": 2.296475764429007e-06,
"loss": 0.9166,
"step": 636
},
{
"epoch": 1.77,
"learning_rate": 2.144428137227211e-06,
"loss": 0.9128,
"step": 639
},
{
"epoch": 1.77,
"learning_rate": 1.99740131770249e-06,
"loss": 0.9493,
"step": 642
},
{
"epoch": 1.78,
"learning_rate": 1.8554218065990246e-06,
"loss": 0.8549,
"step": 645
},
{
"epoch": 1.79,
"learning_rate": 1.7185151949125088e-06,
"loss": 0.83,
"step": 648
},
{
"epoch": 1.8,
"learning_rate": 1.5867061592774878e-06,
"loss": 0.8383,
"step": 651
},
{
"epoch": 1.81,
"learning_rate": 1.4600184575195486e-06,
"loss": 0.9107,
"step": 654
},
{
"epoch": 1.81,
"learning_rate": 1.3384749243731109e-06,
"loss": 0.906,
"step": 657
},
{
"epoch": 1.82,
"learning_rate": 1.2220974673655938e-06,
"loss": 0.8715,
"step": 660
},
{
"epoch": 1.83,
"learning_rate": 1.1109070628687068e-06,
"loss": 0.8247,
"step": 663
},
{
"epoch": 1.84,
"learning_rate": 1.0049237523175813e-06,
"loss": 0.9263,
"step": 666
},
{
"epoch": 1.85,
"learning_rate": 9.041666385984171e-07,
"loss": 0.8876,
"step": 669
},
{
"epoch": 1.86,
"learning_rate": 8.086538826052858e-07,
"loss": 0.9287,
"step": 672
},
{
"epoch": 1.86,
"learning_rate": 7.184026999667537e-07,
"loss": 0.8754,
"step": 675
},
{
"epoch": 1.87,
"learning_rate": 6.33429357942843e-07,
"loss": 0.8859,
"step": 678
},
{
"epoch": 1.88,
"learning_rate": 5.537491724929644e-07,
"loss": 0.889,
"step": 681
},
{
"epoch": 1.89,
"learning_rate": 4.793765055152955e-07,
"loss": 0.8693,
"step": 684
},
{
"epoch": 1.9,
"learning_rate": 4.103247622581441e-07,
"loss": 0.8027,
"step": 687
},
{
"epoch": 1.91,
"learning_rate": 3.466063889036986e-07,
"loss": 0.9616,
"step": 690
},
{
"epoch": 1.91,
"learning_rate": 2.8823287032470835e-07,
"loss": 0.9431,
"step": 693
},
{
"epoch": 1.92,
"learning_rate": 2.352147280143835e-07,
"loss": 0.8779,
"step": 696
},
{
"epoch": 1.93,
"learning_rate": 1.8756151818996148e-07,
"loss": 0.9092,
"step": 699
},
{
"epoch": 1.94,
"learning_rate": 1.452818300702452e-07,
"loss": 0.8256,
"step": 702
},
{
"epoch": 1.95,
"learning_rate": 1.0838328432745792e-07,
"loss": 0.8805,
"step": 705
},
{
"epoch": 1.96,
"learning_rate": 7.687253171365205e-08,
"loss": 0.8944,
"step": 708
},
{
"epoch": 1.96,
"learning_rate": 5.0755251861954734e-08,
"loss": 0.806,
"step": 711
},
{
"epoch": 1.97,
"learning_rate": 3.0036152262833404e-08,
"loss": 0.9051,
"step": 714
},
{
"epoch": 1.98,
"learning_rate": 1.4718967415617845e-08,
"loss": 0.89,
"step": 717
},
{
"epoch": 1.99,
"learning_rate": 4.806458155358629e-09,
"loss": 0.9656,
"step": 720
},
{
"epoch": 2.0,
"learning_rate": 3.004111552218358e-10,
"loss": 0.925,
"step": 723
},
{
"epoch": 2.0,
"eval_gen_len": 14.478018143754362,
"eval_loss": 1.0436240434646606,
"eval_rouge1": 64.4538,
"eval_rouge2": 47.8829,
"eval_rougeL": 62.5085,
"eval_rougeLsum": 62.6165,
"eval_runtime": 232.7829,
"eval_samples_per_second": 12.312,
"eval_steps_per_second": 3.08,
"step": 724
},
{
"epoch": 2.0,
"step": 724,
"total_flos": 1.004106246258688e+17,
"train_loss": 1.1152722031207374,
"train_runtime": 4961.932,
"train_samples_per_second": 9.338,
"train_steps_per_second": 0.146
}
],
"max_steps": 724,
"num_train_epochs": 2,
"total_flos": 1.004106246258688e+17,
"trial_name": null,
"trial_params": null
}