{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.99995686494414,
"global_step": 115910,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 5.172413793103448e-07,
"loss": 2.1185,
"step": 1
},
{
"epoch": 0.02,
"learning_rate": 0.00012931034482758618,
"loss": 1.5115,
"step": 250
},
{
"epoch": 0.04,
"learning_rate": 0.00025862068965517237,
"loss": 1.2814,
"step": 500
},
{
"epoch": 0.06,
"learning_rate": 0.0003879310344827586,
"loss": 1.2284,
"step": 750
},
{
"epoch": 0.09,
"learning_rate": 0.0005172413793103447,
"loss": 1.2049,
"step": 1000
},
{
"epoch": 0.11,
"learning_rate": 0.0005999990893103199,
"loss": 1.1888,
"step": 1250
},
{
"epoch": 0.13,
"learning_rate": 0.0005999870030839344,
"loss": 1.1754,
"step": 1500
},
{
"epoch": 0.15,
"learning_rate": 0.0005999608636610692,
"loss": 1.1618,
"step": 1750
},
{
"epoch": 0.17,
"learning_rate": 0.0005999206722662517,
"loss": 1.1522,
"step": 2000
},
{
"epoch": 0.19,
"learning_rate": 0.0005998664307822891,
"loss": 1.1431,
"step": 2250
},
{
"epoch": 0.22,
"learning_rate": 0.0005997981417501787,
"loss": 1.139,
"step": 2500
},
{
"epoch": 0.24,
"learning_rate": 0.0005997158083689897,
"loss": 1.134,
"step": 2750
},
{
"epoch": 0.26,
"learning_rate": 0.0005996194344957133,
"loss": 1.1267,
"step": 3000
},
{
"epoch": 0.28,
"learning_rate": 0.0005995090246450815,
"loss": 1.1215,
"step": 3250
},
{
"epoch": 0.3,
"learning_rate": 0.0005993845839893562,
"loss": 1.1198,
"step": 3500
},
{
"epoch": 0.32,
"learning_rate": 0.0005992461183580866,
"loss": 1.1145,
"step": 3750
},
{
"epoch": 0.35,
"learning_rate": 0.0005990936342378362,
"loss": 1.1097,
"step": 4000
},
{
"epoch": 0.37,
"learning_rate": 0.0005989271387718786,
"loss": 1.1071,
"step": 4250
},
{
"epoch": 0.39,
"learning_rate": 0.0005987466397598635,
"loss": 1.1049,
"step": 4500
},
{
"epoch": 0.41,
"learning_rate": 0.0005985521456574509,
"loss": 1.1012,
"step": 4750
},
{
"epoch": 0.43,
"learning_rate": 0.0005983436655759147,
"loss": 1.0981,
"step": 5000
},
{
"epoch": 0.45,
"learning_rate": 0.0005981212092817168,
"loss": 1.0959,
"step": 5250
},
{
"epoch": 0.47,
"learning_rate": 0.0005978847871960486,
"loss": 1.092,
"step": 5500
},
{
"epoch": 0.5,
"learning_rate": 0.0005976344103943434,
"loss": 1.0911,
"step": 5750
},
{
"epoch": 0.52,
"learning_rate": 0.0005973700906057571,
"loss": 1.0878,
"step": 6000
},
{
"epoch": 0.54,
"learning_rate": 0.0005970918402126193,
"loss": 1.0856,
"step": 6250
},
{
"epoch": 0.56,
"learning_rate": 0.000596799672249853,
"loss": 1.0818,
"step": 6500
},
{
"epoch": 0.58,
"learning_rate": 0.0005964936004043636,
"loss": 1.0816,
"step": 6750
},
{
"epoch": 0.6,
"learning_rate": 0.0005961736390143983,
"loss": 1.0793,
"step": 7000
},
{
"epoch": 0.63,
"learning_rate": 0.0005958398030688742,
"loss": 1.0774,
"step": 7250
},
{
"epoch": 0.65,
"learning_rate": 0.0005954921082066758,
"loss": 1.0738,
"step": 7500
},
{
"epoch": 0.67,
"learning_rate": 0.0005951305707159228,
"loss": 1.0751,
"step": 7750
},
{
"epoch": 0.69,
"learning_rate": 0.0005947552075332069,
"loss": 1.0709,
"step": 8000
},
{
"epoch": 0.71,
"learning_rate": 0.0005943660362427983,
"loss": 1.0707,
"step": 8250
},
{
"epoch": 0.73,
"learning_rate": 0.0005939630750758225,
"loss": 1.0682,
"step": 8500
},
{
"epoch": 0.75,
"learning_rate": 0.0005935463429094053,
"loss": 1.0665,
"step": 8750
},
{
"epoch": 0.78,
"learning_rate": 0.0005931158592657892,
"loss": 1.0654,
"step": 9000
},
{
"epoch": 0.8,
"learning_rate": 0.0005926716443114187,
"loss": 1.0635,
"step": 9250
},
{
"epoch": 0.82,
"learning_rate": 0.0005922137188559952,
"loss": 1.0623,
"step": 9500
},
{
"epoch": 0.84,
"learning_rate": 0.0005917421043515032,
"loss": 1.0621,
"step": 9750
},
{
"epoch": 0.86,
"learning_rate": 0.0005912568228912035,
"loss": 1.0601,
"step": 10000
},
{
"epoch": 0.88,
"learning_rate": 0.0005907578972086006,
"loss": 1.0587,
"step": 10250
},
{
"epoch": 0.91,
"learning_rate": 0.0005902453506763757,
"loss": 1.057,
"step": 10500
},
{
"epoch": 0.93,
"learning_rate": 0.0005897192073052928,
"loss": 1.0566,
"step": 10750
},
{
"epoch": 0.95,
"learning_rate": 0.0005891794917430733,
"loss": 1.0567,
"step": 11000
},
{
"epoch": 0.97,
"learning_rate": 0.0005886262292732426,
"loss": 1.0535,
"step": 11250
},
{
"epoch": 0.99,
"learning_rate": 0.0005880594458139437,
"loss": 1.0534,
"step": 11500
},
{
"epoch": 1.0,
"eval_alliteration_score": 0.3529233565586186,
"eval_harmonic_meter_score": 0.15390579451720435,
"eval_harmonic_rhyme_score": 0.29888189056198133,
"eval_meter_score": 0.37094404850825824,
"eval_rhyme_score": 0.652105690320114,
"eval_runtime": 1096.4036,
"eval_samples_per_second": 2.463,
"eval_steps_per_second": 0.308,
"step": 11591
},
{
"epoch": 1.01,
"learning_rate": 0.0005874791679167249,
"loss": 1.0499,
"step": 11750
},
{
"epoch": 1.04,
"learning_rate": 0.0005868854227652948,
"loss": 1.0418,
"step": 12000
},
{
"epoch": 1.06,
"learning_rate": 0.0005862782381742493,
"loss": 1.0432,
"step": 12250
},
{
"epoch": 1.08,
"learning_rate": 0.0005856576425877683,
"loss": 1.0423,
"step": 12500
},
{
"epoch": 1.1,
"learning_rate": 0.0005850236650782842,
"loss": 1.0433,
"step": 12750
},
{
"epoch": 1.12,
"learning_rate": 0.0005843763353451184,
"loss": 1.0416,
"step": 13000
},
{
"epoch": 1.14,
"learning_rate": 0.0005837156837130911,
"loss": 1.0425,
"step": 13250
},
{
"epoch": 1.16,
"learning_rate": 0.0005830417411311002,
"loss": 1.0394,
"step": 13500
},
{
"epoch": 1.19,
"learning_rate": 0.0005823545391706722,
"loss": 1.0401,
"step": 13750
},
{
"epoch": 1.21,
"learning_rate": 0.0005816541100244823,
"loss": 1.0391,
"step": 14000
},
{
"epoch": 1.23,
"learning_rate": 0.0005809404865048467,
"loss": 1.0378,
"step": 14250
},
{
"epoch": 1.25,
"learning_rate": 0.0005802137020421853,
"loss": 1.0375,
"step": 14500
},
{
"epoch": 1.27,
"learning_rate": 0.0005794737906834562,
"loss": 1.0381,
"step": 14750
},
{
"epoch": 1.29,
"learning_rate": 0.0005787207870905601,
"loss": 1.0368,
"step": 15000
},
{
"epoch": 1.32,
"learning_rate": 0.0005779547265387164,
"loss": 1.0357,
"step": 15250
},
{
"epoch": 1.34,
"learning_rate": 0.0005771756449148116,
"loss": 1.0336,
"step": 15500
},
{
"epoch": 1.36,
"learning_rate": 0.0005763835787157171,
"loss": 1.0354,
"step": 15750
},
{
"epoch": 1.38,
"learning_rate": 0.0005755785650465805,
"loss": 1.035,
"step": 16000
},
{
"epoch": 1.4,
"learning_rate": 0.0005747606416190861,
"loss": 1.0331,
"step": 16250
},
{
"epoch": 1.42,
"learning_rate": 0.0005739298467496894,
"loss": 1.0329,
"step": 16500
},
{
"epoch": 1.45,
"learning_rate": 0.0005730862193578216,
"loss": 1.0316,
"step": 16750
},
{
"epoch": 1.47,
"learning_rate": 0.0005722297989640667,
"loss": 1.0333,
"step": 17000
},
{
"epoch": 1.49,
"learning_rate": 0.0005713606256883096,
"loss": 1.0299,
"step": 17250
},
{
"epoch": 1.51,
"learning_rate": 0.0005704787402478568,
"loss": 1.0304,
"step": 17500
},
{
"epoch": 1.53,
"learning_rate": 0.0005695841839555298,
"loss": 1.0291,
"step": 17750
},
{
"epoch": 1.55,
"learning_rate": 0.000568676998717728,
"loss": 1.0303,
"step": 18000
},
{
"epoch": 1.57,
"learning_rate": 0.0005677572270324676,
"loss": 1.0292,
"step": 18250
},
{
"epoch": 1.6,
"learning_rate": 0.0005668249119873892,
"loss": 1.0273,
"step": 18500
},
{
"epoch": 1.62,
"learning_rate": 0.00056588009725774,
"loss": 1.0277,
"step": 18750
},
{
"epoch": 1.64,
"learning_rate": 0.0005649228271043274,
"loss": 1.0266,
"step": 19000
},
{
"epoch": 1.66,
"learning_rate": 0.0005639531463714464,
"loss": 1.0252,
"step": 19250
},
{
"epoch": 1.68,
"learning_rate": 0.0005629711004847776,
"loss": 1.0254,
"step": 19500
},
{
"epoch": 1.7,
"learning_rate": 0.0005619767354492601,
"loss": 1.0257,
"step": 19750
},
{
"epoch": 1.73,
"learning_rate": 0.0005609700978469361,
"loss": 1.025,
"step": 20000
},
{
"epoch": 1.75,
"learning_rate": 0.0005599512348347686,
"loss": 1.0238,
"step": 20250
},
{
"epoch": 1.77,
"learning_rate": 0.0005589201941424324,
"loss": 1.0226,
"step": 20500
},
{
"epoch": 1.79,
"learning_rate": 0.000557877024070078,
"loss": 1.0232,
"step": 20750
},
{
"epoch": 1.81,
"learning_rate": 0.0005568217734860691,
"loss": 1.0233,
"step": 21000
},
{
"epoch": 1.83,
"learning_rate": 0.0005557544918246936,
"loss": 1.0206,
"step": 21250
},
{
"epoch": 1.85,
"learning_rate": 0.0005546752290838471,
"loss": 1.0211,
"step": 21500
},
{
"epoch": 1.88,
"learning_rate": 0.0005535840358226909,
"loss": 1.0221,
"step": 21750
},
{
"epoch": 1.9,
"learning_rate": 0.0005524809631592841,
"loss": 1.0207,
"step": 22000
},
{
"epoch": 1.92,
"learning_rate": 0.0005513660627681884,
"loss": 1.019,
"step": 22250
},
{
"epoch": 1.94,
"learning_rate": 0.0005502393868780472,
"loss": 1.0193,
"step": 22500
},
{
"epoch": 1.96,
"learning_rate": 0.0005491009882691392,
"loss": 1.019,
"step": 22750
},
{
"epoch": 1.98,
"learning_rate": 0.000547950920270906,
"loss": 1.0172,
"step": 23000
},
{
"epoch": 2.0,
"eval_alliteration_score": 0.42727272727272725,
"eval_harmonic_meter_score": 0.20045849813764974,
"eval_harmonic_rhyme_score": 0.5208998846302836,
"eval_meter_score": 0.43620708634823174,
"eval_rhyme_score": 0.8331009411550936,
"eval_runtime": 1187.4498,
"eval_samples_per_second": 2.274,
"eval_steps_per_second": 0.285,
"step": 23182
},
{
"epoch": 2.01,
"learning_rate": 0.0005467892367594536,
"loss": 1.0168,
"step": 23250
},
{
"epoch": 2.03,
"learning_rate": 0.0005456159921550282,
"loss": 1.0074,
"step": 23500
},
{
"epoch": 2.05,
"learning_rate": 0.0005444312414194676,
"loss": 1.0084,
"step": 23750
},
{
"epoch": 2.07,
"learning_rate": 0.0005432350400536257,
"loss": 1.0082,
"step": 24000
},
{
"epoch": 2.09,
"learning_rate": 0.0005420274440947728,
"loss": 1.0091,
"step": 24250
},
{
"epoch": 2.11,
"learning_rate": 0.0005408085101139711,
"loss": 1.0069,
"step": 24500
},
{
"epoch": 2.14,
"learning_rate": 0.0005395782952134231,
"loss": 1.0079,
"step": 24750
},
{
"epoch": 2.16,
"learning_rate": 0.0005383368570237981,
"loss": 1.0084,
"step": 25000
},
{
"epoch": 2.18,
"learning_rate": 0.0005370842537015316,
"loss": 1.0084,
"step": 25250
},
{
"epoch": 2.2,
"learning_rate": 0.0005358205439261011,
"loss": 1.0084,
"step": 25500
},
{
"epoch": 2.22,
"learning_rate": 0.0005345457868972771,
"loss": 1.0078,
"step": 25750
},
{
"epoch": 2.24,
"learning_rate": 0.0005332600423323502,
"loss": 1.0082,
"step": 26000
},
{
"epoch": 2.26,
"learning_rate": 0.0005319633704633331,
"loss": 1.0064,
"step": 26250
},
{
"epoch": 2.29,
"learning_rate": 0.0005306558320341392,
"loss": 1.0069,
"step": 26500
},
{
"epoch": 2.31,
"learning_rate": 0.0005293374882977369,
"loss": 1.0067,
"step": 26750
},
{
"epoch": 2.33,
"learning_rate": 0.0005280084010132805,
"loss": 1.0057,
"step": 27000
},
{
"epoch": 2.35,
"learning_rate": 0.0005266686324432164,
"loss": 1.0074,
"step": 27250
},
{
"epoch": 2.37,
"learning_rate": 0.000525318245350367,
"loss": 1.0048,
"step": 27500
},
{
"epoch": 2.39,
"learning_rate": 0.0005239573029949899,
"loss": 1.0048,
"step": 27750
},
{
"epoch": 2.42,
"learning_rate": 0.0005225858691318151,
"loss": 1.0061,
"step": 28000
},
{
"epoch": 2.44,
"learning_rate": 0.0005212040080070575,
"loss": 1.0045,
"step": 28250
},
{
"epoch": 2.46,
"learning_rate": 0.0005198117843554081,
"loss": 1.0039,
"step": 28500
},
{
"epoch": 2.48,
"learning_rate": 0.0005184092633970007,
"loss": 1.0032,
"step": 28750
},
{
"epoch": 2.5,
"learning_rate": 0.0005169965108343571,
"loss": 1.0026,
"step": 29000
},
{
"epoch": 2.52,
"learning_rate": 0.000515573592849309,
"loss": 1.0038,
"step": 29250
},
{
"epoch": 2.55,
"learning_rate": 0.0005141405760998976,
"loss": 1.0011,
"step": 29500
},
{
"epoch": 2.57,
"learning_rate": 0.0005126975277172511,
"loss": 1.0014,
"step": 29750
},
{
"epoch": 2.59,
"learning_rate": 0.0005112445153024395,
"loss": 1.0017,
"step": 30000
},
{
"epoch": 2.61,
"learning_rate": 0.0005097816069233083,
"loss": 1.0012,
"step": 30250
},
{
"epoch": 2.63,
"learning_rate": 0.0005083088711112896,
"loss": 0.9992,
"step": 30500
},
{
"epoch": 2.65,
"learning_rate": 0.0005068263768581912,
"loss": 1.0003,
"step": 30750
},
{
"epoch": 2.67,
"learning_rate": 0.0005053341936129654,
"loss": 1.0,
"step": 31000
},
{
"epoch": 2.7,
"learning_rate": 0.0005038323912784549,
"loss": 0.9995,
"step": 31250
},
{
"epoch": 2.72,
"learning_rate": 0.0005023210402081186,
"loss": 0.9998,
"step": 31500
},
{
"epoch": 2.74,
"learning_rate": 0.0005008002112027358,
"loss": 0.9994,
"step": 31750
},
{
"epoch": 2.76,
"learning_rate": 0.000499269975507089,
"loss": 0.9982,
"step": 32000
},
{
"epoch": 2.78,
"learning_rate": 0.0004977304048066266,
"loss": 0.9975,
"step": 32250
},
{
"epoch": 2.8,
"learning_rate": 0.0004961815712241053,
"loss": 0.9977,
"step": 32500
},
{
"epoch": 2.83,
"learning_rate": 0.0004946235473162105,
"loss": 0.9982,
"step": 32750
},
{
"epoch": 2.85,
"learning_rate": 0.0004930564060701579,
"loss": 0.9968,
"step": 33000
},
{
"epoch": 2.87,
"learning_rate": 0.0004914802209002742,
"loss": 0.9981,
"step": 33250
},
{
"epoch": 2.89,
"learning_rate": 0.0004898950656445579,
"loss": 0.9971,
"step": 33500
},
{
"epoch": 2.91,
"learning_rate": 0.0004883010145612202,
"loss": 0.9956,
"step": 33750
},
{
"epoch": 2.93,
"learning_rate": 0.00048669814232520656,
"loss": 0.9953,
"step": 34000
},
{
"epoch": 2.95,
"learning_rate": 0.0004850865240246984,
"loss": 0.9946,
"step": 34250
},
{
"epoch": 2.98,
"learning_rate": 0.000483466235157595,
"loss": 0.9945,
"step": 34500
},
{
"epoch": 3.0,
"learning_rate": 0.00048183735162797754,
"loss": 0.9952,
"step": 34750
},
{
"epoch": 3.0,
"eval_alliteration_score": 0.4316546762589928,
"eval_harmonic_meter_score": 0.21624795235944666,
"eval_harmonic_rhyme_score": 0.5329191903361169,
"eval_meter_score": 0.4587688559480128,
"eval_rhyme_score": 0.8467749122975762,
"eval_runtime": 1157.9301,
"eval_samples_per_second": 2.332,
"eval_steps_per_second": 0.292,
"step": 34773
},
{
"epoch": 3.02,
"learning_rate": 0.00048019994974255294,
"loss": 0.9837,
"step": 35000
},
{
"epoch": 3.04,
"learning_rate": 0.0004785541062070789,
"loss": 0.9825,
"step": 35250
},
{
"epoch": 3.06,
"learning_rate": 0.00047689989812277095,
"loss": 0.9844,
"step": 35500
},
{
"epoch": 3.08,
"learning_rate": 0.00047523740298269045,
"loss": 0.9843,
"step": 35750
},
{
"epoch": 3.11,
"learning_rate": 0.0004735666986681142,
"loss": 0.9834,
"step": 36000
},
{
"epoch": 3.13,
"learning_rate": 0.00047188786344488643,
"loss": 0.983,
"step": 36250
},
{
"epoch": 3.15,
"learning_rate": 0.0004702009759597517,
"loss": 0.9841,
"step": 36500
},
{
"epoch": 3.17,
"learning_rate": 0.0004685061152366711,
"loss": 0.9836,
"step": 36750
},
{
"epoch": 3.19,
"learning_rate": 0.00046680336067312034,
"loss": 0.9848,
"step": 37000
},
{
"epoch": 3.21,
"learning_rate": 0.0004650927920363698,
"loss": 0.9833,
"step": 37250
},
{
"epoch": 3.24,
"learning_rate": 0.00046337448945974846,
"loss": 0.9819,
"step": 37500
},
{
"epoch": 3.26,
"learning_rate": 0.00046164853343888914,
"loss": 0.9827,
"step": 37750
},
{
"epoch": 3.28,
"learning_rate": 0.0004599150048279585,
"loss": 0.9835,
"step": 38000
},
{
"epoch": 3.3,
"learning_rate": 0.0004581739848358684,
"loss": 0.9822,
"step": 38250
},
{
"epoch": 3.32,
"learning_rate": 0.0004564255550224727,
"loss": 0.9823,
"step": 38500
},
{
"epoch": 3.34,
"learning_rate": 0.0004546697972947452,
"loss": 0.981,
"step": 38750
},
{
"epoch": 3.36,
"learning_rate": 0.00045290679390294386,
"loss": 0.9815,
"step": 39000
},
{
"epoch": 3.39,
"learning_rate": 0.0004511366274367566,
"loss": 0.9817,
"step": 39250
},
{
"epoch": 3.41,
"learning_rate": 0.0004493593808214336,
"loss": 0.9814,
"step": 39500
},
{
"epoch": 3.43,
"learning_rate": 0.0004475751373139011,
"loss": 0.981,
"step": 39750
},
{
"epoch": 3.45,
"learning_rate": 0.00044578398049886233,
"loss": 0.9797,
"step": 40000
},
{
"epoch": 3.47,
"learning_rate": 0.00044398599428488114,
"loss": 0.981,
"step": 40250
},
{
"epoch": 3.49,
"learning_rate": 0.00044218126290045183,
"loss": 0.9814,
"step": 40500
},
{
"epoch": 3.52,
"learning_rate": 0.00044036987089005275,
"loss": 0.9796,
"step": 40750
},
{
"epoch": 3.54,
"learning_rate": 0.0004385519031101863,
"loss": 0.9789,
"step": 41000
},
{
"epoch": 3.56,
"learning_rate": 0.0004367274447254036,
"loss": 0.9791,
"step": 41250
},
{
"epoch": 3.58,
"learning_rate": 0.0004348965812043143,
"loss": 0.9782,
"step": 41500
},
{
"epoch": 3.6,
"learning_rate": 0.00043305939831558366,
"loss": 0.9779,
"step": 41750
},
{
"epoch": 3.62,
"learning_rate": 0.0004312159821239138,
"loss": 0.9787,
"step": 42000
},
{
"epoch": 3.65,
"learning_rate": 0.0004293664189860124,
"loss": 0.9767,
"step": 42250
},
{
"epoch": 3.67,
"learning_rate": 0.0004275107955465473,
"loss": 0.9765,
"step": 42500
},
{
"epoch": 3.69,
"learning_rate": 0.000425649198734087,
"loss": 0.9756,
"step": 42750
},
{
"epoch": 3.71,
"learning_rate": 0.0004237817157570289,
"loss": 0.9763,
"step": 43000
},
{
"epoch": 3.73,
"learning_rate": 0.00042190843409951374,
"loss": 0.9757,
"step": 43250
},
{
"epoch": 3.75,
"learning_rate": 0.00042002944151732724,
"loss": 0.9756,
"step": 43500
},
{
"epoch": 3.77,
"learning_rate": 0.00041814482603378945,
"loss": 0.9735,
"step": 43750
},
{
"epoch": 3.8,
"learning_rate": 0.00041625467593563083,
"loss": 0.9752,
"step": 44000
},
{
"epoch": 3.82,
"learning_rate": 0.00041435907976885645,
"loss": 0.9719,
"step": 44250
},
{
"epoch": 3.84,
"learning_rate": 0.000412458126334598,
"loss": 0.9736,
"step": 44500
},
{
"epoch": 3.86,
"learning_rate": 0.000410551904684954,
"loss": 0.9745,
"step": 44750
},
{
"epoch": 3.88,
"learning_rate": 0.0004086405041188175,
"loss": 0.9739,
"step": 45000
},
{
"epoch": 3.9,
"learning_rate": 0.0004067240141776935,
"loss": 0.9714,
"step": 45250
},
{
"epoch": 3.93,
"learning_rate": 0.0004048025246415039,
"loss": 0.973,
"step": 45500
},
{
"epoch": 3.95,
"learning_rate": 0.0004028761255243818,
"loss": 0.9724,
"step": 45750
},
{
"epoch": 3.97,
"learning_rate": 0.00040094490707045426,
"loss": 0.9722,
"step": 46000
},
{
"epoch": 3.99,
"learning_rate": 0.0003990089597496154,
"loss": 0.9716,
"step": 46250
},
{
"epoch": 4.0,
"eval_alliteration_score": 0.43804537521815007,
"eval_harmonic_meter_score": 0.21317668443125748,
"eval_harmonic_rhyme_score": 0.5018255179872321,
"eval_meter_score": 0.45256004654409726,
"eval_rhyme_score": 0.8339540595241286,
"eval_runtime": 1179.1819,
"eval_samples_per_second": 2.29,
"eval_steps_per_second": 0.287,
"step": 46364
},
{
"epoch": 4.01,
"learning_rate": 0.0003970683742532878,
"loss": 0.9648,
"step": 46500
},
{
"epoch": 4.03,
"learning_rate": 0.00039512324149017415,
"loss": 0.9568,
"step": 46750
},
{
"epoch": 4.05,
"learning_rate": 0.00039317365258199856,
"loss": 0.9576,
"step": 47000
},
{
"epoch": 4.08,
"learning_rate": 0.0003912196988592375,
"loss": 0.9586,
"step": 47250
},
{
"epoch": 4.1,
"learning_rate": 0.0003892614718568419,
"loss": 0.9585,
"step": 47500
},
{
"epoch": 4.12,
"learning_rate": 0.0003872990633099488,
"loss": 0.9601,
"step": 47750
},
{
"epoch": 4.14,
"learning_rate": 0.00038533256514958366,
"loss": 0.9588,
"step": 48000
},
{
"epoch": 4.16,
"learning_rate": 0.0003833620694983544,
"loss": 0.9577,
"step": 48250
},
{
"epoch": 4.18,
"learning_rate": 0.0003813876686661354,
"loss": 0.9575,
"step": 48500
},
{
"epoch": 4.21,
"learning_rate": 0.000379409455145743,
"loss": 0.9585,
"step": 48750
},
{
"epoch": 4.23,
"learning_rate": 0.0003774275216086031,
"loss": 0.959,
"step": 49000
},
{
"epoch": 4.25,
"learning_rate": 0.0003754419609004093,
"loss": 0.9574,
"step": 49250
},
{
"epoch": 4.27,
"learning_rate": 0.0003734528660367739,
"loss": 0.9575,
"step": 49500
},
{
"epoch": 4.29,
"learning_rate": 0.0003714603301988702,
"loss": 0.9561,
"step": 49750
},
{
"epoch": 4.31,
"learning_rate": 0.00036946444672906754,
"loss": 0.9581,
"step": 50000
},
{
"epoch": 4.34,
"learning_rate": 0.00036746530912655837,
"loss": 0.9569,
"step": 50250
},
{
"epoch": 4.36,
"learning_rate": 0.0003654630110429785,
"loss": 0.956,
"step": 50500
},
{
"epoch": 4.38,
"learning_rate": 0.0003634576462780193,
"loss": 0.9559,
"step": 50750
},
{
"epoch": 4.4,
"learning_rate": 0.0003614493087750344,
"loss": 0.9559,
"step": 51000
},
{
"epoch": 4.42,
"learning_rate": 0.00035943809261663816,
"loss": 0.9537,
"step": 51250
},
{
"epoch": 4.44,
"learning_rate": 0.0003574240920202984,
"loss": 0.9524,
"step": 51500
},
{
"epoch": 4.46,
"learning_rate": 0.000355407401333923,
"loss": 0.9553,
"step": 51750
},
{
"epoch": 4.49,
"learning_rate": 0.0003533881150314395,
"loss": 0.9539,
"step": 52000
},
{
"epoch": 4.51,
"learning_rate": 0.00035136632770837013,
"loss": 0.9529,
"step": 52250
},
{
"epoch": 4.53,
"learning_rate": 0.0003493421340773996,
"loss": 0.9533,
"step": 52500
},
{
"epoch": 4.55,
"learning_rate": 0.000347315628963939,
"loss": 0.9522,
"step": 52750
},
{
"epoch": 4.57,
"learning_rate": 0.00034528690730168256,
"loss": 0.9525,
"step": 53000
},
{
"epoch": 4.59,
"learning_rate": 0.0003432560641281616,
"loss": 0.9522,
"step": 53250
},
{
"epoch": 4.62,
"learning_rate": 0.00034122319458029137,
"loss": 0.9513,
"step": 53500
},
{
"epoch": 4.64,
"learning_rate": 0.0003391883938899151,
"loss": 0.9512,
"step": 53750
},
{
"epoch": 4.66,
"learning_rate": 0.00033715175737934175,
"loss": 0.9495,
"step": 54000
},
{
"epoch": 4.68,
"learning_rate": 0.0003351133804568817,
"loss": 0.9519,
"step": 54250
},
{
"epoch": 4.7,
"learning_rate": 0.00033307335861237653,
"loss": 0.9502,
"step": 54500
},
{
"epoch": 4.72,
"learning_rate": 0.00033103178741272587,
"loss": 0.9481,
"step": 54750
},
{
"epoch": 4.75,
"learning_rate": 0.0003289887624974103,
"loss": 0.9505,
"step": 55000
},
{
"epoch": 4.77,
"learning_rate": 0.0003269443795740114,
"loss": 0.9481,
"step": 55250
},
{
"epoch": 4.79,
"learning_rate": 0.00032489873441372786,
"loss": 0.9488,
"step": 55500
},
{
"epoch": 4.81,
"learning_rate": 0.0003228519228468893,
"loss": 0.9471,
"step": 55750
},
{
"epoch": 4.83,
"learning_rate": 0.00032080404075846686,
"loss": 0.9474,
"step": 56000
},
{
"epoch": 4.85,
"learning_rate": 0.0003187551840835811,
"loss": 0.9475,
"step": 56250
},
{
"epoch": 4.87,
"learning_rate": 0.0003167054488030084,
"loss": 0.9469,
"step": 56500
},
{
"epoch": 4.9,
"learning_rate": 0.000314654930938684,
"loss": 0.9456,
"step": 56750
},
{
"epoch": 4.92,
"learning_rate": 0.00031260372654920427,
"loss": 0.9449,
"step": 57000
},
{
"epoch": 4.94,
"learning_rate": 0.00031055193172532656,
"loss": 0.9451,
"step": 57250
},
{
"epoch": 4.96,
"learning_rate": 0.00030849964258546764,
"loss": 0.9445,
"step": 57500
},
{
"epoch": 4.98,
"learning_rate": 0.0003064469552712011,
"loss": 0.9436,
"step": 57750
},
{
"epoch": 5.0,
"eval_alliteration_score": 0.448066935949221,
"eval_harmonic_meter_score": 0.21530994103058235,
"eval_harmonic_rhyme_score": 0.5565157424339893,
"eval_meter_score": 0.4616261926927596,
"eval_rhyme_score": 0.8542501571432405,
"eval_runtime": 1173.576,
"eval_samples_per_second": 2.301,
"eval_steps_per_second": 0.288,
"step": 57955
},
{
"epoch": 5.0,
"learning_rate": 0.00030439396594275327,
"loss": 0.9417,
"step": 58000
},
{
"epoch": 5.03,
"learning_rate": 0.00030234077077449876,
"loss": 0.9273,
"step": 58250
},
{
"epoch": 5.05,
"learning_rate": 0.0003002874659504549,
"loss": 0.9289,
"step": 58500
},
{
"epoch": 5.07,
"learning_rate": 0.00029823414765977604,
"loss": 0.9294,
"step": 58750
},
{
"epoch": 5.09,
"learning_rate": 0.00029618091209224727,
"loss": 0.9298,
"step": 59000
},
{
"epoch": 5.11,
"learning_rate": 0.0002941278554337787,
"loss": 0.9286,
"step": 59250
},
{
"epoch": 5.13,
"learning_rate": 0.00029207507386189873,
"loss": 0.9288,
"step": 59500
},
{
"epoch": 5.15,
"learning_rate": 0.0002900226635412495,
"loss": 0.929,
"step": 59750
},
{
"epoch": 5.18,
"learning_rate": 0.0002879707206190815,
"loss": 0.9284,
"step": 60000
},
{
"epoch": 5.2,
"learning_rate": 0.0002859193412207492,
"loss": 0.9289,
"step": 60250
},
{
"epoch": 5.22,
"learning_rate": 0.0002838686214452084,
"loss": 0.9272,
"step": 60500
},
{
"epoch": 5.24,
"learning_rate": 0.00028181865736051414,
"loss": 0.9271,
"step": 60750
},
{
"epoch": 5.26,
"learning_rate": 0.0002797695449993205,
"loss": 0.9268,
"step": 61000
},
{
"epoch": 5.28,
"learning_rate": 0.0002777213803543814,
"loss": 0.928,
"step": 61250
},
{
"epoch": 5.31,
"learning_rate": 0.00027567425937405436,
"loss": 0.9263,
"step": 61500
},
{
"epoch": 5.33,
"learning_rate": 0.00027362827795780515,
"loss": 0.9264,
"step": 61750
},
{
"epoch": 5.35,
"learning_rate": 0.0002715835319517157,
"loss": 0.9259,
"step": 62000
},
{
"epoch": 5.37,
"learning_rate": 0.0002695401171439936,
"loss": 0.9253,
"step": 62250
},
{
"epoch": 5.39,
"learning_rate": 0.0002674981292604852,
"loss": 0.9244,
"step": 62500
},
{
"epoch": 5.41,
"learning_rate": 0.00026545766396019144,
"loss": 0.9247,
"step": 62750
},
{
"epoch": 5.44,
"learning_rate": 0.0002634188168307859,
"loss": 0.9255,
"step": 63000
},
{
"epoch": 5.46,
"learning_rate": 0.00026138168338413746,
"loss": 0.9238,
"step": 63250
},
{
"epoch": 5.48,
"learning_rate": 0.00025934635905183577,
"loss": 0.9228,
"step": 63500
},
{
"epoch": 5.5,
"learning_rate": 0.00025731293918072076,
"loss": 0.9244,
"step": 63750
},
{
"epoch": 5.52,
"learning_rate": 0.00025528151902841584,
"loss": 0.9229,
"step": 64000
},
{
"epoch": 5.54,
"learning_rate": 0.00025325219375886554,
"loss": 0.9231,
"step": 64250
},
{
"epoch": 5.56,
"learning_rate": 0.0002512250584378775,
"loss": 0.9222,
"step": 64500
},
{
"epoch": 5.59,
"learning_rate": 0.00024920020802866925,
"loss": 0.9221,
"step": 64750
},
{
"epoch": 5.61,
"learning_rate": 0.00024717773738741866,
"loss": 0.9218,
"step": 65000
},
{
"epoch": 5.63,
"learning_rate": 0.0002451577412588216,
"loss": 0.9193,
"step": 65250
},
{
"epoch": 5.65,
"learning_rate": 0.00024314031427165252,
"loss": 0.9192,
"step": 65500
},
{
"epoch": 5.67,
"learning_rate": 0.00024112555093433206,
"loss": 0.9185,
"step": 65750
},
{
"epoch": 5.69,
"learning_rate": 0.0002391135456304992,
"loss": 0.9183,
"step": 66000
},
{
"epoch": 5.72,
"learning_rate": 0.0002371043926145902,
"loss": 0.9181,
"step": 66250
},
{
"epoch": 5.74,
"learning_rate": 0.00023509818600742305,
"loss": 0.9167,
"step": 66500
},
{
"epoch": 5.76,
"learning_rate": 0.0002330950197917882,
"loss": 0.9175,
"step": 66750
},
{
"epoch": 5.78,
"learning_rate": 0.00023109498780804545,
"loss": 0.9162,
"step": 67000
},
{
"epoch": 5.8,
"learning_rate": 0.000229098183749729,
"loss": 0.9156,
"step": 67250
},
{
"epoch": 5.82,
"learning_rate": 0.00022710470115915718,
"loss": 0.9166,
"step": 67500
},
{
"epoch": 5.85,
"learning_rate": 0.00022511463342305096,
"loss": 0.9145,
"step": 67750
},
{
"epoch": 5.87,
"learning_rate": 0.0002231280737681587,
"loss": 0.9136,
"step": 68000
},
{
"epoch": 5.89,
"learning_rate": 0.0002211451152568894,
"loss": 0.9133,
"step": 68250
},
{
"epoch": 5.91,
"learning_rate": 0.00021916585078295267,
"loss": 0.9126,
"step": 68500
},
{
"epoch": 5.93,
"learning_rate": 0.00021719037306700746,
"loss": 0.9136,
"step": 68750
},
{
"epoch": 5.95,
"learning_rate": 0.00021521877465231773,
"loss": 0.9117,
"step": 69000
},
{
"epoch": 5.97,
"learning_rate": 0.00021325114790041793,
"loss": 0.9118,
"step": 69250
},
{
"epoch": 6.0,
"learning_rate": 0.0002112875849867861,
"loss": 0.9096,
"step": 69500
},
{
"epoch": 6.0,
"eval_alliteration_score": 0.467425320056899,
"eval_harmonic_meter_score": 0.205647038467351,
"eval_harmonic_rhyme_score": 0.5379127523573549,
"eval_meter_score": 0.44894757325407986,
"eval_rhyme_score": 0.8296400338920163,
"eval_runtime": 1108.8436,
"eval_samples_per_second": 2.435,
"eval_steps_per_second": 0.305,
"step": 69546
},
{
"epoch": 6.02,
"learning_rate": 0.00020932817789652498,
"loss": 0.8977,
"step": 69750
},
{
"epoch": 6.04,
"learning_rate": 0.0002073730184200542,
"loss": 0.8935,
"step": 70000
},
{
"epoch": 6.06,
"learning_rate": 0.0002054221981488093,
"loss": 0.893,
"step": 70250
},
{
"epoch": 6.08,
"learning_rate": 0.00020347580847095156,
"loss": 0.8944,
"step": 70500
},
{
"epoch": 6.1,
"learning_rate": 0.00020153394056708618,
"loss": 0.8963,
"step": 70750
},
{
"epoch": 6.13,
"learning_rate": 0.0001995966854059916,
"loss": 0.8953,
"step": 71000
},
{
"epoch": 6.15,
"learning_rate": 0.0001976641337403576,
"loss": 0.8938,
"step": 71250
},
{
"epoch": 6.17,
"learning_rate": 0.0001957363761025339,
"loss": 0.8928,
"step": 71500
},
{
"epoch": 6.19,
"learning_rate": 0.0001938135028002889,
"loss": 0.8931,
"step": 71750
},
{
"epoch": 6.21,
"learning_rate": 0.00019189560391257962,
"loss": 0.8941,
"step": 72000
},
{
"epoch": 6.23,
"learning_rate": 0.00018998276928533143,
"loss": 0.8933,
"step": 72250
},
{
"epoch": 6.25,
"learning_rate": 0.00018807508852722944,
"loss": 0.8923,
"step": 72500
},
{
"epoch": 6.28,
"learning_rate": 0.00018617265100552005,
"loss": 0.8914,
"step": 72750
},
{
"epoch": 6.3,
"learning_rate": 0.00018427554584182545,
"loss": 0.8903,
"step": 73000
},
{
"epoch": 6.32,
"learning_rate": 0.0001823838619079677,
"loss": 0.8923,
"step": 73250
},
{
"epoch": 6.34,
"learning_rate": 0.00018049768782180627,
"loss": 0.8893,
"step": 73500
},
{
"epoch": 6.36,
"learning_rate": 0.0001786171119430857,
"loss": 0.8913,
"step": 73750
},
{
"epoch": 6.38,
"learning_rate": 0.0001767422223692971,
"loss": 0.8896,
"step": 74000
},
{
"epoch": 6.41,
"learning_rate": 0.00017487310693155088,
"loss": 0.8891,
"step": 74250
},
{
"epoch": 6.43,
"learning_rate": 0.00017300985319046187,
"loss": 0.8883,
"step": 74500
},
{
"epoch": 6.45,
"learning_rate": 0.00017115254843204803,
"loss": 0.8891,
"step": 74750
},
{
"epoch": 6.47,
"learning_rate": 0.0001693012796636411,
"loss": 0.8876,
"step": 75000
},
{
"epoch": 6.49,
"learning_rate": 0.0001674561336098108,
"loss": 0.8879,
"step": 75250
},
{
"epoch": 6.51,
"learning_rate": 0.0001656171967083019,
"loss": 0.8867,
"step": 75500
},
{
"epoch": 6.54,
"learning_rate": 0.00016378455510598517,
"loss": 0.8857,
"step": 75750
},
{
"epoch": 6.56,
"learning_rate": 0.00016195829465482172,
"loss": 0.8843,
"step": 76000
},
{
"epoch": 6.58,
"learning_rate": 0.00016013850090784126,
"loss": 0.8848,
"step": 76250
},
{
"epoch": 6.6,
"learning_rate": 0.00015832525911513371,
"loss": 0.8835,
"step": 76500
},
{
"epoch": 6.62,
"learning_rate": 0.00015651865421985648,
"loss": 0.8837,
"step": 76750
},
{
"epoch": 6.64,
"learning_rate": 0.00015471877085425465,
"loss": 0.8843,
"step": 77000
},
{
"epoch": 6.66,
"learning_rate": 0.0001529256933356964,
"loss": 0.8826,
"step": 77250
},
{
"epoch": 6.69,
"learning_rate": 0.00015113950566272296,
"loss": 0.8817,
"step": 77500
},
{
"epoch": 6.71,
"learning_rate": 0.00014936029151111374,
"loss": 0.8817,
"step": 77750
},
{
"epoch": 6.73,
"learning_rate": 0.00014758813422996664,
"loss": 0.8811,
"step": 78000
},
{
"epoch": 6.75,
"learning_rate": 0.00014582311683779313,
"loss": 0.8808,
"step": 78250
},
{
"epoch": 6.77,
"learning_rate": 0.00014406532201862937,
"loss": 0.8793,
"step": 78500
},
{
"epoch": 6.79,
"learning_rate": 0.00014231483211816264,
"loss": 0.8782,
"step": 78750
},
{
"epoch": 6.82,
"learning_rate": 0.00014057172913987414,
"loss": 0.879,
"step": 79000
},
{
"epoch": 6.84,
"learning_rate": 0.00013883609474119667,
"loss": 0.878,
"step": 79250
},
{
"epoch": 6.86,
"learning_rate": 0.00013710801022969032,
"loss": 0.8784,
"step": 79500
},
{
"epoch": 6.88,
"learning_rate": 0.00013538755655923278,
"loss": 0.8762,
"step": 79750
},
{
"epoch": 6.9,
"learning_rate": 0.00013367481432622743,
"loss": 0.8747,
"step": 80000
},
{
"epoch": 6.92,
"learning_rate": 0.00013196986376582716,
"loss": 0.8744,
"step": 80250
},
{
"epoch": 6.95,
"learning_rate": 0.0001302727847481763,
"loss": 0.875,
"step": 80500
},
{
"epoch": 6.97,
"learning_rate": 0.00012858365677466868,
"loss": 0.8746,
"step": 80750
},
{
"epoch": 6.99,
"learning_rate": 0.00012690255897422343,
"loss": 0.8735,
"step": 81000
},
{
"epoch": 7.0,
"eval_alliteration_score": 0.4531559581092556,
"eval_harmonic_meter_score": 0.2045466700855701,
"eval_harmonic_rhyme_score": 0.5273458131288548,
"eval_meter_score": 0.46399703611329074,
"eval_rhyme_score": 0.8458613205511866,
"eval_runtime": 1177.9262,
"eval_samples_per_second": 2.292,
"eval_steps_per_second": 0.287,
"step": 81137
},
{
"epoch": 7.01,
"learning_rate": 0.00012522957009957795,
"loss": 0.8649,
"step": 81250
},
{
"epoch": 7.03,
"learning_rate": 0.00012356476852359884,
"loss": 0.8542,
"step": 81500
},
{
"epoch": 7.05,
"learning_rate": 0.00012190823223561035,
"loss": 0.8522,
"step": 81750
},
{
"epoch": 7.07,
"learning_rate": 0.00012026003883774089,
"loss": 0.8536,
"step": 82000
},
{
"epoch": 7.1,
"learning_rate": 0.00011862026554128754,
"loss": 0.8549,
"step": 82250
},
{
"epoch": 7.12,
"learning_rate": 0.00011698898916309941,
"loss": 0.855,
"step": 82500
},
{
"epoch": 7.14,
"learning_rate": 0.00011536628612197879,
"loss": 0.8553,
"step": 82750
},
{
"epoch": 7.16,
"learning_rate": 0.00011375223243510097,
"loss": 0.8548,
"step": 83000
},
{
"epoch": 7.18,
"learning_rate": 0.00011214690371445381,
"loss": 0.8542,
"step": 83250
},
{
"epoch": 7.2,
"learning_rate": 0.00011055037516329507,
"loss": 0.8551,
"step": 83500
},
{
"epoch": 7.23,
"learning_rate": 0.00010896272157262967,
"loss": 0.8546,
"step": 83750
},
{
"epoch": 7.25,
"learning_rate": 0.00010738401731770592,
"loss": 0.853,
"step": 84000
},
{
"epoch": 7.27,
"learning_rate": 0.00010581433635453141,
"loss": 0.8537,
"step": 84250
},
{
"epoch": 7.29,
"learning_rate": 0.00010425375221640848,
"loss": 0.8523,
"step": 84500
},
{
"epoch": 7.31,
"learning_rate": 0.00010270233801048948,
"loss": 0.8521,
"step": 84750
},
{
"epoch": 7.33,
"learning_rate": 0.0001011601664143517,
"loss": 0.8522,
"step": 85000
},
{
"epoch": 7.35,
"learning_rate": 9.962730967259322e-05,
"loss": 0.8531,
"step": 85250
},
{
"epoch": 7.38,
"learning_rate": 9.810383959344827e-05,
"loss": 0.8509,
"step": 85500
},
{
"epoch": 7.4,
"learning_rate": 9.65898275454233e-05,
"loss": 0.8509,
"step": 85750
},
{
"epoch": 7.42,
"learning_rate": 9.508534445395351e-05,
"loss": 0.8492,
"step": 86000
},
{
"epoch": 7.44,
"learning_rate": 9.359046079808056e-05,
"loss": 0.8488,
"step": 86250
},
{
"epoch": 7.46,
"learning_rate": 9.210524660715084e-05,
"loss": 0.8498,
"step": 86500
},
{
"epoch": 7.48,
"learning_rate": 9.062977145753468e-05,
"loss": 0.8501,
"step": 86750
},
{
"epoch": 7.51,
"learning_rate": 8.916410446936725e-05,
"loss": 0.8499,
"step": 87000
},
{
"epoch": 7.53,
"learning_rate": 8.770831430331029e-05,
"loss": 0.8486,
"step": 87250
},
{
"epoch": 7.55,
"learning_rate": 8.626246915733591e-05,
"loss": 0.8475,
"step": 87500
},
{
"epoch": 7.57,
"learning_rate": 8.482663676353128e-05,
"loss": 0.8469,
"step": 87750
},
{
"epoch": 7.59,
"learning_rate": 8.340088438492646e-05,
"loss": 0.8465,
"step": 88000
},
{
"epoch": 7.61,
"learning_rate": 8.198527881234261e-05,
"loss": 0.8454,
"step": 88250
},
{
"epoch": 7.64,
"learning_rate": 8.057988636126368e-05,
"loss": 0.8455,
"step": 88500
},
{
"epoch": 7.66,
"learning_rate": 7.918477286872927e-05,
"loss": 0.8463,
"step": 88750
},
{
"epoch": 7.68,
"learning_rate": 7.780000369025093e-05,
"loss": 0.8439,
"step": 89000
},
{
"epoch": 7.7,
"learning_rate": 7.642564369675017e-05,
"loss": 0.8437,
"step": 89250
},
{
"epoch": 7.72,
"learning_rate": 7.506175727151986e-05,
"loss": 0.8444,
"step": 89500
},
{
"epoch": 7.74,
"learning_rate": 7.370840830720743e-05,
"loss": 0.8433,
"step": 89750
},
{
"epoch": 7.76,
"learning_rate": 7.236566020282263e-05,
"loss": 0.8423,
"step": 90000
},
{
"epoch": 7.79,
"learning_rate": 7.103357586076716e-05,
"loss": 0.8417,
"step": 90250
},
{
"epoch": 7.81,
"learning_rate": 6.971221768388777e-05,
"loss": 0.8428,
"step": 90500
},
{
"epoch": 7.83,
"learning_rate": 6.840164757255298e-05,
"loss": 0.8406,
"step": 90750
},
{
"epoch": 7.85,
"learning_rate": 6.710192692175355e-05,
"loss": 0.84,
"step": 91000
},
{
"epoch": 7.87,
"learning_rate": 6.581311661822627e-05,
"loss": 0.8402,
"step": 91250
},
{
"epoch": 7.89,
"learning_rate": 6.453527703760158e-05,
"loss": 0.838,
"step": 91500
},
{
"epoch": 7.92,
"learning_rate": 6.326846804157504e-05,
"loss": 0.838,
"step": 91750
},
{
"epoch": 7.94,
"learning_rate": 6.201274897510344e-05,
"loss": 0.8381,
"step": 92000
},
{
"epoch": 7.96,
"learning_rate": 6.076817866362461e-05,
"loss": 0.837,
"step": 92250
},
{
"epoch": 7.98,
"learning_rate": 5.953481541030134e-05,
"loss": 0.8385,
"step": 92500
},
{
"epoch": 8.0,
"eval_alliteration_score": 0.4492298916143754,
"eval_harmonic_meter_score": 0.22527704736583123,
"eval_harmonic_rhyme_score": 0.55956489384148,
"eval_meter_score": 0.4760072853422334,
"eval_rhyme_score": 0.8549378831695242,
"eval_runtime": 1170.1941,
"eval_samples_per_second": 2.307,
"eval_steps_per_second": 0.289,
"step": 92728
},
{
"epoch": 8.0,
"learning_rate": 5.831271699329055e-05,
"loss": 0.8367,
"step": 92750
},
{
"epoch": 8.02,
"learning_rate": 5.7101940663036404e-05,
"loss": 0.8191,
"step": 93000
},
{
"epoch": 8.05,
"learning_rate": 5.590254313958858e-05,
"loss": 0.8209,
"step": 93250
},
{
"epoch": 8.07,
"learning_rate": 5.471458060994458e-05,
"loss": 0.819,
"step": 93500
},
{
"epoch": 8.09,
"learning_rate": 5.353810872541822e-05,
"loss": 0.8203,
"step": 93750
},
{
"epoch": 8.11,
"learning_rate": 5.237318259903237e-05,
"loss": 0.8185,
"step": 94000
},
{
"epoch": 8.13,
"learning_rate": 5.121985680293711e-05,
"loss": 0.8197,
"step": 94250
},
{
"epoch": 8.15,
"learning_rate": 5.007818536585306e-05,
"loss": 0.8193,
"step": 94500
},
{
"epoch": 8.17,
"learning_rate": 4.8948221770540686e-05,
"loss": 0.8189,
"step": 94750
},
{
"epoch": 8.2,
"learning_rate": 4.7830018951294724e-05,
"loss": 0.8181,
"step": 95000
},
{
"epoch": 8.22,
"learning_rate": 4.6723629291464336e-05,
"loss": 0.8196,
"step": 95250
},
{
"epoch": 8.24,
"learning_rate": 4.5629104620999126e-05,
"loss": 0.8184,
"step": 95500
},
{
"epoch": 8.26,
"learning_rate": 4.4546496214021265e-05,
"loss": 0.8189,
"step": 95750
},
{
"epoch": 8.28,
"learning_rate": 4.347585478642348e-05,
"loss": 0.817,
"step": 96000
},
{
"epoch": 8.3,
"learning_rate": 4.241723049349309e-05,
"loss": 0.8205,
"step": 96250
},
{
"epoch": 8.33,
"learning_rate": 4.1370672927562575e-05,
"loss": 0.8183,
"step": 96500
},
{
"epoch": 8.35,
"learning_rate": 4.033623111568628e-05,
"loss": 0.8176,
"step": 96750
},
{
"epoch": 8.37,
"learning_rate": 3.931395351734373e-05,
"loss": 0.816,
"step": 97000
},
{
"epoch": 8.39,
"learning_rate": 3.830388802216939e-05,
"loss": 0.8177,
"step": 97250
},
{
"epoch": 8.41,
"learning_rate": 3.730608194770944e-05,
"loss": 0.8171,
"step": 97500
},
{
"epoch": 8.43,
"learning_rate": 3.6320582037205024e-05,
"loss": 0.8153,
"step": 97750
},
{
"epoch": 8.45,
"learning_rate": 3.53474344574025e-05,
"loss": 0.8172,
"step": 98000
},
{
"epoch": 8.48,
"learning_rate": 3.438668479639057e-05,
"loss": 0.8164,
"step": 98250
},
{
"epoch": 8.5,
"learning_rate": 3.3438378061465e-05,
"loss": 0.8147,
"step": 98500
},
{
"epoch": 8.52,
"learning_rate": 3.250255867701994e-05,
"loss": 0.8145,
"step": 98750
},
{
"epoch": 8.54,
"learning_rate": 3.157927048246697e-05,
"loss": 0.8158,
"step": 99000
},
{
"epoch": 8.56,
"learning_rate": 3.0668556730181194e-05,
"loss": 0.8133,
"step": 99250
},
{
"epoch": 8.58,
"learning_rate": 2.97704600834753e-05,
"loss": 0.814,
"step": 99500
},
{
"epoch": 8.61,
"learning_rate": 2.8885022614600705e-05,
"loss": 0.8133,
"step": 99750
},
{
"epoch": 8.63,
"learning_rate": 2.8012285802776934e-05,
"loss": 0.8146,
"step": 100000
},
{
"epoch": 8.65,
"learning_rate": 2.7152290532248077e-05,
"loss": 0.8121,
"step": 100250
},
{
"epoch": 8.67,
"learning_rate": 2.6305077090367855e-05,
"loss": 0.8137,
"step": 100500
},
{
"epoch": 8.69,
"learning_rate": 2.5470685165712322e-05,
"loss": 0.8126,
"step": 100750
},
{
"epoch": 8.71,
"learning_rate": 2.4649153846220405e-05,
"loss": 0.8137,
"step": 101000
},
{
"epoch": 8.74,
"learning_rate": 2.3840521617362807e-05,
"loss": 0.8121,
"step": 101250
},
{
"epoch": 8.76,
"learning_rate": 2.304482636033934e-05,
"loss": 0.8136,
"step": 101500
},
{
"epoch": 8.78,
"learning_rate": 2.2262105350304206e-05,
"loss": 0.8119,
"step": 101750
},
{
"epoch": 8.8,
"learning_rate": 2.149239525461971e-05,
"loss": 0.8121,
"step": 102000
},
{
"epoch": 8.82,
"learning_rate": 2.0735732131138694e-05,
"loss": 0.8122,
"step": 102250
},
{
"epoch": 8.84,
"learning_rate": 1.9992151426515303e-05,
"loss": 0.81,
"step": 102500
},
{
"epoch": 8.86,
"learning_rate": 1.9261687974544515e-05,
"loss": 0.8127,
"step": 102750
},
{
"epoch": 8.89,
"learning_rate": 1.8544375994530026e-05,
"loss": 0.8113,
"step": 103000
},
{
"epoch": 8.91,
"learning_rate": 1.7840249089681714e-05,
"loss": 0.8091,
"step": 103250
},
{
"epoch": 8.93,
"learning_rate": 1.7149340245541043e-05,
"loss": 0.8137,
"step": 103500
},
{
"epoch": 8.95,
"learning_rate": 1.64716818284361e-05,
"loss": 0.8096,
"step": 103750
},
{
"epoch": 8.97,
"learning_rate": 1.5807305583965002e-05,
"loss": 0.8089,
"step": 104000
},
{
"epoch": 8.99,
"learning_rate": 1.5156242635509187e-05,
"loss": 0.8101,
"step": 104250
},
{
"epoch": 9.0,
"eval_alliteration_score": 0.45457142857142857,
"eval_harmonic_meter_score": 0.22300740426243393,
"eval_harmonic_rhyme_score": 0.5252016141180921,
"eval_meter_score": 0.47249164549890543,
"eval_rhyme_score": 0.8439913924542158,
"eval_runtime": 1174.1795,
"eval_samples_per_second": 2.299,
"eval_steps_per_second": 0.288,
"step": 104319
},
{
"epoch": 9.02,
"learning_rate": 1.451852348277508e-05,
"loss": 0.8024,
"step": 104500
},
{
"epoch": 9.04,
"learning_rate": 1.3894178000365508e-05,
"loss": 0.7977,
"step": 104750
},
{
"epoch": 9.06,
"learning_rate": 1.3283235436379947e-05,
"loss": 0.7995,
"step": 105000
},
{
"epoch": 9.08,
"learning_rate": 1.2685724411044718e-05,
"loss": 0.7984,
"step": 105250
},
{
"epoch": 9.1,
"learning_rate": 1.2101672915371896e-05,
"loss": 0.7992,
"step": 105500
},
{
"epoch": 9.12,
"learning_rate": 1.1531108309848192e-05,
"loss": 0.799,
"step": 105750
},
{
"epoch": 9.15,
"learning_rate": 1.0974057323153274e-05,
"loss": 0.7974,
"step": 106000
},
{
"epoch": 9.17,
"learning_rate": 1.0430546050907607e-05,
"loss": 0.7993,
"step": 106250
},
{
"epoch": 9.19,
"learning_rate": 9.900599954449895e-06,
"loss": 0.7971,
"step": 106500
},
{
"epoch": 9.21,
"learning_rate": 9.38424385964437e-06,
"loss": 0.7985,
"step": 106750
},
{
"epoch": 9.23,
"learning_rate": 8.881501955717851e-06,
"loss": 0.7973,
"step": 107000
},
{
"epoch": 9.25,
"learning_rate": 8.392397794126493e-06,
"loss": 0.797,
"step": 107250
},
{
"epoch": 9.27,
"learning_rate": 7.91695428745256e-06,
"loss": 0.7977,
"step": 107500
},
{
"epoch": 9.3,
"learning_rate": 7.4551937083310195e-06,
"loss": 0.7977,
"step": 107750
},
{
"epoch": 9.32,
"learning_rate": 7.007137688406183e-06,
"loss": 0.7967,
"step": 108000
},
{
"epoch": 9.34,
"learning_rate": 6.57280721731831e-06,
"loss": 0.7969,
"step": 108250
},
{
"epoch": 9.36,
"learning_rate": 6.152222641720361e-06,
"loss": 0.7979,
"step": 108500
},
{
"epoch": 9.38,
"learning_rate": 5.745403664324866e-06,
"loss": 0.7972,
"step": 108750
},
{
"epoch": 9.4,
"learning_rate": 5.352369342980855e-06,
"loss": 0.7982,
"step": 109000
},
{
"epoch": 9.43,
"learning_rate": 4.973138089781115e-06,
"loss": 0.797,
"step": 109250
},
{
"epoch": 9.45,
"learning_rate": 4.607727670199734e-06,
"loss": 0.7971,
"step": 109500
},
{
"epoch": 9.47,
"learning_rate": 4.256155202259682e-06,
"loss": 0.7982,
"step": 109750
},
{
"epoch": 9.49,
"learning_rate": 3.918437155731036e-06,
"loss": 0.7968,
"step": 110000
},
{
"epoch": 9.51,
"learning_rate": 3.5945893513594116e-06,
"loss": 0.7971,
"step": 110250
},
{
"epoch": 9.53,
"learning_rate": 3.2846269601248474e-06,
"loss": 0.7967,
"step": 110500
},
{
"epoch": 9.55,
"learning_rate": 2.988564502530977e-06,
"loss": 0.7973,
"step": 110750
},
{
"epoch": 9.58,
"learning_rate": 2.706415847925003e-06,
"loss": 0.796,
"step": 111000
},
{
"epoch": 9.6,
"learning_rate": 2.4381942138477884e-06,
"loss": 0.7959,
"step": 111250
},
{
"epoch": 9.62,
"learning_rate": 2.1839121654147805e-06,
"loss": 0.7966,
"step": 111500
},
{
"epoch": 9.64,
"learning_rate": 1.9435816147273187e-06,
"loss": 0.7982,
"step": 111750
},
{
"epoch": 9.66,
"learning_rate": 1.7172138203147113e-06,
"loss": 0.7974,
"step": 112000
},
{
"epoch": 9.68,
"learning_rate": 1.5048193866066261e-06,
"loss": 0.7962,
"step": 112250
},
{
"epoch": 9.71,
"learning_rate": 1.306408263436487e-06,
"loss": 0.796,
"step": 112500
},
{
"epoch": 9.73,
"learning_rate": 1.1219897455753134e-06,
"loss": 0.7969,
"step": 112750
},
{
"epoch": 9.75,
"learning_rate": 9.515724722962692e-07,
"loss": 0.7966,
"step": 113000
},
{
"epoch": 9.77,
"learning_rate": 7.95164426969952e-07,
"loss": 0.7971,
"step": 113250
},
{
"epoch": 9.79,
"learning_rate": 6.5277293669046e-07,
"loss": 0.7964,
"step": 113500
},
{
"epoch": 9.81,
"learning_rate": 5.244046719320661e-07,
"loss": 0.7987,
"step": 113750
},
{
"epoch": 9.84,
"learning_rate": 4.1006564623676794e-07,
"loss": 0.7974,
"step": 114000
},
{
"epoch": 9.86,
"learning_rate": 3.097612159326135e-07,
"loss": 0.7968,
"step": 114250
},
{
"epoch": 9.88,
"learning_rate": 2.2349607988273454e-07,
"loss": 0.7967,
"step": 114500
},
{
"epoch": 9.9,
"learning_rate": 1.512742792652233e-07,
"loss": 0.7963,
"step": 114750
},
{
"epoch": 9.92,
"learning_rate": 9.309919738381688e-08,
"loss": 0.7958,
"step": 115000
},
{
"epoch": 9.94,
"learning_rate": 4.897355950945758e-08,
"loss": 0.7967,
"step": 115250
},
{
"epoch": 9.96,
"learning_rate": 1.8899432752528255e-08,
"loss": 0.7965,
"step": 115500
},
{
"epoch": 9.99,
"learning_rate": 2.8782259661630503e-09,
"loss": 0.7964,
"step": 115750
},
{
"epoch": 10.0,
"eval_alliteration_score": 0.44889892715979673,
"eval_harmonic_meter_score": 0.22169202291201373,
"eval_harmonic_rhyme_score": 0.5585708483881331,
"eval_meter_score": 0.46651850277597545,
"eval_rhyme_score": 0.8524697393236106,
"eval_runtime": 1181.0934,
"eval_samples_per_second": 2.286,
"eval_steps_per_second": 0.286,
"step": 115910
},
{
"epoch": 10.0,
"step": 115910,
"total_flos": 1.446063607638917e+18,
"train_loss": 0.9344727087942378,
"train_runtime": 52307.177,
"train_samples_per_second": 283.652,
"train_steps_per_second": 2.216
}
],
"max_steps": 115910,
"num_train_epochs": 10,
"total_flos": 1.446063607638917e+18,
"trial_name": null,
"trial_params": null
}