pseudonymization-seq2seq / flair / trainer_state.json
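The JSON below appears to be the checkpoint state written by the Hugging Face `transformers` Trainer for this model: 3 epochs over 40,206 optimizer steps, with the training loss and learning rate logged every 500 steps. The learning rate decays roughly linearly from about 4.94e-05 towards zero while the loss falls from 1.35 to about 0.88; the closing entry summarises the full run (average train_loss of about 0.951, roughly 4.7 hours of training at ~75.8 samples per second).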
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"global_step": 40206,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"learning_rate": 4.93782022583694e-05,
"loss": 1.3505,
"step": 500
},
{
"epoch": 0.07,
"learning_rate": 4.8756404516738796e-05,
"loss": 1.2157,
"step": 1000
},
{
"epoch": 0.11,
"learning_rate": 4.8134606775108196e-05,
"loss": 1.1643,
"step": 1500
},
{
"epoch": 0.15,
"learning_rate": 4.751280903347759e-05,
"loss": 1.1273,
"step": 2000
},
{
"epoch": 0.19,
"learning_rate": 4.689101129184699e-05,
"loss": 1.1179,
"step": 2500
},
{
"epoch": 0.22,
"learning_rate": 4.626921355021639e-05,
"loss": 1.0905,
"step": 3000
},
{
"epoch": 0.26,
"learning_rate": 4.5647415808585783e-05,
"loss": 1.0808,
"step": 3500
},
{
"epoch": 0.3,
"learning_rate": 4.5025618066955184e-05,
"loss": 1.0725,
"step": 4000
},
{
"epoch": 0.34,
"learning_rate": 4.440382032532458e-05,
"loss": 1.0667,
"step": 4500
},
{
"epoch": 0.37,
"learning_rate": 4.378202258369398e-05,
"loss": 1.0585,
"step": 5000
},
{
"epoch": 0.41,
"learning_rate": 4.316022484206337e-05,
"loss": 1.04,
"step": 5500
},
{
"epoch": 0.45,
"learning_rate": 4.253842710043277e-05,
"loss": 1.0272,
"step": 6000
},
{
"epoch": 0.49,
"learning_rate": 4.191662935880217e-05,
"loss": 1.0278,
"step": 6500
},
{
"epoch": 0.52,
"learning_rate": 4.129483161717157e-05,
"loss": 1.0245,
"step": 7000
},
{
"epoch": 0.56,
"learning_rate": 4.0673033875540965e-05,
"loss": 1.0218,
"step": 7500
},
{
"epoch": 0.6,
"learning_rate": 4.0051236133910365e-05,
"loss": 1.0065,
"step": 8000
},
{
"epoch": 0.63,
"learning_rate": 3.942943839227976e-05,
"loss": 1.0135,
"step": 8500
},
{
"epoch": 0.67,
"learning_rate": 3.880764065064916e-05,
"loss": 1.0041,
"step": 9000
},
{
"epoch": 0.71,
"learning_rate": 3.818584290901855e-05,
"loss": 0.9972,
"step": 9500
},
{
"epoch": 0.75,
"learning_rate": 3.756404516738795e-05,
"loss": 0.9961,
"step": 10000
},
{
"epoch": 0.78,
"learning_rate": 3.694224742575735e-05,
"loss": 0.9878,
"step": 10500
},
{
"epoch": 0.82,
"learning_rate": 3.632044968412675e-05,
"loss": 0.979,
"step": 11000
},
{
"epoch": 0.86,
"learning_rate": 3.5698651942496146e-05,
"loss": 0.9852,
"step": 11500
},
{
"epoch": 0.9,
"learning_rate": 3.5076854200865546e-05,
"loss": 0.9816,
"step": 12000
},
{
"epoch": 0.93,
"learning_rate": 3.445505645923494e-05,
"loss": 0.9706,
"step": 12500
},
{
"epoch": 0.97,
"learning_rate": 3.383325871760434e-05,
"loss": 0.9656,
"step": 13000
},
{
"epoch": 1.01,
"learning_rate": 3.321146097597373e-05,
"loss": 0.9611,
"step": 13500
},
{
"epoch": 1.04,
"learning_rate": 3.258966323434313e-05,
"loss": 0.9435,
"step": 14000
},
{
"epoch": 1.08,
"learning_rate": 3.196786549271253e-05,
"loss": 0.9378,
"step": 14500
},
{
"epoch": 1.12,
"learning_rate": 3.1346067751081934e-05,
"loss": 0.9384,
"step": 15000
},
{
"epoch": 1.16,
"learning_rate": 3.072427000945133e-05,
"loss": 0.9291,
"step": 15500
},
{
"epoch": 1.19,
"learning_rate": 3.0102472267820724e-05,
"loss": 0.9446,
"step": 16000
},
{
"epoch": 1.23,
"learning_rate": 2.948067452619012e-05,
"loss": 0.9411,
"step": 16500
},
{
"epoch": 1.27,
"learning_rate": 2.8858876784559517e-05,
"loss": 0.9342,
"step": 17000
},
{
"epoch": 1.31,
"learning_rate": 2.8237079042928914e-05,
"loss": 0.9371,
"step": 17500
},
{
"epoch": 1.34,
"learning_rate": 2.761528130129831e-05,
"loss": 0.9319,
"step": 18000
},
{
"epoch": 1.38,
"learning_rate": 2.6993483559667715e-05,
"loss": 0.9159,
"step": 18500
},
{
"epoch": 1.42,
"learning_rate": 2.637168581803711e-05,
"loss": 0.9133,
"step": 19000
},
{
"epoch": 1.46,
"learning_rate": 2.5749888076406508e-05,
"loss": 0.923,
"step": 19500
},
{
"epoch": 1.49,
"learning_rate": 2.5128090334775905e-05,
"loss": 0.9213,
"step": 20000
},
{
"epoch": 1.53,
"learning_rate": 2.4506292593145302e-05,
"loss": 0.9275,
"step": 20500
},
{
"epoch": 1.57,
"learning_rate": 2.38844948515147e-05,
"loss": 0.9254,
"step": 21000
},
{
"epoch": 1.6,
"learning_rate": 2.32626971098841e-05,
"loss": 0.9222,
"step": 21500
},
{
"epoch": 1.64,
"learning_rate": 2.2640899368253496e-05,
"loss": 0.9163,
"step": 22000
},
{
"epoch": 1.68,
"learning_rate": 2.2019101626622892e-05,
"loss": 0.9141,
"step": 22500
},
{
"epoch": 1.72,
"learning_rate": 2.139730388499229e-05,
"loss": 0.9194,
"step": 23000
},
{
"epoch": 1.75,
"learning_rate": 2.077550614336169e-05,
"loss": 0.9217,
"step": 23500
},
{
"epoch": 1.79,
"learning_rate": 2.0153708401731086e-05,
"loss": 0.9077,
"step": 24000
},
{
"epoch": 1.83,
"learning_rate": 1.9531910660100483e-05,
"loss": 0.9141,
"step": 24500
},
{
"epoch": 1.87,
"learning_rate": 1.891011291846988e-05,
"loss": 0.9098,
"step": 25000
},
{
"epoch": 1.9,
"learning_rate": 1.828831517683928e-05,
"loss": 0.9056,
"step": 25500
},
{
"epoch": 1.94,
"learning_rate": 1.7666517435208677e-05,
"loss": 0.906,
"step": 26000
},
{
"epoch": 1.98,
"learning_rate": 1.7044719693578074e-05,
"loss": 0.9138,
"step": 26500
},
{
"epoch": 2.01,
"learning_rate": 1.642292195194747e-05,
"loss": 0.9104,
"step": 27000
},
{
"epoch": 2.05,
"learning_rate": 1.580112421031687e-05,
"loss": 0.8906,
"step": 27500
},
{
"epoch": 2.09,
"learning_rate": 1.5179326468686267e-05,
"loss": 0.8829,
"step": 28000
},
{
"epoch": 2.13,
"learning_rate": 1.4557528727055664e-05,
"loss": 0.8873,
"step": 28500
},
{
"epoch": 2.16,
"learning_rate": 1.3935730985425061e-05,
"loss": 0.8901,
"step": 29000
},
{
"epoch": 2.2,
"learning_rate": 1.3313933243794461e-05,
"loss": 0.8795,
"step": 29500
},
{
"epoch": 2.24,
"learning_rate": 1.2692135502163858e-05,
"loss": 0.8837,
"step": 30000
},
{
"epoch": 2.28,
"learning_rate": 1.2070337760533255e-05,
"loss": 0.8721,
"step": 30500
},
{
"epoch": 2.31,
"learning_rate": 1.1448540018902652e-05,
"loss": 0.8844,
"step": 31000
},
{
"epoch": 2.35,
"learning_rate": 1.082674227727205e-05,
"loss": 0.8878,
"step": 31500
},
{
"epoch": 2.39,
"learning_rate": 1.0204944535641447e-05,
"loss": 0.8782,
"step": 32000
},
{
"epoch": 2.43,
"learning_rate": 9.583146794010844e-06,
"loss": 0.8824,
"step": 32500
},
{
"epoch": 2.46,
"learning_rate": 8.961349052380242e-06,
"loss": 0.8963,
"step": 33000
},
{
"epoch": 2.5,
"learning_rate": 8.339551310749639e-06,
"loss": 0.8801,
"step": 33500
},
{
"epoch": 2.54,
"learning_rate": 7.717753569119037e-06,
"loss": 0.8738,
"step": 34000
},
{
"epoch": 2.57,
"learning_rate": 7.095955827488435e-06,
"loss": 0.8803,
"step": 34500
},
{
"epoch": 2.61,
"learning_rate": 6.474158085857833e-06,
"loss": 0.8751,
"step": 35000
},
{
"epoch": 2.65,
"learning_rate": 5.8523603442272295e-06,
"loss": 0.8778,
"step": 35500
},
{
"epoch": 2.69,
"learning_rate": 5.230562602596627e-06,
"loss": 0.8812,
"step": 36000
},
{
"epoch": 2.72,
"learning_rate": 4.608764860966025e-06,
"loss": 0.8725,
"step": 36500
},
{
"epoch": 2.76,
"learning_rate": 3.9869671193354225e-06,
"loss": 0.8728,
"step": 37000
},
{
"epoch": 2.8,
"learning_rate": 3.36516937770482e-06,
"loss": 0.8777,
"step": 37500
},
{
"epoch": 2.84,
"learning_rate": 2.7433716360742178e-06,
"loss": 0.8765,
"step": 38000
},
{
"epoch": 2.87,
"learning_rate": 2.1215738944436154e-06,
"loss": 0.8783,
"step": 38500
},
{
"epoch": 2.91,
"learning_rate": 1.499776152813013e-06,
"loss": 0.8701,
"step": 39000
},
{
"epoch": 2.95,
"learning_rate": 8.779784111824107e-07,
"loss": 0.8745,
"step": 39500
},
{
"epoch": 2.98,
"learning_rate": 2.561806695518082e-07,
"loss": 0.8763,
"step": 40000
},
{
"epoch": 3.0,
"step": 40206,
"total_flos": 5.667767240267059e+17,
"train_loss": 0.9513506801817564,
"train_runtime": 16971.3116,
"train_samples_per_second": 75.805,
"train_steps_per_second": 2.369
}
],
"max_steps": 40206,
"num_train_epochs": 3,
"total_flos": 5.667767240267059e+17,
"trial_name": null,
"trial_params": null
}
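For reference, a minimal sketch of how the log above could be inspected offline, assuming the file has been downloaded locally as trainer_state.json (the local path and the plain printout are illustrative choices for this sketch, not part of the repository):

import json

# Load the Trainer state file (the local path is an assumption for this sketch).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Every entry except the final summary records step, epoch, loss and learning rate.
steps = [entry for entry in state["log_history"] if "loss" in entry]

for entry in steps:
    print(f"step {entry['step']:>6}  epoch {entry['epoch']:.2f}  "
          f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.3e}")

# The last entry summarises the whole run.
summary = state["log_history"][-1]
print(f"final train_loss {summary['train_loss']:.4f} after {summary['step']} steps, "
      f"{summary['train_runtime'] / 3600:.1f} h runtime")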