{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"global_step": 40743,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"learning_rate": 4.754122497055359e-05,
"loss": 1.7036,
"step": 500
},
{
"epoch": 0.07,
"learning_rate": 4.50873576756969e-05,
"loss": 1.41,
"step": 1000
},
{
"epoch": 0.11,
"learning_rate": 4.2633490380840206e-05,
"loss": 1.3415,
"step": 1500
},
{
"epoch": 0.15,
"learning_rate": 4.0179623085983514e-05,
"loss": 1.3378,
"step": 2000
},
{
"epoch": 0.18,
"learning_rate": 3.7725755791126815e-05,
"loss": 1.2834,
"step": 2500
},
{
"epoch": 0.22,
"learning_rate": 3.527188849627012e-05,
"loss": 1.2521,
"step": 3000
},
{
"epoch": 0.26,
"learning_rate": 3.281802120141343e-05,
"loss": 1.2503,
"step": 3500
},
{
"epoch": 0.29,
"learning_rate": 3.0364153906556735e-05,
"loss": 1.2091,
"step": 4000
},
{
"epoch": 0.33,
"learning_rate": 2.7910286611700043e-05,
"loss": 1.1859,
"step": 4500
},
{
"epoch": 0.37,
"learning_rate": 2.5456419316843344e-05,
"loss": 1.1315,
"step": 5000
},
{
"epoch": 0.4,
"learning_rate": 2.3002552021986652e-05,
"loss": 1.1428,
"step": 5500
},
{
"epoch": 0.44,
"learning_rate": 2.0548684727129957e-05,
"loss": 1.131,
"step": 6000
},
{
"epoch": 0.48,
"learning_rate": 1.8094817432273264e-05,
"loss": 1.1061,
"step": 6500
},
{
"epoch": 0.52,
"learning_rate": 1.564095013741657e-05,
"loss": 1.0928,
"step": 7000
},
{
"epoch": 0.55,
"learning_rate": 1.3187082842559875e-05,
"loss": 1.0874,
"step": 7500
},
{
"epoch": 0.59,
"learning_rate": 1.0733215547703181e-05,
"loss": 1.0383,
"step": 8000
},
{
"epoch": 0.63,
"learning_rate": 8.279348252846486e-06,
"loss": 1.053,
"step": 8500
},
{
"epoch": 0.66,
"learning_rate": 5.8254809579897925e-06,
"loss": 1.0296,
"step": 9000
},
{
"epoch": 0.7,
"learning_rate": 3.3716136631330974e-06,
"loss": 1.0306,
"step": 9500
},
{
"epoch": 0.74,
"learning_rate": 9.177463682764037e-07,
"loss": 1.0046,
"step": 10000
},
{
"epoch": 0.77,
"learning_rate": 0.0,
"loss": 1.0091,
"step": 10500
},
{
"epoch": 0.81,
"learning_rate": 0.0,
"loss": 1.0358,
"step": 11000
},
{
"epoch": 0.85,
"learning_rate": 0.0,
"loss": 1.0118,
"step": 11500
},
{
"epoch": 0.88,
"learning_rate": 0.0,
"loss": 0.9876,
"step": 12000
},
{
"epoch": 0.92,
"learning_rate": 0.0,
"loss": 1.0315,
"step": 12500
},
{
"epoch": 0.96,
"learning_rate": 0.0,
"loss": 1.0118,
"step": 13000
},
{
"epoch": 0.99,
"learning_rate": 0.0,
"loss": 1.0024,
"step": 13500
},
{
"epoch": 1.03,
"learning_rate": 0.0,
"loss": 0.8988,
"step": 14000
},
{
"epoch": 1.07,
"learning_rate": 0.0,
"loss": 0.8442,
"step": 14500
},
{
"epoch": 1.1,
"learning_rate": 0.0,
"loss": 0.8775,
"step": 15000
},
{
"epoch": 1.14,
"learning_rate": 0.0,
"loss": 0.8822,
"step": 15500
},
{
"epoch": 1.18,
"learning_rate": 0.0,
"loss": 0.8732,
"step": 16000
},
{
"epoch": 1.21,
"learning_rate": 0.0,
"loss": 0.8775,
"step": 16500
},
{
"epoch": 1.25,
"learning_rate": 0.0,
"loss": 0.8662,
"step": 17000
},
{
"epoch": 1.29,
"learning_rate": 0.0,
"loss": 0.8707,
"step": 17500
},
{
"epoch": 1.33,
"learning_rate": 0.0,
"loss": 0.8568,
"step": 18000
},
{
"epoch": 1.36,
"learning_rate": 0.0,
"loss": 0.8737,
"step": 18500
},
{
"epoch": 1.4,
"learning_rate": 0.0,
"loss": 0.8579,
"step": 19000
},
{
"epoch": 1.44,
"learning_rate": 0.0,
"loss": 0.883,
"step": 19500
},
{
"epoch": 1.47,
"learning_rate": 0.0,
"loss": 0.8582,
"step": 20000
},
{
"epoch": 1.51,
"learning_rate": 0.0,
"loss": 0.8591,
"step": 20500
},
{
"epoch": 1.55,
"learning_rate": 0.0,
"loss": 0.8582,
"step": 21000
},
{
"epoch": 1.58,
"learning_rate": 0.0,
"loss": 0.8864,
"step": 21500
},
{
"epoch": 1.62,
"learning_rate": 0.0,
"loss": 0.8819,
"step": 22000
},
{
"epoch": 1.66,
"learning_rate": 0.0,
"loss": 0.8462,
"step": 22500
},
{
"epoch": 1.69,
"learning_rate": 0.0,
"loss": 0.8641,
"step": 23000
},
{
"epoch": 1.73,
"learning_rate": 0.0,
"loss": 0.8762,
"step": 23500
},
{
"epoch": 1.77,
"learning_rate": 0.0,
"loss": 0.8787,
"step": 24000
},
{
"epoch": 1.8,
"learning_rate": 0.0,
"loss": 0.8632,
"step": 24500
},
{
"epoch": 1.84,
"learning_rate": 0.0,
"loss": 0.8648,
"step": 25000
},
{
"epoch": 1.88,
"learning_rate": 0.0,
"loss": 0.8755,
"step": 25500
},
{
"epoch": 1.91,
"learning_rate": 0.0,
"loss": 0.8521,
"step": 26000
},
{
"epoch": 1.95,
"learning_rate": 0.0,
"loss": 0.8941,
"step": 26500
},
{
"epoch": 1.99,
"learning_rate": 0.0,
"loss": 0.8518,
"step": 27000
},
{
"epoch": 2.02,
"learning_rate": 0.0,
"loss": 0.8689,
"step": 27500
},
{
"epoch": 2.06,
"learning_rate": 0.0,
"loss": 0.8546,
"step": 28000
},
{
"epoch": 2.1,
"learning_rate": 0.0,
"loss": 0.8404,
"step": 28500
},
{
"epoch": 2.14,
"learning_rate": 0.0,
"loss": 0.8472,
"step": 29000
},
{
"epoch": 2.17,
"learning_rate": 0.0,
"loss": 0.8793,
"step": 29500
},
{
"epoch": 2.21,
"learning_rate": 0.0,
"loss": 0.8845,
"step": 30000
},
{
"epoch": 2.25,
"learning_rate": 0.0,
"loss": 0.8487,
"step": 30500
},
{
"epoch": 2.28,
"learning_rate": 0.0,
"loss": 0.8641,
"step": 31000
},
{
"epoch": 2.32,
"learning_rate": 0.0,
"loss": 0.886,
"step": 31500
},
{
"epoch": 2.36,
"learning_rate": 0.0,
"loss": 0.8828,
"step": 32000
},
{
"epoch": 2.39,
"learning_rate": 0.0,
"loss": 0.8637,
"step": 32500
},
{
"epoch": 2.43,
"learning_rate": 0.0,
"loss": 0.8748,
"step": 33000
},
{
"epoch": 2.47,
"learning_rate": 0.0,
"loss": 0.879,
"step": 33500
},
{
"epoch": 2.5,
"learning_rate": 0.0,
"loss": 0.9094,
"step": 34000
},
{
"epoch": 2.54,
"learning_rate": 0.0,
"loss": 0.8687,
"step": 34500
},
{
"epoch": 2.58,
"learning_rate": 0.0,
"loss": 0.8475,
"step": 35000
},
{
"epoch": 2.61,
"learning_rate": 0.0,
"loss": 0.8596,
"step": 35500
},
{
"epoch": 2.65,
"learning_rate": 0.0,
"loss": 0.8806,
"step": 36000
},
{
"epoch": 2.69,
"learning_rate": 0.0,
"loss": 0.8705,
"step": 36500
},
{
"epoch": 2.72,
"learning_rate": 0.0,
"loss": 0.865,
"step": 37000
},
{
"epoch": 2.76,
"learning_rate": 0.0,
"loss": 0.8704,
"step": 37500
},
{
"epoch": 2.8,
"learning_rate": 0.0,
"loss": 0.8679,
"step": 38000
},
{
"epoch": 2.83,
"learning_rate": 0.0,
"loss": 0.8774,
"step": 38500
},
{
"epoch": 2.87,
"learning_rate": 0.0,
"loss": 0.8955,
"step": 39000
},
{
"epoch": 2.91,
"learning_rate": 0.0,
"loss": 0.8778,
"step": 39500
},
{
"epoch": 2.95,
"learning_rate": 0.0,
"loss": 0.8495,
"step": 40000
},
{
"epoch": 2.98,
"learning_rate": 0.0,
"loss": 0.8836,
"step": 40500
},
{
"epoch": 3.0,
"step": 40743,
"total_flos": 9.6652398495744e+16,
"train_loss": 0.960862031856172,
"train_runtime": 14382.9455,
"train_samples_per_second": 22.662,
"train_steps_per_second": 2.833
}
],
"max_steps": 40743,
"num_train_epochs": 3,
"total_flos": 9.6652398495744e+16,
"trial_name": null,
"trial_params": null
}