{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.3235294117647059,
"eval_steps": 500,
"global_step": 22000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.9632352941176476e-05,
"loss": 6.7506,
"step": 500
},
{
"epoch": 0.01,
"learning_rate": 4.9264705882352944e-05,
"loss": 6.0294,
"step": 1000
},
{
"epoch": 0.02,
"learning_rate": 4.889705882352941e-05,
"loss": 5.6952,
"step": 1500
},
{
"epoch": 0.03,
"learning_rate": 4.8529411764705885e-05,
"loss": 5.3818,
"step": 2000
},
{
"epoch": 0.04,
"learning_rate": 4.816176470588236e-05,
"loss": 5.138,
"step": 2500
},
{
"epoch": 0.04,
"learning_rate": 4.7794117647058826e-05,
"loss": 4.946,
"step": 3000
},
{
"epoch": 0.05,
"learning_rate": 4.742647058823529e-05,
"loss": 4.7386,
"step": 3500
},
{
"epoch": 0.06,
"learning_rate": 4.705882352941177e-05,
"loss": 4.5023,
"step": 4000
},
{
"epoch": 0.07,
"learning_rate": 4.669117647058824e-05,
"loss": 4.2927,
"step": 4500
},
{
"epoch": 0.07,
"learning_rate": 4.632352941176471e-05,
"loss": 4.1612,
"step": 5000
},
{
"epoch": 0.08,
"learning_rate": 4.5955882352941176e-05,
"loss": 4.0391,
"step": 5500
},
{
"epoch": 0.09,
"learning_rate": 4.558823529411765e-05,
"loss": 4.0038,
"step": 6000
},
{
"epoch": 0.1,
"learning_rate": 4.522058823529412e-05,
"loss": 3.9057,
"step": 6500
},
{
"epoch": 0.1,
"learning_rate": 4.485294117647059e-05,
"loss": 3.8217,
"step": 7000
},
{
"epoch": 0.11,
"learning_rate": 4.448529411764706e-05,
"loss": 3.7518,
"step": 7500
},
{
"epoch": 0.12,
"learning_rate": 4.411764705882353e-05,
"loss": 3.6375,
"step": 8000
},
{
"epoch": 0.12,
"learning_rate": 4.375e-05,
"loss": 3.568,
"step": 8500
},
{
"epoch": 0.13,
"learning_rate": 4.3382352941176474e-05,
"loss": 3.4688,
"step": 9000
},
{
"epoch": 0.14,
"learning_rate": 4.301470588235295e-05,
"loss": 3.4439,
"step": 9500
},
{
"epoch": 0.15,
"learning_rate": 4.2647058823529415e-05,
"loss": 3.4011,
"step": 10000
},
{
"epoch": 0.15,
"learning_rate": 4.227941176470588e-05,
"loss": 3.366,
"step": 10500
},
{
"epoch": 0.16,
"learning_rate": 4.1911764705882356e-05,
"loss": 3.359,
"step": 11000
},
{
"epoch": 0.17,
"learning_rate": 4.154411764705883e-05,
"loss": 3.3448,
"step": 11500
},
{
"epoch": 0.18,
"learning_rate": 4.11764705882353e-05,
"loss": 3.3091,
"step": 12000
},
{
"epoch": 0.18,
"learning_rate": 4.0808823529411765e-05,
"loss": 3.2667,
"step": 12500
},
{
"epoch": 0.19,
"learning_rate": 4.044117647058824e-05,
"loss": 3.2444,
"step": 13000
},
{
"epoch": 0.2,
"learning_rate": 4.007352941176471e-05,
"loss": 3.2749,
"step": 13500
},
{
"epoch": 0.21,
"learning_rate": 3.970588235294117e-05,
"loss": 3.2861,
"step": 14000
},
{
"epoch": 0.21,
"learning_rate": 3.933823529411765e-05,
"loss": 3.3115,
"step": 14500
},
{
"epoch": 0.22,
"learning_rate": 3.897058823529412e-05,
"loss": 3.2376,
"step": 15000
},
{
"epoch": 0.23,
"learning_rate": 3.8602941176470595e-05,
"loss": 3.2652,
"step": 15500
},
{
"epoch": 0.24,
"learning_rate": 3.8235294117647055e-05,
"loss": 3.1289,
"step": 16000
},
{
"epoch": 0.24,
"learning_rate": 3.786764705882353e-05,
"loss": 3.1547,
"step": 16500
},
{
"epoch": 0.25,
"learning_rate": 3.7500000000000003e-05,
"loss": 3.1133,
"step": 17000
},
{
"epoch": 0.26,
"learning_rate": 3.713235294117647e-05,
"loss": 3.0532,
"step": 17500
},
{
"epoch": 0.26,
"learning_rate": 3.6764705882352945e-05,
"loss": 3.0215,
"step": 18000
},
{
"epoch": 0.27,
"learning_rate": 3.639705882352941e-05,
"loss": 3.0597,
"step": 18500
},
{
"epoch": 0.28,
"learning_rate": 3.6029411764705886e-05,
"loss": 3.0665,
"step": 19000
},
{
"epoch": 0.29,
"learning_rate": 3.566176470588235e-05,
"loss": 3.0695,
"step": 19500
},
{
"epoch": 0.29,
"learning_rate": 3.529411764705883e-05,
"loss": 3.0227,
"step": 20000
},
{
"epoch": 0.3,
"learning_rate": 3.4926470588235294e-05,
"loss": 3.0734,
"step": 20500
},
{
"epoch": 0.31,
"learning_rate": 3.455882352941177e-05,
"loss": 3.0519,
"step": 21000
},
{
"epoch": 0.32,
"learning_rate": 3.4191176470588236e-05,
"loss": 3.0109,
"step": 21500
},
{
"epoch": 0.32,
"learning_rate": 3.382352941176471e-05,
"loss": 2.9793,
"step": 22000
}
],
"logging_steps": 500,
"max_steps": 68000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 2000,
"total_flos": 1.5955896827904e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}