gpt-sw3-instruct-1.3b / trainer_state.json
Oskar Holmström · First model version · 6bd31c3
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.8036913731674553,
  "global_step": 6000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23,
      "learning_rate": 7.78816199376947e-06,
      "loss": 2.8614,
      "step": 500
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.557632398753894e-05,
      "loss": 0.7943,
      "step": 1000
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.9626168224299065e-05,
      "loss": 0.7679,
      "step": 1500
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.8760816891658016e-05,
      "loss": 0.7359,
      "step": 2000
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.7895465559016964e-05,
      "loss": 0.6255,
      "step": 2500
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.7030114226375908e-05,
      "loss": 0.5942,
      "step": 3000
    },
    {
      "epoch": 1.64,
      "learning_rate": 1.616476289373486e-05,
      "loss": 0.5809,
      "step": 3500
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.5299411561093806e-05,
      "loss": 0.5899,
      "step": 4000
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.4434060228452753e-05,
      "loss": 0.5161,
      "step": 4500
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.3568708895811699e-05,
      "loss": 0.4241,
      "step": 5000
    },
    {
      "epoch": 2.57,
      "learning_rate": 1.2703357563170648e-05,
      "loss": 0.4283,
      "step": 5500
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.1838006230529595e-05,
      "loss": 0.4283,
      "step": 6000
    }
  ],
  "max_steps": 12840,
  "num_train_epochs": 6,
  "total_flos": 2.2763010362425344e+17,
  "trial_name": null,
  "trial_params": null
}
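
The "log_history" entries above can be inspected programmatically. The following is a minimal sketch, not part of the repository itself: it assumes the file has been downloaded locally as trainer_state.json (the path is an assumption) and uses the standard json module plus matplotlib to plot the logged training loss against the global step.

import json

import matplotlib.pyplot as plt

# Assumed local path; adjust to wherever trainer_state.json was saved.
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Each log_history entry records the values logged at one step
# (epoch, learning_rate, loss, step).
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]

plt.plot(steps, losses, marker="o")
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("gpt-sw3-instruct-1.3b training loss (log_history)")
plt.show()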