lora-type-narrative-llama-3-8b / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.788732394366197,
"eval_steps": 54,
"global_step": 594,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 1.5155491828918457,
"learning_rate": 1.267605633802817e-05,
"loss": 1.8226,
"step": 54
},
{
"epoch": 0.25,
"eval_loss": 1.8172667026519775,
"eval_runtime": 73.1932,
"eval_samples_per_second": 3.074,
"eval_steps_per_second": 0.779,
"step": 54
},
{
"epoch": 0.51,
"grad_norm": 1.649418830871582,
"learning_rate": 2.535211267605634e-05,
"loss": 1.6744,
"step": 108
},
{
"epoch": 0.51,
"eval_loss": 1.5906846523284912,
"eval_runtime": 73.1988,
"eval_samples_per_second": 3.074,
"eval_steps_per_second": 0.779,
"step": 108
},
{
"epoch": 0.76,
"grad_norm": 2.1592600345611572,
"learning_rate": 3.802816901408451e-05,
"loss": 1.5273,
"step": 162
},
{
"epoch": 0.76,
"eval_loss": 1.526986002922058,
"eval_runtime": 73.174,
"eval_samples_per_second": 3.075,
"eval_steps_per_second": 0.779,
"step": 162
},
{
"epoch": 1.01,
"grad_norm": 2.3729259967803955,
"learning_rate": 4.992175273865415e-05,
"loss": 1.4981,
"step": 216
},
{
"epoch": 1.01,
"eval_loss": 1.5036466121673584,
"eval_runtime": 73.1899,
"eval_samples_per_second": 3.074,
"eval_steps_per_second": 0.779,
"step": 216
},
{
"epoch": 1.27,
"grad_norm": 2.0879106521606445,
"learning_rate": 4.85133020344288e-05,
"loss": 1.4484,
"step": 270
},
{
"epoch": 1.27,
"eval_loss": 1.4860492944717407,
"eval_runtime": 73.1845,
"eval_samples_per_second": 3.074,
"eval_steps_per_second": 0.779,
"step": 270
},
{
"epoch": 1.52,
"grad_norm": 2.163222074508667,
"learning_rate": 4.710485133020345e-05,
"loss": 1.4242,
"step": 324
},
{
"epoch": 1.52,
"eval_loss": 1.4793643951416016,
"eval_runtime": 73.1805,
"eval_samples_per_second": 3.075,
"eval_steps_per_second": 0.779,
"step": 324
},
{
"epoch": 1.77,
"grad_norm": 2.0721864700317383,
"learning_rate": 4.569640062597809e-05,
"loss": 1.4706,
"step": 378
},
{
"epoch": 1.77,
"eval_loss": 1.473569631576538,
"eval_runtime": 73.2169,
"eval_samples_per_second": 3.073,
"eval_steps_per_second": 0.779,
"step": 378
},
{
"epoch": 2.03,
"grad_norm": 2.9931325912475586,
"learning_rate": 4.428794992175274e-05,
"loss": 1.4157,
"step": 432
},
{
"epoch": 2.03,
"eval_loss": 1.4716590642929077,
"eval_runtime": 73.218,
"eval_samples_per_second": 3.073,
"eval_steps_per_second": 0.778,
"step": 432
},
{
"epoch": 2.28,
"grad_norm": 2.3123691082000732,
"learning_rate": 4.287949921752739e-05,
"loss": 1.3791,
"step": 486
},
{
"epoch": 2.28,
"eval_loss": 1.473205804824829,
"eval_runtime": 73.1999,
"eval_samples_per_second": 3.074,
"eval_steps_per_second": 0.779,
"step": 486
},
{
"epoch": 2.54,
"grad_norm": 2.8663740158081055,
"learning_rate": 4.1471048513302035e-05,
"loss": 1.3836,
"step": 540
},
{
"epoch": 2.54,
"eval_loss": 1.4686306715011597,
"eval_runtime": 73.1902,
"eval_samples_per_second": 3.074,
"eval_steps_per_second": 0.779,
"step": 540
},
{
"epoch": 2.79,
"grad_norm": 2.138514757156372,
"learning_rate": 4.0062597809076686e-05,
"loss": 1.3963,
"step": 594
},
{
"epoch": 2.79,
"eval_loss": 1.4640343189239502,
"eval_runtime": 73.1446,
"eval_samples_per_second": 3.076,
"eval_steps_per_second": 0.779,
"step": 594
}
],
"logging_steps": 54,
"max_steps": 2130,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 54,
"total_flos": 6.008456641536e+16,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
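
Usage note: this is the trainer_state.json written by the Hugging Face transformers Trainer during fine-tuning. The log_history list interleaves a training record (loss, grad_norm, learning_rate) and an evaluation record (eval_loss plus runtime stats) every 54 steps, matching logging_steps and eval_steps. Below is a minimal sketch of how one might read the loss curve back out of this file; the local file path and the use of the plain standard-library json module are assumptions for illustration, not part of the upload.

    import json

    # Hypothetical local path to the file shown above; adjust as needed.
    STATE_PATH = "trainer_state.json"

    with open(STATE_PATH) as f:
        state = json.load(f)

    # Training records carry a "loss" key; evaluation records carry "eval_loss".
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    # Print the paired train/eval losses logged at each step boundary.
    for tr, ev in zip(train_log, eval_log):
        print(f"step {tr['step']:>4}  epoch {tr['epoch']:.2f}  "
              f"train loss {tr['loss']:.4f}  eval loss {ev['eval_loss']:.4f}")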