{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.788732394366197,
"eval_steps": 54,
"global_step": 594,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 2.052495241165161,
"learning_rate": 1.267605633802817e-05,
"loss": 1.5181,
"step": 54
},
{
"epoch": 0.25,
"eval_loss": 1.4102150201797485,
"eval_runtime": 58.6295,
"eval_samples_per_second": 3.838,
"eval_steps_per_second": 0.972,
"step": 54
},
{
"epoch": 0.51,
"grad_norm": 2.247065782546997,
"learning_rate": 2.535211267605634e-05,
"loss": 1.3096,
"step": 108
},
{
"epoch": 0.51,
"eval_loss": 1.2001529932022095,
"eval_runtime": 58.6195,
"eval_samples_per_second": 3.838,
"eval_steps_per_second": 0.972,
"step": 108
},
{
"epoch": 0.76,
"grad_norm": 2.313900947570801,
"learning_rate": 3.802816901408451e-05,
"loss": 1.191,
"step": 162
},
{
"epoch": 0.76,
"eval_loss": 1.1402766704559326,
"eval_runtime": 58.6237,
"eval_samples_per_second": 3.838,
"eval_steps_per_second": 0.972,
"step": 162
},
{
"epoch": 1.01,
"grad_norm": 2.6796553134918213,
"learning_rate": 4.992175273865415e-05,
"loss": 1.1728,
"step": 216
},
{
"epoch": 1.01,
"eval_loss": 1.1215072870254517,
"eval_runtime": 58.628,
"eval_samples_per_second": 3.838,
"eval_steps_per_second": 0.972,
"step": 216
},
{
"epoch": 1.27,
"grad_norm": 2.153010845184326,
"learning_rate": 4.85133020344288e-05,
"loss": 1.088,
"step": 270
},
{
"epoch": 1.27,
"eval_loss": 1.106386423110962,
"eval_runtime": 58.608,
"eval_samples_per_second": 3.839,
"eval_steps_per_second": 0.973,
"step": 270
},
{
"epoch": 1.52,
"grad_norm": 2.141529083251953,
"learning_rate": 4.710485133020345e-05,
"loss": 1.1081,
"step": 324
},
{
"epoch": 1.52,
"eval_loss": 1.095576286315918,
"eval_runtime": 58.6271,
"eval_samples_per_second": 3.838,
"eval_steps_per_second": 0.972,
"step": 324
},
{
"epoch": 1.77,
"grad_norm": 2.5019514560699463,
"learning_rate": 4.569640062597809e-05,
"loss": 1.138,
"step": 378
},
{
"epoch": 1.77,
"eval_loss": 1.089218020439148,
"eval_runtime": 58.6083,
"eval_samples_per_second": 3.839,
"eval_steps_per_second": 0.973,
"step": 378
},
{
"epoch": 2.03,
"grad_norm": 2.8438944816589355,
"learning_rate": 4.428794992175274e-05,
"loss": 1.0855,
"step": 432
},
{
"epoch": 2.03,
"eval_loss": 1.0866470336914062,
"eval_runtime": 58.5985,
"eval_samples_per_second": 3.84,
"eval_steps_per_second": 0.973,
"step": 432
},
{
"epoch": 2.28,
"grad_norm": 2.4683525562286377,
"learning_rate": 4.287949921752739e-05,
"loss": 1.0292,
"step": 486
},
{
"epoch": 2.28,
"eval_loss": 1.0852150917053223,
"eval_runtime": 58.6647,
"eval_samples_per_second": 3.835,
"eval_steps_per_second": 0.972,
"step": 486
},
{
"epoch": 2.54,
"grad_norm": 3.097724676132202,
"learning_rate": 4.1471048513302035e-05,
"loss": 1.0259,
"step": 540
},
{
"epoch": 2.54,
"eval_loss": 1.0849454402923584,
"eval_runtime": 58.6693,
"eval_samples_per_second": 3.835,
"eval_steps_per_second": 0.972,
"step": 540
},
{
"epoch": 2.79,
"grad_norm": 2.5000972747802734,
"learning_rate": 4.0062597809076686e-05,
"loss": 1.0284,
"step": 594
},
{
"epoch": 2.79,
"eval_loss": 1.0797874927520752,
"eval_runtime": 58.6773,
"eval_samples_per_second": 3.835,
"eval_steps_per_second": 0.971,
"step": 594
}
],
"logging_steps": 54,
"max_steps": 2130,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 54,
"total_flos": 4.744730322350899e+16,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}