lora-function-more-llama-3-8b / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.788732394366197,
"eval_steps": 54,
"global_step": 594,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 2.0718016624450684,
"learning_rate": 1.267605633802817e-05,
"loss": 1.0998,
"step": 54
},
{
"epoch": 0.25,
"eval_loss": 1.0505307912826538,
"eval_runtime": 60.3266,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 54
},
{
"epoch": 0.51,
"grad_norm": 3.964005947113037,
"learning_rate": 2.535211267605634e-05,
"loss": 0.9713,
"step": 108
},
{
"epoch": 0.51,
"eval_loss": 0.8683193922042847,
"eval_runtime": 60.3172,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 108
},
{
"epoch": 0.76,
"grad_norm": 2.101376533508301,
"learning_rate": 3.802816901408451e-05,
"loss": 0.8291,
"step": 162
},
{
"epoch": 0.76,
"eval_loss": 0.810826301574707,
"eval_runtime": 60.3149,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 162
},
{
"epoch": 1.01,
"grad_norm": 1.6203935146331787,
"learning_rate": 4.992175273865415e-05,
"loss": 0.7965,
"step": 216
},
{
"epoch": 1.01,
"eval_loss": 0.784785807132721,
"eval_runtime": 60.3247,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 216
},
{
"epoch": 1.27,
"grad_norm": 2.086142063140869,
"learning_rate": 4.85133020344288e-05,
"loss": 0.7287,
"step": 270
},
{
"epoch": 1.27,
"eval_loss": 0.7797535061836243,
"eval_runtime": 60.326,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 270
},
{
"epoch": 1.52,
"grad_norm": 1.9750502109527588,
"learning_rate": 4.710485133020345e-05,
"loss": 0.7509,
"step": 324
},
{
"epoch": 1.52,
"eval_loss": 0.7676067352294922,
"eval_runtime": 60.2996,
"eval_samples_per_second": 3.731,
"eval_steps_per_second": 0.945,
"step": 324
},
{
"epoch": 1.77,
"grad_norm": 2.0204286575317383,
"learning_rate": 4.569640062597809e-05,
"loss": 0.7793,
"step": 378
},
{
"epoch": 1.77,
"eval_loss": 0.7613245844841003,
"eval_runtime": 60.3289,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 378
},
{
"epoch": 2.03,
"grad_norm": 2.257814407348633,
"learning_rate": 4.428794992175274e-05,
"loss": 0.7348,
"step": 432
},
{
"epoch": 2.03,
"eval_loss": 0.7604629397392273,
"eval_runtime": 60.3175,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 432
},
{
"epoch": 2.28,
"grad_norm": 2.0063090324401855,
"learning_rate": 4.287949921752739e-05,
"loss": 0.6786,
"step": 486
},
{
"epoch": 2.28,
"eval_loss": 0.7632331252098083,
"eval_runtime": 60.3172,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 486
},
{
"epoch": 2.54,
"grad_norm": 2.430443048477173,
"learning_rate": 4.1471048513302035e-05,
"loss": 0.6789,
"step": 540
},
{
"epoch": 2.54,
"eval_loss": 0.7604073882102966,
"eval_runtime": 60.3176,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 0.945,
"step": 540
},
{
"epoch": 2.79,
"grad_norm": 1.8232342004776,
"learning_rate": 4.0062597809076686e-05,
"loss": 0.7177,
"step": 594
},
{
"epoch": 2.79,
"eval_loss": 0.7575519680976868,
"eval_runtime": 60.3302,
"eval_samples_per_second": 3.729,
"eval_steps_per_second": 0.945,
"step": 594
}
],
"logging_steps": 54,
"max_steps": 2130,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 54,
"total_flos": 4.802533196370739e+16,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
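The `log_history` array above interleaves training entries (keyed by `loss` and `grad_norm`) with evaluation entries (keyed by `eval_loss`), both logged every 54 steps. A minimal sketch of reading this file and plotting the two loss curves follows; the file path and the use of matplotlib are assumptions, not part of the checkpoint itself.

```python
import json
import matplotlib.pyplot as plt

# Load the checkpoint's trainer state (adjust the path to your checkpoint directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training and eval records share the same "step" key but carry different loss fields.
train_entries = [e for e in state["log_history"] if "loss" in e]
eval_entries = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_entries],
         [e["loss"] for e in train_entries], label="train loss")
plt.plot([e["step"] for e in eval_entries],
         [e["eval_loss"] for e in eval_entries], label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()
```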