{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.99660441426146,
"eval_steps": 50,
"global_step": 294,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 3e-05,
"loss": 1.2028,
"step": 10
},
{
"epoch": 0.14,
"learning_rate": 3e-05,
"loss": 1.1367,
"step": 20
},
{
"epoch": 0.2,
"learning_rate": 3e-05,
"loss": 1.1322,
"step": 30
},
{
"epoch": 0.27,
"learning_rate": 3e-05,
"loss": 1.1106,
"step": 40
},
{
"epoch": 0.34,
"learning_rate": 3e-05,
"loss": 1.1008,
"step": 50
},
{
"epoch": 0.34,
"eval_loss": 1.1495487689971924,
"eval_runtime": 15.8798,
"eval_samples_per_second": 16.499,
"eval_steps_per_second": 4.156,
"step": 50
},
{
"epoch": 0.41,
"learning_rate": 3e-05,
"loss": 1.0936,
"step": 60
},
{
"epoch": 0.48,
"learning_rate": 3e-05,
"loss": 1.0916,
"step": 70
},
{
"epoch": 0.54,
"learning_rate": 3e-05,
"loss": 1.1387,
"step": 80
},
{
"epoch": 0.61,
"learning_rate": 3e-05,
"loss": 1.1185,
"step": 90
},
{
"epoch": 0.68,
"learning_rate": 3e-05,
"loss": 1.0662,
"step": 100
},
{
"epoch": 0.68,
"eval_loss": 1.1313235759735107,
"eval_runtime": 15.8959,
"eval_samples_per_second": 16.482,
"eval_steps_per_second": 4.152,
"step": 100
},
{
"epoch": 0.75,
"learning_rate": 3e-05,
"loss": 1.0562,
"step": 110
},
{
"epoch": 0.81,
"learning_rate": 3e-05,
"loss": 1.1093,
"step": 120
},
{
"epoch": 0.88,
"learning_rate": 3e-05,
"loss": 1.0944,
"step": 130
},
{
"epoch": 0.95,
"learning_rate": 3e-05,
"loss": 1.0736,
"step": 140
},
{
"epoch": 1.02,
"learning_rate": 3e-05,
"loss": 1.039,
"step": 150
},
{
"epoch": 1.02,
"eval_loss": 1.1258115768432617,
"eval_runtime": 16.0332,
"eval_samples_per_second": 16.341,
"eval_steps_per_second": 4.116,
"step": 150
},
{
"epoch": 1.09,
"learning_rate": 3e-05,
"loss": 0.9901,
"step": 160
},
{
"epoch": 1.15,
"learning_rate": 3e-05,
"loss": 1.0627,
"step": 170
},
{
"epoch": 1.22,
"learning_rate": 3e-05,
"loss": 0.9774,
"step": 180
},
{
"epoch": 1.29,
"learning_rate": 3e-05,
"loss": 0.9502,
"step": 190
},
{
"epoch": 1.36,
"learning_rate": 3e-05,
"loss": 1.0173,
"step": 200
},
{
"epoch": 1.36,
"eval_loss": 1.1320679187774658,
"eval_runtime": 15.9046,
"eval_samples_per_second": 16.473,
"eval_steps_per_second": 4.15,
"step": 200
},
{
"epoch": 1.43,
"learning_rate": 3e-05,
"loss": 0.9879,
"step": 210
},
{
"epoch": 1.49,
"learning_rate": 3e-05,
"loss": 0.9056,
"step": 220
},
{
"epoch": 1.56,
"learning_rate": 3e-05,
"loss": 0.9976,
"step": 230
},
{
"epoch": 1.63,
"learning_rate": 3e-05,
"loss": 0.989,
"step": 240
},
{
"epoch": 1.7,
"learning_rate": 3e-05,
"loss": 1.0279,
"step": 250
},
{
"epoch": 1.7,
"eval_loss": 1.129327416419983,
"eval_runtime": 15.4802,
"eval_samples_per_second": 16.925,
"eval_steps_per_second": 4.264,
"step": 250
},
{
"epoch": 1.77,
"learning_rate": 3e-05,
"loss": 0.9651,
"step": 260
},
{
"epoch": 1.83,
"learning_rate": 3e-05,
"loss": 1.0424,
"step": 270
},
{
"epoch": 1.9,
"learning_rate": 3e-05,
"loss": 0.962,
"step": 280
},
{
"epoch": 1.97,
"learning_rate": 3e-05,
"loss": 0.9308,
"step": 290
},
{
"epoch": 2.0,
"step": 294,
"total_flos": 7.119009482145792e+16,
"train_loss": 1.0446421305338542,
"train_runtime": 881.4723,
"train_samples_per_second": 5.346,
"train_steps_per_second": 0.334
}
],
"logging_steps": 10,
"max_steps": 294,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 250,
"total_flos": 7.119009482145792e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}