{
  "best_metric": 0.5280629029350907,
  "best_model_checkpoint": "deberta-v3-large-finetuned-cola-midterm/run-1/checkpoint-321",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 321,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_loss": 0.5697229504585266,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 7.6758,
      "eval_samples_per_second": 135.882,
      "eval_steps_per_second": 8.598,
      "step": 107
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.4736369848251343,
      "eval_matthews_correlation": 0.37472354614099185,
      "eval_runtime": 8.2012,
      "eval_samples_per_second": 127.176,
      "eval_steps_per_second": 8.048,
      "step": 214
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.41821202635765076,
      "eval_matthews_correlation": 0.5280629029350907,
      "eval_runtime": 10.1308,
      "eval_samples_per_second": 102.953,
      "eval_steps_per_second": 6.515,
      "step": 321
    }
  ],
  "logging_steps": 500,
  "max_steps": 535,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.2524709009088454e-06,
    "num_train_epochs": 5,
    "per_device_train_batch_size": 8,
    "seed": 39
  }
}
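
This is a `trainer_state.json` written by the Hugging Face Transformers `Trainer` during a hyperparameter-search trial (`is_hyper_param_search: true`). A minimal sketch of reading it, assuming the file is saved locally under its usual name `trainer_state.json`; the printed fields come straight from the JSON above:

```python
import json

# Load the trainer state saved by the Transformers Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history holds one record per evaluation; here, one per epoch.
for entry in state["log_history"]:
    if "eval_matthews_correlation" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"eval_loss={entry['eval_loss']:.4f}, "
              f"MCC={entry['eval_matthews_correlation']:.4f}")

# Best checkpoint so far and the hyperparameters sampled for this trial.
print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])
print("trial params:", state["trial_params"])
```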