{
  "best_metric": 0.0,
  "best_model_checkpoint": "deberta-v3-large-finetuned-cola-midterm/run-5/checkpoint-54",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 162,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_loss": 0.6138535737991333,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 8.6936,
      "eval_samples_per_second": 119.973,
      "eval_steps_per_second": 7.592,
      "step": 54
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.6022257208824158,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 9.1939,
      "eval_samples_per_second": 113.445,
      "eval_steps_per_second": 7.179,
      "step": 108
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.576120138168335,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 9.1717,
      "eval_samples_per_second": 113.719,
      "eval_steps_per_second": 7.196,
      "step": 162
    }
  ],
  "logging_steps": 500,
  "max_steps": 216,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.1908992105042739e-06,
    "num_train_epochs": 4,
    "per_device_train_batch_size": 16,
    "seed": 32
  }
}