{
"best_metric": 0.3019091863081111,
"best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-1/checkpoint-428",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 642,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"eval_loss": 0.5719716548919678,
"eval_matthews_correlation": 0.0835915715371112,
"eval_runtime": 0.6978,
"eval_samples_per_second": 1494.771,
"eval_steps_per_second": 94.588,
"step": 214
},
{
"epoch": 2.0,
"eval_loss": 1.0430289506912231,
"eval_matthews_correlation": 0.3019091863081111,
"eval_runtime": 0.7554,
"eval_samples_per_second": 1380.773,
"eval_steps_per_second": 87.374,
"step": 428
},
{
"epoch": 2.34,
"grad_norm": 75.47649383544922,
"learning_rate": 7.747466282949623e-06,
"loss": 0.4411,
"step": 500
},
{
"epoch": 3.0,
"eval_loss": 1.2935222387313843,
"eval_matthews_correlation": 0.2953592953626779,
"eval_runtime": 0.7793,
"eval_samples_per_second": 1338.334,
"eval_steps_per_second": 84.688,
"step": 642
}
],
"logging_steps": 500,
"max_steps": 642,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 8286715452252.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": {
"learning_rate": 3.5027277138406044e-05,
"num_train_epochs": 3,
"per_device_train_batch_size": 4,
"seed": 29
}
}