{
"best_metric": 0.582995951417004,
"best_model_checkpoint": "distilbert-base-multilingual-cased-hyper-matt/run-4r5wbvc3/checkpoint-50",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"grad_norm": 1.2984086275100708,
"learning_rate": 2.8254267437810627e-05,
"loss": 0.6014,
"step": 10
},
{
"epoch": 0.4,
"grad_norm": 1.4973173141479492,
"learning_rate": 2.119070057835797e-05,
"loss": 0.5197,
"step": 20
},
{
"epoch": 0.6,
"grad_norm": 1.660354733467102,
"learning_rate": 1.4127133718905313e-05,
"loss": 0.4239,
"step": 30
},
{
"epoch": 0.8,
"grad_norm": 1.9838685989379883,
"learning_rate": 7.063566859452657e-06,
"loss": 0.4782,
"step": 40
},
{
"epoch": 1.0,
"grad_norm": 2.0562429428100586,
"learning_rate": 0.0,
"loss": 0.4456,
"step": 50
},
{
"epoch": 1.0,
"eval_accuracy": 0.7425,
"eval_f1": 0.582995951417004,
"eval_loss": 0.4402432143688202,
"eval_precision": 0.5806451612903226,
"eval_recall": 0.5853658536585366,
"eval_runtime": 1.5859,
"eval_samples_per_second": 252.225,
"eval_steps_per_second": 15.764,
"step": 50
}
],
"logging_steps": 10,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 211815370450944.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": {
"_wandb": {},
"assignments": {},
"learning_rate": 3.531783429726328e-05,
"metric": "eval/loss",
"num_train_epochs": 1,
"per_device_train_batch_size": 32,
"seed": 5
}
}
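
For reference, the sketch below shows one way to inspect this trainer_state.json with plain Python. It is a minimal example, not part of the training code: the file path is hypothetical, and the linear-decay formula is only an inference from the logged values (the scheduler type itself is not recorded in this file), using the base learning rate from trial_params and max_steps from the state.

import json

# Load the Trainer state (hypothetical path; point it at this file's location).
with open("checkpoint-50/trainer_state.json") as f:
    state = json.load(f)

base_lr = state["trial_params"]["learning_rate"]  # 3.531783429726328e-05 for this trial
max_steps = state["max_steps"]                    # 50

# The logged learning rates are consistent with a linear decay to zero and no warmup,
# i.e. lr(step) = base_lr * (max_steps - step) / max_steps.
for entry in state["log_history"]:
    if "learning_rate" in entry:
        expected = base_lr * (max_steps - entry["step"]) / max_steps
        print(entry["step"], entry["learning_rate"], expected)

# The final log entry holds the evaluation metrics; eval_f1 is also the best_metric.
final_eval = state["log_history"][-1]
print(final_eval["eval_f1"], final_eval["eval_accuracy"], final_eval["eval_loss"])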