{
"best_metric": 0.9829629629629629,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat-kornia/checkpoint-570",
"epoch": 3.0,
"global_step": 570,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 8.771929824561403e-06,
"loss": 2.2821,
"step": 10
},
{
"epoch": 0.11,
"learning_rate": 1.7543859649122806e-05,
"loss": 2.0817,
"step": 20
},
{
"epoch": 0.16,
"learning_rate": 2.6315789473684212e-05,
"loss": 1.5479,
"step": 30
},
{
"epoch": 0.21,
"learning_rate": 3.508771929824561e-05,
"loss": 0.8429,
"step": 40
},
{
"epoch": 0.26,
"learning_rate": 4.3859649122807014e-05,
"loss": 0.4422,
"step": 50
},
{
"epoch": 0.32,
"learning_rate": 4.970760233918128e-05,
"loss": 0.3354,
"step": 60
},
{
"epoch": 0.37,
"learning_rate": 4.8732943469785574e-05,
"loss": 0.249,
"step": 70
},
{
"epoch": 0.42,
"learning_rate": 4.7758284600389865e-05,
"loss": 0.2015,
"step": 80
},
{
"epoch": 0.47,
"learning_rate": 4.678362573099415e-05,
"loss": 0.1684,
"step": 90
},
{
"epoch": 0.53,
"learning_rate": 4.580896686159844e-05,
"loss": 0.1307,
"step": 100
},
{
"epoch": 0.58,
"learning_rate": 4.483430799220273e-05,
"loss": 0.1544,
"step": 110
},
{
"epoch": 0.63,
"learning_rate": 4.3859649122807014e-05,
"loss": 0.1504,
"step": 120
},
{
"epoch": 0.68,
"learning_rate": 4.2884990253411305e-05,
"loss": 0.15,
"step": 130
},
{
"epoch": 0.74,
"learning_rate": 4.1910331384015596e-05,
"loss": 0.112,
"step": 140
},
{
"epoch": 0.79,
"learning_rate": 4.093567251461988e-05,
"loss": 0.1155,
"step": 150
},
{
"epoch": 0.84,
"learning_rate": 3.996101364522417e-05,
"loss": 0.1182,
"step": 160
},
{
"epoch": 0.89,
"learning_rate": 3.898635477582846e-05,
"loss": 0.0944,
"step": 170
},
{
"epoch": 0.95,
"learning_rate": 3.8011695906432746e-05,
"loss": 0.0843,
"step": 180
},
{
"epoch": 1.0,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.0859,
"step": 190
},
{
"epoch": 1.0,
"eval_accuracy": 0.9685185185185186,
"eval_loss": 0.09687485545873642,
"eval_runtime": 20.7234,
"eval_samples_per_second": 130.287,
"eval_steps_per_second": 4.102,
"step": 190
},
{
"epoch": 1.05,
"learning_rate": 3.606237816764133e-05,
"loss": 0.0737,
"step": 200
},
{
"epoch": 1.11,
"learning_rate": 3.508771929824561e-05,
"loss": 0.055,
"step": 210
},
{
"epoch": 1.16,
"learning_rate": 3.41130604288499e-05,
"loss": 0.0686,
"step": 220
},
{
"epoch": 1.21,
"learning_rate": 3.313840155945419e-05,
"loss": 0.0954,
"step": 230
},
{
"epoch": 1.26,
"learning_rate": 3.216374269005848e-05,
"loss": 0.0619,
"step": 240
},
{
"epoch": 1.32,
"learning_rate": 3.118908382066277e-05,
"loss": 0.0701,
"step": 250
},
{
"epoch": 1.37,
"learning_rate": 3.0214424951267055e-05,
"loss": 0.0682,
"step": 260
},
{
"epoch": 1.42,
"learning_rate": 2.9239766081871346e-05,
"loss": 0.0592,
"step": 270
},
{
"epoch": 1.47,
"learning_rate": 2.8265107212475634e-05,
"loss": 0.065,
"step": 280
},
{
"epoch": 1.53,
"learning_rate": 2.729044834307992e-05,
"loss": 0.0959,
"step": 290
},
{
"epoch": 1.58,
"learning_rate": 2.6315789473684212e-05,
"loss": 0.0613,
"step": 300
},
{
"epoch": 1.63,
"learning_rate": 2.53411306042885e-05,
"loss": 0.0628,
"step": 310
},
{
"epoch": 1.68,
"learning_rate": 2.4366471734892787e-05,
"loss": 0.0565,
"step": 320
},
{
"epoch": 1.74,
"learning_rate": 2.3391812865497074e-05,
"loss": 0.0629,
"step": 330
},
{
"epoch": 1.79,
"learning_rate": 2.2417153996101365e-05,
"loss": 0.0674,
"step": 340
},
{
"epoch": 1.84,
"learning_rate": 2.1442495126705653e-05,
"loss": 0.0589,
"step": 350
},
{
"epoch": 1.89,
"learning_rate": 2.046783625730994e-05,
"loss": 0.0551,
"step": 360
},
{
"epoch": 1.95,
"learning_rate": 1.949317738791423e-05,
"loss": 0.0506,
"step": 370
},
{
"epoch": 2.0,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.0664,
"step": 380
},
{
"epoch": 2.0,
"eval_accuracy": 0.9814814814814815,
"eval_loss": 0.06272529065608978,
"eval_runtime": 20.6321,
"eval_samples_per_second": 130.864,
"eval_steps_per_second": 4.12,
"step": 380
},
{
"epoch": 2.05,
"learning_rate": 1.7543859649122806e-05,
"loss": 0.0306,
"step": 390
},
{
"epoch": 2.11,
"learning_rate": 1.6569200779727097e-05,
"loss": 0.0482,
"step": 400
},
{
"epoch": 2.16,
"learning_rate": 1.5594541910331384e-05,
"loss": 0.0343,
"step": 410
},
{
"epoch": 2.21,
"learning_rate": 1.4619883040935673e-05,
"loss": 0.0324,
"step": 420
},
{
"epoch": 2.26,
"learning_rate": 1.364522417153996e-05,
"loss": 0.0428,
"step": 430
},
{
"epoch": 2.32,
"learning_rate": 1.267056530214425e-05,
"loss": 0.0473,
"step": 440
},
{
"epoch": 2.37,
"learning_rate": 1.1695906432748537e-05,
"loss": 0.0233,
"step": 450
},
{
"epoch": 2.42,
"learning_rate": 1.0721247563352826e-05,
"loss": 0.0448,
"step": 460
},
{
"epoch": 2.47,
"learning_rate": 9.746588693957115e-06,
"loss": 0.0388,
"step": 470
},
{
"epoch": 2.53,
"learning_rate": 8.771929824561403e-06,
"loss": 0.0352,
"step": 480
},
{
"epoch": 2.58,
"learning_rate": 7.797270955165692e-06,
"loss": 0.0202,
"step": 490
},
{
"epoch": 2.63,
"learning_rate": 6.82261208576998e-06,
"loss": 0.0209,
"step": 500
},
{
"epoch": 2.68,
"learning_rate": 5.8479532163742686e-06,
"loss": 0.0299,
"step": 510
},
{
"epoch": 2.74,
"learning_rate": 4.873294346978558e-06,
"loss": 0.0409,
"step": 520
},
{
"epoch": 2.79,
"learning_rate": 3.898635477582846e-06,
"loss": 0.0328,
"step": 530
},
{
"epoch": 2.84,
"learning_rate": 2.9239766081871343e-06,
"loss": 0.0293,
"step": 540
},
{
"epoch": 2.89,
"learning_rate": 1.949317738791423e-06,
"loss": 0.0321,
"step": 550
},
{
"epoch": 2.95,
"learning_rate": 9.746588693957115e-07,
"loss": 0.0424,
"step": 560
},
{
"epoch": 3.0,
"learning_rate": 0.0,
"loss": 0.0359,
"step": 570
},
{
"epoch": 3.0,
"eval_accuracy": 0.9829629629629629,
"eval_loss": 0.05404135212302208,
"eval_runtime": 20.7468,
"eval_samples_per_second": 130.141,
"eval_steps_per_second": 4.097,
"step": 570
},
{
"epoch": 3.0,
"step": 570,
"total_flos": 1.8124066505760768e+18,
"train_loss": 0.19761514681995962,
"train_runtime": 1782.2808,
"train_samples_per_second": 40.903,
"train_steps_per_second": 0.32
}
],
"max_steps": 570,
"num_train_epochs": 3,
"total_flos": 1.8124066505760768e+18,
"trial_name": null,
"trial_params": null
}