{
"best_metric": 0.9863,
"best_model_checkpoint": "vit-base-patch16-224-in21k-finetuned-cifar10/checkpoint-1170",
"epoch": 2.998080614203455,
"global_step": 1170,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 4.273504273504274e-06,
"loss": 2.3003,
"step": 10
},
{
"epoch": 0.05,
"learning_rate": 8.547008547008548e-06,
"loss": 2.2805,
"step": 20
},
{
"epoch": 0.08,
"learning_rate": 1.282051282051282e-05,
"loss": 2.2424,
"step": 30
},
{
"epoch": 0.1,
"learning_rate": 1.7094017094017095e-05,
"loss": 2.1851,
"step": 40
},
{
"epoch": 0.13,
"learning_rate": 2.1367521367521368e-05,
"loss": 2.0954,
"step": 50
},
{
"epoch": 0.15,
"learning_rate": 2.564102564102564e-05,
"loss": 1.9605,
"step": 60
},
{
"epoch": 0.18,
"learning_rate": 2.9914529914529915e-05,
"loss": 1.8082,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 3.418803418803419e-05,
"loss": 1.6045,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 3.846153846153846e-05,
"loss": 1.416,
"step": 90
},
{
"epoch": 0.26,
"learning_rate": 4.2735042735042735e-05,
"loss": 1.2521,
"step": 100
},
{
"epoch": 0.28,
"learning_rate": 4.700854700854701e-05,
"loss": 1.1004,
"step": 110
},
{
"epoch": 0.31,
"learning_rate": 4.985754985754986e-05,
"loss": 0.9802,
"step": 120
},
{
"epoch": 0.33,
"learning_rate": 4.938271604938271e-05,
"loss": 0.8694,
"step": 130
},
{
"epoch": 0.36,
"learning_rate": 4.890788224121557e-05,
"loss": 0.8239,
"step": 140
},
{
"epoch": 0.38,
"learning_rate": 4.8433048433048433e-05,
"loss": 0.726,
"step": 150
},
{
"epoch": 0.41,
"learning_rate": 4.7958214624881294e-05,
"loss": 0.6769,
"step": 160
},
{
"epoch": 0.44,
"learning_rate": 4.7483380816714154e-05,
"loss": 0.653,
"step": 170
},
{
"epoch": 0.46,
"learning_rate": 4.700854700854701e-05,
"loss": 0.6043,
"step": 180
},
{
"epoch": 0.49,
"learning_rate": 4.653371320037987e-05,
"loss": 0.6188,
"step": 190
},
{
"epoch": 0.51,
"learning_rate": 4.605887939221273e-05,
"loss": 0.5447,
"step": 200
},
{
"epoch": 0.54,
"learning_rate": 4.558404558404559e-05,
"loss": 0.5582,
"step": 210
},
{
"epoch": 0.56,
"learning_rate": 4.510921177587845e-05,
"loss": 0.525,
"step": 220
},
{
"epoch": 0.59,
"learning_rate": 4.463437796771131e-05,
"loss": 0.5545,
"step": 230
},
{
"epoch": 0.61,
"learning_rate": 4.415954415954416e-05,
"loss": 0.5203,
"step": 240
},
{
"epoch": 0.64,
"learning_rate": 4.368471035137702e-05,
"loss": 0.5239,
"step": 250
},
{
"epoch": 0.67,
"learning_rate": 4.3209876543209875e-05,
"loss": 0.4704,
"step": 260
},
{
"epoch": 0.69,
"learning_rate": 4.2735042735042735e-05,
"loss": 0.5016,
"step": 270
},
{
"epoch": 0.72,
"learning_rate": 4.2260208926875595e-05,
"loss": 0.4651,
"step": 280
},
{
"epoch": 0.74,
"learning_rate": 4.1785375118708455e-05,
"loss": 0.4805,
"step": 290
},
{
"epoch": 0.77,
"learning_rate": 4.131054131054131e-05,
"loss": 0.4609,
"step": 300
},
{
"epoch": 0.79,
"learning_rate": 4.083570750237417e-05,
"loss": 0.4392,
"step": 310
},
{
"epoch": 0.82,
"learning_rate": 4.036087369420703e-05,
"loss": 0.4761,
"step": 320
},
{
"epoch": 0.84,
"learning_rate": 3.988603988603989e-05,
"loss": 0.4517,
"step": 330
},
{
"epoch": 0.87,
"learning_rate": 3.941120607787275e-05,
"loss": 0.4361,
"step": 340
},
{
"epoch": 0.9,
"learning_rate": 3.893637226970561e-05,
"loss": 0.4287,
"step": 350
},
{
"epoch": 0.92,
"learning_rate": 3.846153846153846e-05,
"loss": 0.4151,
"step": 360
},
{
"epoch": 0.95,
"learning_rate": 3.798670465337132e-05,
"loss": 0.4098,
"step": 370
},
{
"epoch": 0.97,
"learning_rate": 3.7511870845204176e-05,
"loss": 0.4236,
"step": 380
},
{
"epoch": 1.0,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.3897,
"step": 390
},
{
"epoch": 1.0,
"eval_accuracy": 0.9757,
"eval_loss": 0.20828521251678467,
"eval_runtime": 85.3525,
"eval_samples_per_second": 117.161,
"eval_steps_per_second": 3.667,
"step": 390
},
{
"epoch": 1.03,
"learning_rate": 3.65622032288699e-05,
"loss": 0.4293,
"step": 400
},
{
"epoch": 1.05,
"learning_rate": 3.608736942070276e-05,
"loss": 0.3485,
"step": 410
},
{
"epoch": 1.08,
"learning_rate": 3.561253561253561e-05,
"loss": 0.387,
"step": 420
},
{
"epoch": 1.1,
"learning_rate": 3.513770180436847e-05,
"loss": 0.3417,
"step": 430
},
{
"epoch": 1.13,
"learning_rate": 3.466286799620133e-05,
"loss": 0.3836,
"step": 440
},
{
"epoch": 1.15,
"learning_rate": 3.418803418803419e-05,
"loss": 0.3623,
"step": 450
},
{
"epoch": 1.18,
"learning_rate": 3.371320037986705e-05,
"loss": 0.3662,
"step": 460
},
{
"epoch": 1.2,
"learning_rate": 3.323836657169991e-05,
"loss": 0.3724,
"step": 470
},
{
"epoch": 1.23,
"learning_rate": 3.2763532763532764e-05,
"loss": 0.3278,
"step": 480
},
{
"epoch": 1.26,
"learning_rate": 3.2288698955365625e-05,
"loss": 0.3703,
"step": 490
},
{
"epoch": 1.28,
"learning_rate": 3.181386514719848e-05,
"loss": 0.3691,
"step": 500
},
{
"epoch": 1.31,
"learning_rate": 3.133903133903134e-05,
"loss": 0.3644,
"step": 510
},
{
"epoch": 1.33,
"learning_rate": 3.08641975308642e-05,
"loss": 0.3536,
"step": 520
},
{
"epoch": 1.36,
"learning_rate": 3.0389363722697055e-05,
"loss": 0.3383,
"step": 530
},
{
"epoch": 1.38,
"learning_rate": 2.9914529914529915e-05,
"loss": 0.3239,
"step": 540
},
{
"epoch": 1.41,
"learning_rate": 2.9439696106362775e-05,
"loss": 0.3155,
"step": 550
},
{
"epoch": 1.44,
"learning_rate": 2.8964862298195632e-05,
"loss": 0.3401,
"step": 560
},
{
"epoch": 1.46,
"learning_rate": 2.8490028490028492e-05,
"loss": 0.3221,
"step": 570
},
{
"epoch": 1.49,
"learning_rate": 2.8015194681861352e-05,
"loss": 0.3183,
"step": 580
},
{
"epoch": 1.51,
"learning_rate": 2.754036087369421e-05,
"loss": 0.3256,
"step": 590
},
{
"epoch": 1.54,
"learning_rate": 2.706552706552707e-05,
"loss": 0.3407,
"step": 600
},
{
"epoch": 1.56,
"learning_rate": 2.6590693257359926e-05,
"loss": 0.2972,
"step": 610
},
{
"epoch": 1.59,
"learning_rate": 2.611585944919278e-05,
"loss": 0.3366,
"step": 620
},
{
"epoch": 1.61,
"learning_rate": 2.564102564102564e-05,
"loss": 0.2942,
"step": 630
},
{
"epoch": 1.64,
"learning_rate": 2.51661918328585e-05,
"loss": 0.3066,
"step": 640
},
{
"epoch": 1.67,
"learning_rate": 2.4691358024691357e-05,
"loss": 0.3247,
"step": 650
},
{
"epoch": 1.69,
"learning_rate": 2.4216524216524217e-05,
"loss": 0.3191,
"step": 660
},
{
"epoch": 1.72,
"learning_rate": 2.3741690408357077e-05,
"loss": 0.3076,
"step": 670
},
{
"epoch": 1.74,
"learning_rate": 2.3266856600189934e-05,
"loss": 0.3037,
"step": 680
},
{
"epoch": 1.77,
"learning_rate": 2.2792022792022794e-05,
"loss": 0.2986,
"step": 690
},
{
"epoch": 1.79,
"learning_rate": 2.2317188983855654e-05,
"loss": 0.2659,
"step": 700
},
{
"epoch": 1.82,
"learning_rate": 2.184235517568851e-05,
"loss": 0.3203,
"step": 710
},
{
"epoch": 1.84,
"learning_rate": 2.1367521367521368e-05,
"loss": 0.2952,
"step": 720
},
{
"epoch": 1.87,
"learning_rate": 2.0892687559354228e-05,
"loss": 0.3053,
"step": 730
},
{
"epoch": 1.9,
"learning_rate": 2.0417853751187084e-05,
"loss": 0.2889,
"step": 740
},
{
"epoch": 1.92,
"learning_rate": 1.9943019943019945e-05,
"loss": 0.2518,
"step": 750
},
{
"epoch": 1.95,
"learning_rate": 1.9468186134852805e-05,
"loss": 0.2816,
"step": 760
},
{
"epoch": 1.97,
"learning_rate": 1.899335232668566e-05,
"loss": 0.2813,
"step": 770
},
{
"epoch": 2.0,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.3045,
"step": 780
},
{
"epoch": 2.0,
"eval_accuracy": 0.986,
"eval_loss": 0.11745991557836533,
"eval_runtime": 84.0012,
"eval_samples_per_second": 119.046,
"eval_steps_per_second": 3.726,
"step": 780
},
{
"epoch": 2.03,
"learning_rate": 1.804368471035138e-05,
"loss": 0.2973,
"step": 790
},
{
"epoch": 2.05,
"learning_rate": 1.7568850902184235e-05,
"loss": 0.27,
"step": 800
},
{
"epoch": 2.08,
"learning_rate": 1.7094017094017095e-05,
"loss": 0.2932,
"step": 810
},
{
"epoch": 2.1,
"learning_rate": 1.6619183285849956e-05,
"loss": 0.3179,
"step": 820
},
{
"epoch": 2.13,
"learning_rate": 1.6144349477682812e-05,
"loss": 0.2581,
"step": 830
},
{
"epoch": 2.15,
"learning_rate": 1.566951566951567e-05,
"loss": 0.275,
"step": 840
},
{
"epoch": 2.18,
"learning_rate": 1.5194681861348528e-05,
"loss": 0.2882,
"step": 850
},
{
"epoch": 2.2,
"learning_rate": 1.4719848053181388e-05,
"loss": 0.26,
"step": 860
},
{
"epoch": 2.23,
"learning_rate": 1.4245014245014246e-05,
"loss": 0.2748,
"step": 870
},
{
"epoch": 2.26,
"learning_rate": 1.3770180436847105e-05,
"loss": 0.2553,
"step": 880
},
{
"epoch": 2.28,
"learning_rate": 1.3295346628679963e-05,
"loss": 0.2578,
"step": 890
},
{
"epoch": 2.31,
"learning_rate": 1.282051282051282e-05,
"loss": 0.3364,
"step": 900
},
{
"epoch": 2.33,
"learning_rate": 1.2345679012345678e-05,
"loss": 0.2927,
"step": 910
},
{
"epoch": 2.36,
"learning_rate": 1.1870845204178538e-05,
"loss": 0.2481,
"step": 920
},
{
"epoch": 2.38,
"learning_rate": 1.1396011396011397e-05,
"loss": 0.2591,
"step": 930
},
{
"epoch": 2.41,
"learning_rate": 1.0921177587844255e-05,
"loss": 0.2648,
"step": 940
},
{
"epoch": 2.44,
"learning_rate": 1.0446343779677114e-05,
"loss": 0.2691,
"step": 950
},
{
"epoch": 2.46,
"learning_rate": 9.971509971509972e-06,
"loss": 0.2627,
"step": 960
},
{
"epoch": 2.49,
"learning_rate": 9.49667616334283e-06,
"loss": 0.2612,
"step": 970
},
{
"epoch": 2.51,
"learning_rate": 9.02184235517569e-06,
"loss": 0.2389,
"step": 980
},
{
"epoch": 2.54,
"learning_rate": 8.547008547008548e-06,
"loss": 0.2792,
"step": 990
},
{
"epoch": 2.56,
"learning_rate": 8.072174738841406e-06,
"loss": 0.2446,
"step": 1000
},
{
"epoch": 2.59,
"learning_rate": 7.597340930674264e-06,
"loss": 0.2305,
"step": 1010
},
{
"epoch": 2.61,
"learning_rate": 7.122507122507123e-06,
"loss": 0.2527,
"step": 1020
},
{
"epoch": 2.64,
"learning_rate": 6.6476733143399815e-06,
"loss": 0.2403,
"step": 1030
},
{
"epoch": 2.67,
"learning_rate": 6.172839506172839e-06,
"loss": 0.2254,
"step": 1040
},
{
"epoch": 2.69,
"learning_rate": 5.6980056980056985e-06,
"loss": 0.2879,
"step": 1050
},
{
"epoch": 2.72,
"learning_rate": 5.223171889838557e-06,
"loss": 0.2233,
"step": 1060
},
{
"epoch": 2.74,
"learning_rate": 4.748338081671415e-06,
"loss": 0.2258,
"step": 1070
},
{
"epoch": 2.77,
"learning_rate": 4.273504273504274e-06,
"loss": 0.2382,
"step": 1080
},
{
"epoch": 2.79,
"learning_rate": 3.798670465337132e-06,
"loss": 0.2564,
"step": 1090
},
{
"epoch": 2.82,
"learning_rate": 3.3238366571699908e-06,
"loss": 0.2513,
"step": 1100
},
{
"epoch": 2.84,
"learning_rate": 2.8490028490028492e-06,
"loss": 0.2152,
"step": 1110
},
{
"epoch": 2.87,
"learning_rate": 2.3741690408357077e-06,
"loss": 0.243,
"step": 1120
},
{
"epoch": 2.9,
"learning_rate": 1.899335232668566e-06,
"loss": 0.2701,
"step": 1130
},
{
"epoch": 2.92,
"learning_rate": 1.4245014245014246e-06,
"loss": 0.2531,
"step": 1140
},
{
"epoch": 2.95,
"learning_rate": 9.49667616334283e-07,
"loss": 0.2637,
"step": 1150
},
{
"epoch": 2.97,
"learning_rate": 4.748338081671415e-07,
"loss": 0.2424,
"step": 1160
},
{
"epoch": 3.0,
"learning_rate": 0.0,
"loss": 0.2524,
"step": 1170
},
{
"epoch": 3.0,
"eval_accuracy": 0.9863,
"eval_loss": 0.09992218017578125,
"eval_runtime": 83.7402,
"eval_samples_per_second": 119.417,
"eval_steps_per_second": 3.738,
"step": 1170
},
{
"epoch": 3.0,
"step": 1170,
"total_flos": 1.161843208194687e+19,
"train_loss": 0.5011348225112654,
"train_runtime": 3321.3479,
"train_samples_per_second": 45.162,
"train_steps_per_second": 0.352
}
],
"max_steps": 1170,
"num_train_epochs": 3,
"total_flos": 1.161843208194687e+19,
"trial_name": null,
"trial_params": null
}