{
"best_metric": 0.36585365853658536,
"best_model_checkpoint": "google/vivit-b-16x2-kinetics400-CAER-SAMPLE\\checkpoint-588",
"epoch": 10.066666666666666,
"eval_steps": 500,
"global_step": 2100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 21.040367126464844,
"learning_rate": 2.3809523809523808e-06,
"loss": 2.0819,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 20.962032318115234,
"learning_rate": 4.7619047619047615e-06,
"loss": 2.1082,
"step": 20
},
{
"epoch": 0.01,
"grad_norm": 19.943674087524414,
"learning_rate": 7.142857142857143e-06,
"loss": 2.148,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 20.1109619140625,
"learning_rate": 9.523809523809523e-06,
"loss": 1.9797,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 20.655811309814453,
"learning_rate": 1.1904761904761905e-05,
"loss": 2.1647,
"step": 50
},
{
"epoch": 0.03,
"grad_norm": 19.211088180541992,
"learning_rate": 1.4285714285714285e-05,
"loss": 2.0541,
"step": 60
},
{
"epoch": 0.03,
"grad_norm": 19.060794830322266,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.9982,
"step": 70
},
{
"epoch": 0.04,
"grad_norm": 18.562660217285156,
"learning_rate": 1.9047619047619046e-05,
"loss": 1.9553,
"step": 80
},
{
"epoch": 0.04,
"grad_norm": 21.629850387573242,
"learning_rate": 2.1428571428571428e-05,
"loss": 2.0218,
"step": 90
},
{
"epoch": 0.05,
"grad_norm": 22.143321990966797,
"learning_rate": 2.380952380952381e-05,
"loss": 2.0226,
"step": 100
},
{
"epoch": 0.05,
"grad_norm": 21.463300704956055,
"learning_rate": 2.6190476190476192e-05,
"loss": 2.1175,
"step": 110
},
{
"epoch": 0.06,
"grad_norm": 20.069828033447266,
"learning_rate": 2.857142857142857e-05,
"loss": 1.9767,
"step": 120
},
{
"epoch": 0.06,
"grad_norm": 21.68781280517578,
"learning_rate": 3.095238095238095e-05,
"loss": 1.944,
"step": 130
},
{
"epoch": 0.07,
"grad_norm": 22.342853546142578,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.8881,
"step": 140
},
{
"epoch": 0.07,
"grad_norm": 22.58480453491211,
"learning_rate": 3.571428571428572e-05,
"loss": 1.957,
"step": 150
},
{
"epoch": 0.08,
"grad_norm": 21.043750762939453,
"learning_rate": 3.809523809523809e-05,
"loss": 1.9067,
"step": 160
},
{
"epoch": 0.08,
"grad_norm": 18.581403732299805,
"learning_rate": 4.047619047619048e-05,
"loss": 1.8434,
"step": 170
},
{
"epoch": 0.09,
"grad_norm": 19.873624801635742,
"learning_rate": 4.2857142857142856e-05,
"loss": 1.7584,
"step": 180
},
{
"epoch": 0.09,
"grad_norm": 27.060535430908203,
"learning_rate": 4.523809523809524e-05,
"loss": 2.4781,
"step": 190
},
{
"epoch": 0.09,
"eval_accuracy": 0.24390243902439024,
"eval_loss": 1.8166238069534302,
"eval_runtime": 97.0837,
"eval_samples_per_second": 0.422,
"eval_steps_per_second": 0.216,
"step": 196
},
{
"epoch": 1.0,
"grad_norm": 22.22335433959961,
"learning_rate": 4.761904761904762e-05,
"loss": 1.8185,
"step": 200
},
{
"epoch": 1.01,
"grad_norm": 18.064006805419922,
"learning_rate": 5e-05,
"loss": 1.513,
"step": 210
},
{
"epoch": 1.01,
"grad_norm": 21.092018127441406,
"learning_rate": 4.973544973544973e-05,
"loss": 1.6385,
"step": 220
},
{
"epoch": 1.02,
"grad_norm": 24.412302017211914,
"learning_rate": 4.9470899470899475e-05,
"loss": 1.6291,
"step": 230
},
{
"epoch": 1.02,
"grad_norm": 23.470930099487305,
"learning_rate": 4.9206349206349204e-05,
"loss": 1.4416,
"step": 240
},
{
"epoch": 1.03,
"grad_norm": 24.228782653808594,
"learning_rate": 4.894179894179895e-05,
"loss": 2.2057,
"step": 250
},
{
"epoch": 1.03,
"grad_norm": 17.363630294799805,
"learning_rate": 4.8677248677248676e-05,
"loss": 1.8102,
"step": 260
},
{
"epoch": 1.04,
"grad_norm": 25.69906234741211,
"learning_rate": 4.841269841269841e-05,
"loss": 1.7703,
"step": 270
},
{
"epoch": 1.04,
"grad_norm": 24.245840072631836,
"learning_rate": 4.814814814814815e-05,
"loss": 1.8854,
"step": 280
},
{
"epoch": 1.04,
"grad_norm": 18.425830841064453,
"learning_rate": 4.7883597883597884e-05,
"loss": 1.4344,
"step": 290
},
{
"epoch": 1.05,
"grad_norm": 25.066003799438477,
"learning_rate": 4.761904761904762e-05,
"loss": 1.8341,
"step": 300
},
{
"epoch": 1.05,
"grad_norm": 18.590608596801758,
"learning_rate": 4.7354497354497356e-05,
"loss": 1.5598,
"step": 310
},
{
"epoch": 1.06,
"grad_norm": 18.962934494018555,
"learning_rate": 4.708994708994709e-05,
"loss": 1.6827,
"step": 320
},
{
"epoch": 1.06,
"grad_norm": 15.114640235900879,
"learning_rate": 4.682539682539683e-05,
"loss": 1.5371,
"step": 330
},
{
"epoch": 1.07,
"grad_norm": 10.92546558380127,
"learning_rate": 4.656084656084656e-05,
"loss": 1.6233,
"step": 340
},
{
"epoch": 1.07,
"grad_norm": 24.024572372436523,
"learning_rate": 4.62962962962963e-05,
"loss": 1.7803,
"step": 350
},
{
"epoch": 1.08,
"grad_norm": 22.18260383605957,
"learning_rate": 4.603174603174603e-05,
"loss": 2.0079,
"step": 360
},
{
"epoch": 1.08,
"grad_norm": 23.039701461791992,
"learning_rate": 4.576719576719577e-05,
"loss": 1.582,
"step": 370
},
{
"epoch": 1.09,
"grad_norm": 20.918445587158203,
"learning_rate": 4.55026455026455e-05,
"loss": 1.9383,
"step": 380
},
{
"epoch": 1.09,
"grad_norm": 20.540185928344727,
"learning_rate": 4.523809523809524e-05,
"loss": 2.0142,
"step": 390
},
{
"epoch": 1.09,
"eval_accuracy": 0.1951219512195122,
"eval_loss": 2.294609546661377,
"eval_runtime": 97.734,
"eval_samples_per_second": 0.42,
"eval_steps_per_second": 0.215,
"step": 392
},
{
"epoch": 2.0,
"grad_norm": 20.112930297851562,
"learning_rate": 4.4973544973544974e-05,
"loss": 1.281,
"step": 400
},
{
"epoch": 2.01,
"grad_norm": 16.186555862426758,
"learning_rate": 4.470899470899471e-05,
"loss": 1.1811,
"step": 410
},
{
"epoch": 2.01,
"grad_norm": 17.308746337890625,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.6046,
"step": 420
},
{
"epoch": 2.02,
"grad_norm": 15.936869621276855,
"learning_rate": 4.417989417989418e-05,
"loss": 1.1209,
"step": 430
},
{
"epoch": 2.02,
"grad_norm": 18.442317962646484,
"learning_rate": 4.391534391534391e-05,
"loss": 1.613,
"step": 440
},
{
"epoch": 2.03,
"grad_norm": 25.79071044921875,
"learning_rate": 4.3650793650793655e-05,
"loss": 1.1109,
"step": 450
},
{
"epoch": 2.03,
"grad_norm": 17.710105895996094,
"learning_rate": 4.3386243386243384e-05,
"loss": 1.0664,
"step": 460
},
{
"epoch": 2.04,
"grad_norm": 14.870391845703125,
"learning_rate": 4.312169312169313e-05,
"loss": 1.2283,
"step": 470
},
{
"epoch": 2.04,
"grad_norm": 10.361430168151855,
"learning_rate": 4.2857142857142856e-05,
"loss": 0.9415,
"step": 480
},
{
"epoch": 2.05,
"grad_norm": 22.23798179626465,
"learning_rate": 4.259259259259259e-05,
"loss": 1.3095,
"step": 490
},
{
"epoch": 2.05,
"grad_norm": 15.113299369812012,
"learning_rate": 4.232804232804233e-05,
"loss": 1.2173,
"step": 500
},
{
"epoch": 2.06,
"grad_norm": 22.383806228637695,
"learning_rate": 4.2063492063492065e-05,
"loss": 1.6995,
"step": 510
},
{
"epoch": 2.06,
"grad_norm": 18.106990814208984,
"learning_rate": 4.17989417989418e-05,
"loss": 1.5246,
"step": 520
},
{
"epoch": 2.07,
"grad_norm": 17.58225440979004,
"learning_rate": 4.153439153439154e-05,
"loss": 1.6346,
"step": 530
},
{
"epoch": 2.07,
"grad_norm": 20.450359344482422,
"learning_rate": 4.126984126984127e-05,
"loss": 1.402,
"step": 540
},
{
"epoch": 2.08,
"grad_norm": 19.060672760009766,
"learning_rate": 4.100529100529101e-05,
"loss": 1.3466,
"step": 550
},
{
"epoch": 2.08,
"grad_norm": 19.76554298400879,
"learning_rate": 4.074074074074074e-05,
"loss": 1.3471,
"step": 560
},
{
"epoch": 2.08,
"grad_norm": 19.519485473632812,
"learning_rate": 4.047619047619048e-05,
"loss": 1.3535,
"step": 570
},
{
"epoch": 2.09,
"grad_norm": 27.53349494934082,
"learning_rate": 4.021164021164021e-05,
"loss": 1.2947,
"step": 580
},
{
"epoch": 2.09,
"eval_accuracy": 0.36585365853658536,
"eval_loss": 1.6997814178466797,
"eval_runtime": 97.5161,
"eval_samples_per_second": 0.42,
"eval_steps_per_second": 0.215,
"step": 588
},
{
"epoch": 3.0,
"grad_norm": 12.76953411102295,
"learning_rate": 3.9947089947089946e-05,
"loss": 1.0455,
"step": 590
},
{
"epoch": 3.01,
"grad_norm": 23.524166107177734,
"learning_rate": 3.968253968253968e-05,
"loss": 0.9096,
"step": 600
},
{
"epoch": 3.01,
"grad_norm": 16.023509979248047,
"learning_rate": 3.941798941798942e-05,
"loss": 0.7188,
"step": 610
},
{
"epoch": 3.02,
"grad_norm": 6.053380012512207,
"learning_rate": 3.9153439153439155e-05,
"loss": 1.0134,
"step": 620
},
{
"epoch": 3.02,
"grad_norm": 21.445356369018555,
"learning_rate": 3.888888888888889e-05,
"loss": 1.1377,
"step": 630
},
{
"epoch": 3.02,
"grad_norm": 2.62589168548584,
"learning_rate": 3.862433862433863e-05,
"loss": 0.7862,
"step": 640
},
{
"epoch": 3.03,
"grad_norm": 17.238386154174805,
"learning_rate": 3.835978835978836e-05,
"loss": 0.8987,
"step": 650
},
{
"epoch": 3.03,
"grad_norm": 21.664026260375977,
"learning_rate": 3.809523809523809e-05,
"loss": 0.8078,
"step": 660
},
{
"epoch": 3.04,
"grad_norm": 17.513721466064453,
"learning_rate": 3.7830687830687835e-05,
"loss": 1.0894,
"step": 670
},
{
"epoch": 3.04,
"grad_norm": 28.274599075317383,
"learning_rate": 3.7566137566137564e-05,
"loss": 0.8099,
"step": 680
},
{
"epoch": 3.05,
"grad_norm": 12.51369571685791,
"learning_rate": 3.730158730158731e-05,
"loss": 0.7437,
"step": 690
},
{
"epoch": 3.05,
"grad_norm": 18.50059700012207,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.966,
"step": 700
},
{
"epoch": 3.06,
"grad_norm": 16.658267974853516,
"learning_rate": 3.677248677248677e-05,
"loss": 0.9597,
"step": 710
},
{
"epoch": 3.06,
"grad_norm": 12.815518379211426,
"learning_rate": 3.650793650793651e-05,
"loss": 1.0438,
"step": 720
},
{
"epoch": 3.07,
"grad_norm": 10.553025245666504,
"learning_rate": 3.6243386243386245e-05,
"loss": 1.0394,
"step": 730
},
{
"epoch": 3.07,
"grad_norm": 19.39080238342285,
"learning_rate": 3.597883597883598e-05,
"loss": 0.7301,
"step": 740
},
{
"epoch": 3.08,
"grad_norm": 20.088712692260742,
"learning_rate": 3.571428571428572e-05,
"loss": 1.0226,
"step": 750
},
{
"epoch": 3.08,
"grad_norm": 21.335155487060547,
"learning_rate": 3.5449735449735446e-05,
"loss": 1.0559,
"step": 760
},
{
"epoch": 3.09,
"grad_norm": 13.45548152923584,
"learning_rate": 3.518518518518519e-05,
"loss": 0.7884,
"step": 770
},
{
"epoch": 3.09,
"grad_norm": 3.4905335903167725,
"learning_rate": 3.492063492063492e-05,
"loss": 0.8486,
"step": 780
},
{
"epoch": 3.09,
"eval_accuracy": 0.21951219512195122,
"eval_loss": 2.036907196044922,
"eval_runtime": 97.631,
"eval_samples_per_second": 0.42,
"eval_steps_per_second": 0.215,
"step": 784
},
{
"epoch": 4.0,
"grad_norm": 14.28947639465332,
"learning_rate": 3.465608465608466e-05,
"loss": 0.4492,
"step": 790
},
{
"epoch": 4.01,
"grad_norm": 3.365736722946167,
"learning_rate": 3.439153439153439e-05,
"loss": 0.3086,
"step": 800
},
{
"epoch": 4.01,
"grad_norm": 0.7734526991844177,
"learning_rate": 3.412698412698413e-05,
"loss": 0.6843,
"step": 810
},
{
"epoch": 4.02,
"grad_norm": 15.79121208190918,
"learning_rate": 3.386243386243386e-05,
"loss": 0.3582,
"step": 820
},
{
"epoch": 4.02,
"grad_norm": 8.443323135375977,
"learning_rate": 3.35978835978836e-05,
"loss": 0.6707,
"step": 830
},
{
"epoch": 4.03,
"grad_norm": 11.17960262298584,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.5133,
"step": 840
},
{
"epoch": 4.03,
"grad_norm": 15.559344291687012,
"learning_rate": 3.306878306878307e-05,
"loss": 0.4993,
"step": 850
},
{
"epoch": 4.04,
"grad_norm": 21.119354248046875,
"learning_rate": 3.280423280423281e-05,
"loss": 0.4755,
"step": 860
},
{
"epoch": 4.04,
"grad_norm": 19.267467498779297,
"learning_rate": 3.253968253968254e-05,
"loss": 0.4796,
"step": 870
},
{
"epoch": 4.05,
"grad_norm": 18.882789611816406,
"learning_rate": 3.227513227513227e-05,
"loss": 0.5674,
"step": 880
},
{
"epoch": 4.05,
"grad_norm": 21.429229736328125,
"learning_rate": 3.2010582010582015e-05,
"loss": 0.575,
"step": 890
},
{
"epoch": 4.06,
"grad_norm": 1.5254511833190918,
"learning_rate": 3.1746031746031745e-05,
"loss": 0.7652,
"step": 900
},
{
"epoch": 4.06,
"grad_norm": 27.608917236328125,
"learning_rate": 3.148148148148148e-05,
"loss": 0.4358,
"step": 910
},
{
"epoch": 4.06,
"grad_norm": 4.12730598449707,
"learning_rate": 3.121693121693122e-05,
"loss": 0.7179,
"step": 920
},
{
"epoch": 4.07,
"grad_norm": 0.7054173946380615,
"learning_rate": 3.095238095238095e-05,
"loss": 0.4201,
"step": 930
},
{
"epoch": 4.07,
"grad_norm": 6.384374618530273,
"learning_rate": 3.068783068783069e-05,
"loss": 0.523,
"step": 940
},
{
"epoch": 4.08,
"grad_norm": 13.877901077270508,
"learning_rate": 3.0423280423280425e-05,
"loss": 1.1374,
"step": 950
},
{
"epoch": 4.08,
"grad_norm": 11.547405242919922,
"learning_rate": 3.0158730158730158e-05,
"loss": 0.4007,
"step": 960
},
{
"epoch": 4.09,
"grad_norm": 17.14991569519043,
"learning_rate": 2.9894179894179897e-05,
"loss": 0.6621,
"step": 970
},
{
"epoch": 4.09,
"grad_norm": 4.131192684173584,
"learning_rate": 2.962962962962963e-05,
"loss": 0.2636,
"step": 980
},
{
"epoch": 4.09,
"eval_accuracy": 0.3170731707317073,
"eval_loss": 1.9747871160507202,
"eval_runtime": 96.8808,
"eval_samples_per_second": 0.423,
"eval_steps_per_second": 0.217,
"step": 980
},
{
"epoch": 5.0,
"grad_norm": 31.432811737060547,
"learning_rate": 2.9365079365079366e-05,
"loss": 0.4902,
"step": 990
},
{
"epoch": 5.01,
"grad_norm": 3.502074956893921,
"learning_rate": 2.91005291005291e-05,
"loss": 0.1312,
"step": 1000
},
{
"epoch": 5.01,
"grad_norm": 11.63001823425293,
"learning_rate": 2.8835978835978838e-05,
"loss": 0.2772,
"step": 1010
},
{
"epoch": 5.02,
"grad_norm": 18.965436935424805,
"learning_rate": 2.857142857142857e-05,
"loss": 0.6744,
"step": 1020
},
{
"epoch": 5.02,
"grad_norm": 0.373548686504364,
"learning_rate": 2.830687830687831e-05,
"loss": 0.1485,
"step": 1030
},
{
"epoch": 5.03,
"grad_norm": 1.2408517599105835,
"learning_rate": 2.8042328042328043e-05,
"loss": 0.5443,
"step": 1040
},
{
"epoch": 5.03,
"grad_norm": 7.223196506500244,
"learning_rate": 2.777777777777778e-05,
"loss": 0.212,
"step": 1050
},
{
"epoch": 5.04,
"grad_norm": 0.37929368019104004,
"learning_rate": 2.7513227513227512e-05,
"loss": 0.6033,
"step": 1060
},
{
"epoch": 5.04,
"grad_norm": 1.4948322772979736,
"learning_rate": 2.724867724867725e-05,
"loss": 0.3575,
"step": 1070
},
{
"epoch": 5.05,
"grad_norm": 23.476411819458008,
"learning_rate": 2.6984126984126984e-05,
"loss": 0.4821,
"step": 1080
},
{
"epoch": 5.05,
"grad_norm": 21.29241180419922,
"learning_rate": 2.6719576719576723e-05,
"loss": 0.2678,
"step": 1090
},
{
"epoch": 5.06,
"grad_norm": 1.714184045791626,
"learning_rate": 2.6455026455026456e-05,
"loss": 0.3042,
"step": 1100
},
{
"epoch": 5.06,
"grad_norm": 17.73127555847168,
"learning_rate": 2.6190476190476192e-05,
"loss": 0.4311,
"step": 1110
},
{
"epoch": 5.07,
"grad_norm": 12.709320068359375,
"learning_rate": 2.5925925925925925e-05,
"loss": 0.4813,
"step": 1120
},
{
"epoch": 5.07,
"grad_norm": 4.548611640930176,
"learning_rate": 2.5661375661375664e-05,
"loss": 0.1838,
"step": 1130
},
{
"epoch": 5.08,
"grad_norm": 27.118480682373047,
"learning_rate": 2.5396825396825397e-05,
"loss": 0.3369,
"step": 1140
},
{
"epoch": 5.08,
"grad_norm": 20.047382354736328,
"learning_rate": 2.5132275132275137e-05,
"loss": 0.1552,
"step": 1150
},
{
"epoch": 5.09,
"grad_norm": 2.572249412536621,
"learning_rate": 2.4867724867724866e-05,
"loss": 0.2454,
"step": 1160
},
{
"epoch": 5.09,
"grad_norm": 16.45228385925293,
"learning_rate": 2.4603174603174602e-05,
"loss": 0.2805,
"step": 1170
},
{
"epoch": 5.09,
"eval_accuracy": 0.36585365853658536,
"eval_loss": 2.356266975402832,
"eval_runtime": 97.8432,
"eval_samples_per_second": 0.419,
"eval_steps_per_second": 0.215,
"step": 1176
},
{
"epoch": 6.0,
"grad_norm": 1.568260908126831,
"learning_rate": 2.4338624338624338e-05,
"loss": 0.3522,
"step": 1180
},
{
"epoch": 6.01,
"grad_norm": 0.9600222706794739,
"learning_rate": 2.4074074074074074e-05,
"loss": 0.199,
"step": 1190
},
{
"epoch": 6.01,
"grad_norm": 15.724946975708008,
"learning_rate": 2.380952380952381e-05,
"loss": 0.1719,
"step": 1200
},
{
"epoch": 6.02,
"grad_norm": 1.4226208925247192,
"learning_rate": 2.3544973544973546e-05,
"loss": 0.1337,
"step": 1210
},
{
"epoch": 6.02,
"grad_norm": 0.574043333530426,
"learning_rate": 2.328042328042328e-05,
"loss": 0.0584,
"step": 1220
},
{
"epoch": 6.03,
"grad_norm": 1.265620231628418,
"learning_rate": 2.3015873015873015e-05,
"loss": 0.1366,
"step": 1230
},
{
"epoch": 6.03,
"grad_norm": 0.19037747383117676,
"learning_rate": 2.275132275132275e-05,
"loss": 0.052,
"step": 1240
},
{
"epoch": 6.04,
"grad_norm": 2.8592171669006348,
"learning_rate": 2.2486772486772487e-05,
"loss": 0.0912,
"step": 1250
},
{
"epoch": 6.04,
"grad_norm": 32.83649444580078,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.6816,
"step": 1260
},
{
"epoch": 6.04,
"grad_norm": 26.58733558654785,
"learning_rate": 2.1957671957671956e-05,
"loss": 0.1562,
"step": 1270
},
{
"epoch": 6.05,
"grad_norm": 0.8090476989746094,
"learning_rate": 2.1693121693121692e-05,
"loss": 0.0661,
"step": 1280
},
{
"epoch": 6.05,
"grad_norm": 12.49517822265625,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.097,
"step": 1290
},
{
"epoch": 6.06,
"grad_norm": 1.2869700193405151,
"learning_rate": 2.1164021164021164e-05,
"loss": 0.2118,
"step": 1300
},
{
"epoch": 6.06,
"grad_norm": 1.337388038635254,
"learning_rate": 2.08994708994709e-05,
"loss": 0.1251,
"step": 1310
},
{
"epoch": 6.07,
"grad_norm": 15.6812162399292,
"learning_rate": 2.0634920634920636e-05,
"loss": 0.2407,
"step": 1320
},
{
"epoch": 6.07,
"grad_norm": 4.392534255981445,
"learning_rate": 2.037037037037037e-05,
"loss": 0.1948,
"step": 1330
},
{
"epoch": 6.08,
"grad_norm": 3.4775161743164062,
"learning_rate": 2.0105820105820105e-05,
"loss": 0.056,
"step": 1340
},
{
"epoch": 6.08,
"grad_norm": 0.3342803716659546,
"learning_rate": 1.984126984126984e-05,
"loss": 0.2194,
"step": 1350
},
{
"epoch": 6.09,
"grad_norm": 2.376802444458008,
"learning_rate": 1.9576719576719577e-05,
"loss": 0.1855,
"step": 1360
},
{
"epoch": 6.09,
"grad_norm": 0.15384900569915771,
"learning_rate": 1.9312169312169313e-05,
"loss": 0.0923,
"step": 1370
},
{
"epoch": 6.09,
"eval_accuracy": 0.36585365853658536,
"eval_loss": 2.3754231929779053,
"eval_runtime": 98.5579,
"eval_samples_per_second": 0.416,
"eval_steps_per_second": 0.213,
"step": 1372
},
{
"epoch": 7.0,
"grad_norm": 0.41682931780815125,
"learning_rate": 1.9047619047619046e-05,
"loss": 0.097,
"step": 1380
},
{
"epoch": 7.01,
"grad_norm": 0.6600458025932312,
"learning_rate": 1.8783068783068782e-05,
"loss": 0.1678,
"step": 1390
},
{
"epoch": 7.01,
"grad_norm": 6.393650531768799,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.0715,
"step": 1400
},
{
"epoch": 7.02,
"grad_norm": 0.031914032995700836,
"learning_rate": 1.8253968253968254e-05,
"loss": 0.1708,
"step": 1410
},
{
"epoch": 7.02,
"grad_norm": 0.04353192821145058,
"learning_rate": 1.798941798941799e-05,
"loss": 0.1562,
"step": 1420
},
{
"epoch": 7.03,
"grad_norm": 0.5575181841850281,
"learning_rate": 1.7724867724867723e-05,
"loss": 0.0242,
"step": 1430
},
{
"epoch": 7.03,
"grad_norm": 0.2302282154560089,
"learning_rate": 1.746031746031746e-05,
"loss": 0.039,
"step": 1440
},
{
"epoch": 7.04,
"grad_norm": 0.12757529318332672,
"learning_rate": 1.7195767195767195e-05,
"loss": 0.1855,
"step": 1450
},
{
"epoch": 7.04,
"grad_norm": 27.270219802856445,
"learning_rate": 1.693121693121693e-05,
"loss": 0.204,
"step": 1460
},
{
"epoch": 7.05,
"grad_norm": 0.17601485550403595,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.1326,
"step": 1470
},
{
"epoch": 7.05,
"grad_norm": 7.770025253295898,
"learning_rate": 1.6402116402116404e-05,
"loss": 0.0323,
"step": 1480
},
{
"epoch": 7.06,
"grad_norm": 0.36580923199653625,
"learning_rate": 1.6137566137566136e-05,
"loss": 0.1058,
"step": 1490
},
{
"epoch": 7.06,
"grad_norm": 4.40885591506958,
"learning_rate": 1.5873015873015872e-05,
"loss": 0.0768,
"step": 1500
},
{
"epoch": 7.07,
"grad_norm": 0.18270359933376312,
"learning_rate": 1.560846560846561e-05,
"loss": 0.2691,
"step": 1510
},
{
"epoch": 7.07,
"grad_norm": 2.5134904384613037,
"learning_rate": 1.5343915343915344e-05,
"loss": 0.1627,
"step": 1520
},
{
"epoch": 7.08,
"grad_norm": 0.8382402658462524,
"learning_rate": 1.5079365079365079e-05,
"loss": 0.0194,
"step": 1530
},
{
"epoch": 7.08,
"grad_norm": 22.632375717163086,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.2512,
"step": 1540
},
{
"epoch": 7.08,
"grad_norm": 0.267007052898407,
"learning_rate": 1.455026455026455e-05,
"loss": 0.0539,
"step": 1550
},
{
"epoch": 7.09,
"grad_norm": 0.03810029849410057,
"learning_rate": 1.4285714285714285e-05,
"loss": 0.1543,
"step": 1560
},
{
"epoch": 7.09,
"eval_accuracy": 0.3170731707317073,
"eval_loss": 2.7736659049987793,
"eval_runtime": 98.6426,
"eval_samples_per_second": 0.416,
"eval_steps_per_second": 0.213,
"step": 1568
},
{
"epoch": 8.0,
"grad_norm": 1.9330545663833618,
"learning_rate": 1.4021164021164022e-05,
"loss": 0.3434,
"step": 1570
},
{
"epoch": 8.01,
"grad_norm": 0.2689897119998932,
"learning_rate": 1.3756613756613756e-05,
"loss": 0.0472,
"step": 1580
},
{
"epoch": 8.01,
"grad_norm": 0.09701512008905411,
"learning_rate": 1.3492063492063492e-05,
"loss": 0.0198,
"step": 1590
},
{
"epoch": 8.02,
"grad_norm": 0.07738398015499115,
"learning_rate": 1.3227513227513228e-05,
"loss": 0.01,
"step": 1600
},
{
"epoch": 8.02,
"grad_norm": 0.07364221662282944,
"learning_rate": 1.2962962962962962e-05,
"loss": 0.0218,
"step": 1610
},
{
"epoch": 8.02,
"grad_norm": 0.06300120055675507,
"learning_rate": 1.2698412698412699e-05,
"loss": 0.0517,
"step": 1620
},
{
"epoch": 8.03,
"grad_norm": 0.2190474420785904,
"learning_rate": 1.2433862433862433e-05,
"loss": 0.0538,
"step": 1630
},
{
"epoch": 8.03,
"grad_norm": 13.809772491455078,
"learning_rate": 1.2169312169312169e-05,
"loss": 0.046,
"step": 1640
},
{
"epoch": 8.04,
"grad_norm": 5.0279645919799805,
"learning_rate": 1.1904761904761905e-05,
"loss": 0.1286,
"step": 1650
},
{
"epoch": 8.04,
"grad_norm": 0.25265267491340637,
"learning_rate": 1.164021164021164e-05,
"loss": 0.0477,
"step": 1660
},
{
"epoch": 8.05,
"grad_norm": 12.444536209106445,
"learning_rate": 1.1375661375661376e-05,
"loss": 0.0547,
"step": 1670
},
{
"epoch": 8.05,
"grad_norm": 0.8364056348800659,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.0147,
"step": 1680
},
{
"epoch": 8.06,
"grad_norm": 0.10071144253015518,
"learning_rate": 1.0846560846560846e-05,
"loss": 0.0119,
"step": 1690
},
{
"epoch": 8.06,
"grad_norm": 0.4113464653491974,
"learning_rate": 1.0582010582010582e-05,
"loss": 0.0098,
"step": 1700
},
{
"epoch": 8.07,
"grad_norm": 0.29613247513771057,
"learning_rate": 1.0317460317460318e-05,
"loss": 0.0096,
"step": 1710
},
{
"epoch": 8.07,
"grad_norm": 0.28390398621559143,
"learning_rate": 1.0052910052910053e-05,
"loss": 0.0841,
"step": 1720
},
{
"epoch": 8.08,
"grad_norm": 2.4915988445281982,
"learning_rate": 9.788359788359789e-06,
"loss": 0.0831,
"step": 1730
},
{
"epoch": 8.08,
"grad_norm": 0.16037584841251373,
"learning_rate": 9.523809523809523e-06,
"loss": 0.0245,
"step": 1740
},
{
"epoch": 8.09,
"grad_norm": 0.21573053300380707,
"learning_rate": 9.259259259259259e-06,
"loss": 0.206,
"step": 1750
},
{
"epoch": 8.09,
"grad_norm": 0.03257734328508377,
"learning_rate": 8.994708994708995e-06,
"loss": 0.0387,
"step": 1760
},
{
"epoch": 8.09,
"eval_accuracy": 0.36585365853658536,
"eval_loss": 2.6675825119018555,
"eval_runtime": 98.7265,
"eval_samples_per_second": 0.415,
"eval_steps_per_second": 0.213,
"step": 1764
},
{
"epoch": 9.0,
"grad_norm": 0.25477078557014465,
"learning_rate": 8.73015873015873e-06,
"loss": 0.0048,
"step": 1770
},
{
"epoch": 9.01,
"grad_norm": 0.08968080580234528,
"learning_rate": 8.465608465608466e-06,
"loss": 0.0213,
"step": 1780
},
{
"epoch": 9.01,
"grad_norm": 0.12380950897932053,
"learning_rate": 8.201058201058202e-06,
"loss": 0.0471,
"step": 1790
},
{
"epoch": 9.02,
"grad_norm": 0.1933353841304779,
"learning_rate": 7.936507936507936e-06,
"loss": 0.3223,
"step": 1800
},
{
"epoch": 9.02,
"grad_norm": 0.09426797181367874,
"learning_rate": 7.671957671957672e-06,
"loss": 0.0566,
"step": 1810
},
{
"epoch": 9.03,
"grad_norm": 0.1309937983751297,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.0268,
"step": 1820
},
{
"epoch": 9.03,
"grad_norm": 0.7364773750305176,
"learning_rate": 7.142857142857143e-06,
"loss": 0.0418,
"step": 1830
},
{
"epoch": 9.04,
"grad_norm": 0.38073354959487915,
"learning_rate": 6.878306878306878e-06,
"loss": 0.0053,
"step": 1840
},
{
"epoch": 9.04,
"grad_norm": 0.09456183016300201,
"learning_rate": 6.613756613756614e-06,
"loss": 0.07,
"step": 1850
},
{
"epoch": 9.05,
"grad_norm": 0.9321669936180115,
"learning_rate": 6.349206349206349e-06,
"loss": 0.0108,
"step": 1860
},
{
"epoch": 9.05,
"grad_norm": 0.7253817915916443,
"learning_rate": 6.0846560846560845e-06,
"loss": 0.0041,
"step": 1870
},
{
"epoch": 9.06,
"grad_norm": 3.511643886566162,
"learning_rate": 5.82010582010582e-06,
"loss": 0.057,
"step": 1880
},
{
"epoch": 9.06,
"grad_norm": 0.09484612196683884,
"learning_rate": 5.555555555555556e-06,
"loss": 0.05,
"step": 1890
},
{
"epoch": 9.06,
"grad_norm": 0.05964544415473938,
"learning_rate": 5.291005291005291e-06,
"loss": 0.0089,
"step": 1900
},
{
"epoch": 9.07,
"grad_norm": 0.03321171924471855,
"learning_rate": 5.026455026455026e-06,
"loss": 0.0041,
"step": 1910
},
{
"epoch": 9.07,
"grad_norm": 0.12247146666049957,
"learning_rate": 4.7619047619047615e-06,
"loss": 0.0272,
"step": 1920
},
{
"epoch": 9.08,
"grad_norm": 6.011055946350098,
"learning_rate": 4.497354497354498e-06,
"loss": 0.0125,
"step": 1930
},
{
"epoch": 9.08,
"grad_norm": 0.07328727841377258,
"learning_rate": 4.232804232804233e-06,
"loss": 0.0493,
"step": 1940
},
{
"epoch": 9.09,
"grad_norm": 0.03230283781886101,
"learning_rate": 3.968253968253968e-06,
"loss": 0.0104,
"step": 1950
},
{
"epoch": 9.09,
"grad_norm": 0.10232985019683838,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.0101,
"step": 1960
},
{
"epoch": 9.09,
"eval_accuracy": 0.34146341463414637,
"eval_loss": 2.7895021438598633,
"eval_runtime": 98.3982,
"eval_samples_per_second": 0.417,
"eval_steps_per_second": 0.213,
"step": 1960
},
{
"epoch": 10.0,
"grad_norm": 5.483851909637451,
"learning_rate": 3.439153439153439e-06,
"loss": 0.2193,
"step": 1970
},
{
"epoch": 10.01,
"grad_norm": 2.402463912963867,
"learning_rate": 3.1746031746031746e-06,
"loss": 0.005,
"step": 1980
},
{
"epoch": 10.01,
"grad_norm": 0.08023607730865479,
"learning_rate": 2.91005291005291e-06,
"loss": 0.0125,
"step": 1990
},
{
"epoch": 10.02,
"grad_norm": 0.023654110729694366,
"learning_rate": 2.6455026455026455e-06,
"loss": 0.0094,
"step": 2000
},
{
"epoch": 10.02,
"grad_norm": 0.052570246160030365,
"learning_rate": 2.3809523809523808e-06,
"loss": 0.11,
"step": 2010
},
{
"epoch": 10.03,
"grad_norm": 0.08791643381118774,
"learning_rate": 2.1164021164021164e-06,
"loss": 0.0189,
"step": 2020
},
{
"epoch": 10.03,
"grad_norm": 0.21485354006290436,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.0285,
"step": 2030
},
{
"epoch": 10.04,
"grad_norm": 0.09601633995771408,
"learning_rate": 1.5873015873015873e-06,
"loss": 0.0326,
"step": 2040
},
{
"epoch": 10.04,
"grad_norm": 0.05465522035956383,
"learning_rate": 1.3227513227513228e-06,
"loss": 0.0235,
"step": 2050
},
{
"epoch": 10.05,
"grad_norm": 0.12677225470542908,
"learning_rate": 1.0582010582010582e-06,
"loss": 0.0122,
"step": 2060
},
{
"epoch": 10.05,
"grad_norm": 0.05187565088272095,
"learning_rate": 7.936507936507937e-07,
"loss": 0.0277,
"step": 2070
},
{
"epoch": 10.06,
"grad_norm": 0.02051164023578167,
"learning_rate": 5.291005291005291e-07,
"loss": 0.1015,
"step": 2080
},
{
"epoch": 10.06,
"grad_norm": 0.02192351035773754,
"learning_rate": 2.6455026455026455e-07,
"loss": 0.1089,
"step": 2090
},
{
"epoch": 10.07,
"grad_norm": 0.19547943770885468,
"learning_rate": 0.0,
"loss": 0.0662,
"step": 2100
},
{
"epoch": 10.07,
"eval_accuracy": 0.34146341463414637,
"eval_loss": 2.772752523422241,
"eval_runtime": 100.1099,
"eval_samples_per_second": 0.41,
"eval_steps_per_second": 0.21,
"step": 2100
},
{
"epoch": 10.07,
"step": 2100,
"total_flos": 1.073544417178878e+19,
"train_loss": 0.6815972964820408,
"train_runtime": 32185.1971,
"train_samples_per_second": 0.13,
"train_steps_per_second": 0.065
},
{
"epoch": 10.07,
"eval_accuracy": 0.24271844660194175,
"eval_loss": 1.9485329389572144,
"eval_runtime": 275.3036,
"eval_samples_per_second": 0.374,
"eval_steps_per_second": 0.189,
"step": 2100
},
{
"epoch": 10.07,
"eval_accuracy": 0.24271844660194175,
"eval_loss": 1.9485328197479248,
"eval_runtime": 251.8324,
"eval_samples_per_second": 0.409,
"eval_steps_per_second": 0.206,
"step": 2100
}
],
"logging_steps": 10,
"max_steps": 2100,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 1.073544417178878e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}