{
"best_metric": 0.9899861346479741,
"best_model_checkpoint": "deit-base-patch16-224-finetuned-lora-medmnistv2/checkpoint-4324",
"epoch": 9.990749306197966,
"eval_steps": 500,
"global_step": 5400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 2.755105972290039,
"learning_rate": 0.004990740740740741,
"loss": 1.8298,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 3.729515790939331,
"learning_rate": 0.004981481481481482,
"loss": 1.2318,
"step": 20
},
{
"epoch": 0.06,
"grad_norm": 1.507834792137146,
"learning_rate": 0.0049722222222222225,
"loss": 1.0656,
"step": 30
},
{
"epoch": 0.07,
"grad_norm": 2.6946210861206055,
"learning_rate": 0.004962962962962963,
"loss": 1.0045,
"step": 40
},
{
"epoch": 0.09,
"grad_norm": 1.5376859903335571,
"learning_rate": 0.004953703703703703,
"loss": 0.952,
"step": 50
},
{
"epoch": 0.11,
"grad_norm": 1.783308982849121,
"learning_rate": 0.004944444444444445,
"loss": 0.808,
"step": 60
},
{
"epoch": 0.13,
"grad_norm": 2.132023811340332,
"learning_rate": 0.004935185185185186,
"loss": 0.7731,
"step": 70
},
{
"epoch": 0.15,
"grad_norm": 1.7192047834396362,
"learning_rate": 0.004925925925925926,
"loss": 0.8522,
"step": 80
},
{
"epoch": 0.17,
"grad_norm": 1.745439052581787,
"learning_rate": 0.004916666666666666,
"loss": 0.7773,
"step": 90
},
{
"epoch": 0.19,
"grad_norm": 1.608622670173645,
"learning_rate": 0.004907407407407408,
"loss": 0.7742,
"step": 100
},
{
"epoch": 0.2,
"grad_norm": 1.5263983011245728,
"learning_rate": 0.004898148148148148,
"loss": 0.8207,
"step": 110
},
{
"epoch": 0.22,
"grad_norm": 1.6577465534210205,
"learning_rate": 0.004888888888888889,
"loss": 0.7417,
"step": 120
},
{
"epoch": 0.24,
"grad_norm": 2.3107540607452393,
"learning_rate": 0.00487962962962963,
"loss": 0.7652,
"step": 130
},
{
"epoch": 0.26,
"grad_norm": 1.8946397304534912,
"learning_rate": 0.00487037037037037,
"loss": 0.763,
"step": 140
},
{
"epoch": 0.28,
"grad_norm": 2.175830125808716,
"learning_rate": 0.004861111111111111,
"loss": 0.7303,
"step": 150
},
{
"epoch": 0.3,
"grad_norm": 1.8831381797790527,
"learning_rate": 0.004851851851851852,
"loss": 0.7505,
"step": 160
},
{
"epoch": 0.31,
"grad_norm": 2.2608139514923096,
"learning_rate": 0.004842592592592593,
"loss": 0.6704,
"step": 170
},
{
"epoch": 0.33,
"grad_norm": 1.5122900009155273,
"learning_rate": 0.004833333333333334,
"loss": 0.7428,
"step": 180
},
{
"epoch": 0.35,
"grad_norm": 1.939324975013733,
"learning_rate": 0.004824074074074074,
"loss": 0.7112,
"step": 190
},
{
"epoch": 0.37,
"grad_norm": 2.1237051486968994,
"learning_rate": 0.004814814814814814,
"loss": 0.7419,
"step": 200
},
{
"epoch": 0.39,
"grad_norm": 1.0236401557922363,
"learning_rate": 0.004805555555555556,
"loss": 0.6617,
"step": 210
},
{
"epoch": 0.41,
"grad_norm": 1.503143310546875,
"learning_rate": 0.004796296296296297,
"loss": 0.7201,
"step": 220
},
{
"epoch": 0.43,
"grad_norm": 1.937041163444519,
"learning_rate": 0.004787037037037037,
"loss": 0.6503,
"step": 230
},
{
"epoch": 0.44,
"grad_norm": 1.2516043186187744,
"learning_rate": 0.004777777777777778,
"loss": 0.6473,
"step": 240
},
{
"epoch": 0.46,
"grad_norm": 2.2114853858947754,
"learning_rate": 0.004768518518518518,
"loss": 0.6475,
"step": 250
},
{
"epoch": 0.48,
"grad_norm": 1.8521231412887573,
"learning_rate": 0.004759259259259259,
"loss": 0.6729,
"step": 260
},
{
"epoch": 0.5,
"grad_norm": 1.599143147468567,
"learning_rate": 0.00475,
"loss": 0.6437,
"step": 270
},
{
"epoch": 0.52,
"grad_norm": 1.5192047357559204,
"learning_rate": 0.004740740740740741,
"loss": 0.6731,
"step": 280
},
{
"epoch": 0.54,
"grad_norm": 1.874036431312561,
"learning_rate": 0.0047314814814814815,
"loss": 0.707,
"step": 290
},
{
"epoch": 0.56,
"grad_norm": 1.582754135131836,
"learning_rate": 0.004722222222222222,
"loss": 0.6477,
"step": 300
},
{
"epoch": 0.57,
"grad_norm": 1.7915743589401245,
"learning_rate": 0.004712962962962963,
"loss": 0.6226,
"step": 310
},
{
"epoch": 0.59,
"grad_norm": 2.2843360900878906,
"learning_rate": 0.004703703703703704,
"loss": 0.7287,
"step": 320
},
{
"epoch": 0.61,
"grad_norm": 1.8337290287017822,
"learning_rate": 0.004694444444444445,
"loss": 0.652,
"step": 330
},
{
"epoch": 0.63,
"grad_norm": 1.7287871837615967,
"learning_rate": 0.0046851851851851855,
"loss": 0.5503,
"step": 340
},
{
"epoch": 0.65,
"grad_norm": 1.7953327894210815,
"learning_rate": 0.004675925925925926,
"loss": 0.5608,
"step": 350
},
{
"epoch": 0.67,
"grad_norm": 3.0038208961486816,
"learning_rate": 0.004666666666666667,
"loss": 0.7144,
"step": 360
},
{
"epoch": 0.68,
"grad_norm": 1.8996288776397705,
"learning_rate": 0.004657407407407408,
"loss": 0.6965,
"step": 370
},
{
"epoch": 0.7,
"grad_norm": 3.314239740371704,
"learning_rate": 0.004648148148148148,
"loss": 0.7012,
"step": 380
},
{
"epoch": 0.72,
"grad_norm": 2.629176139831543,
"learning_rate": 0.004639814814814815,
"loss": 0.6977,
"step": 390
},
{
"epoch": 0.74,
"grad_norm": 1.343112826347351,
"learning_rate": 0.004630555555555555,
"loss": 0.7257,
"step": 400
},
{
"epoch": 0.76,
"grad_norm": 1.4948642253875732,
"learning_rate": 0.004621296296296296,
"loss": 0.7244,
"step": 410
},
{
"epoch": 0.78,
"grad_norm": 2.248868942260742,
"learning_rate": 0.004612037037037038,
"loss": 0.6938,
"step": 420
},
{
"epoch": 0.8,
"grad_norm": 1.4094672203063965,
"learning_rate": 0.004602777777777778,
"loss": 0.6338,
"step": 430
},
{
"epoch": 0.81,
"grad_norm": 1.7461600303649902,
"learning_rate": 0.0045935185185185185,
"loss": 0.623,
"step": 440
},
{
"epoch": 0.83,
"grad_norm": 1.9258304834365845,
"learning_rate": 0.004584259259259259,
"loss": 0.6681,
"step": 450
},
{
"epoch": 0.85,
"grad_norm": 2.1258721351623535,
"learning_rate": 0.004575,
"loss": 0.6885,
"step": 460
},
{
"epoch": 0.87,
"grad_norm": 1.852216362953186,
"learning_rate": 0.004565740740740741,
"loss": 0.6815,
"step": 470
},
{
"epoch": 0.89,
"grad_norm": 2.523413896560669,
"learning_rate": 0.004556481481481482,
"loss": 0.6267,
"step": 480
},
{
"epoch": 0.91,
"grad_norm": 2.133388042449951,
"learning_rate": 0.0045472222222222225,
"loss": 0.6133,
"step": 490
},
{
"epoch": 0.93,
"grad_norm": 2.732682228088379,
"learning_rate": 0.004537962962962963,
"loss": 0.7924,
"step": 500
},
{
"epoch": 0.94,
"grad_norm": 1.4026442766189575,
"learning_rate": 0.004528703703703704,
"loss": 0.5851,
"step": 510
},
{
"epoch": 0.96,
"grad_norm": 1.5229674577713013,
"learning_rate": 0.004519444444444444,
"loss": 0.6732,
"step": 520
},
{
"epoch": 0.98,
"grad_norm": 2.2832190990448,
"learning_rate": 0.004510185185185186,
"loss": 0.6453,
"step": 530
},
{
"epoch": 1.0,
"grad_norm": 1.637460708618164,
"learning_rate": 0.0045009259259259264,
"loss": 0.5849,
"step": 540
},
{
"epoch": 1.0,
"eval_accuracy": 0.9442304729625636,
"eval_f1": 0.9284990016049455,
"eval_loss": 0.18415337800979614,
"eval_precision": 0.9449212426032028,
"eval_recall": 0.92677489589942,
"eval_runtime": 53.3063,
"eval_samples_per_second": 121.768,
"eval_steps_per_second": 7.616,
"step": 540
},
{
"epoch": 1.02,
"grad_norm": 1.7663180828094482,
"learning_rate": 0.004491666666666666,
"loss": 0.6677,
"step": 550
},
{
"epoch": 1.04,
"grad_norm": 1.3648691177368164,
"learning_rate": 0.004482407407407407,
"loss": 0.6105,
"step": 560
},
{
"epoch": 1.05,
"grad_norm": 1.8574776649475098,
"learning_rate": 0.004473148148148149,
"loss": 0.6633,
"step": 570
},
{
"epoch": 1.07,
"grad_norm": 2.4903650283813477,
"learning_rate": 0.004463888888888889,
"loss": 0.5867,
"step": 580
},
{
"epoch": 1.09,
"grad_norm": 1.7570849657058716,
"learning_rate": 0.0044546296296296296,
"loss": 0.6011,
"step": 590
},
{
"epoch": 1.11,
"grad_norm": 1.796229600906372,
"learning_rate": 0.00444537037037037,
"loss": 0.565,
"step": 600
},
{
"epoch": 1.13,
"grad_norm": 1.2062430381774902,
"learning_rate": 0.004436111111111111,
"loss": 0.6809,
"step": 610
},
{
"epoch": 1.15,
"grad_norm": 1.641988754272461,
"learning_rate": 0.004426851851851852,
"loss": 0.6778,
"step": 620
},
{
"epoch": 1.17,
"grad_norm": 2.171339988708496,
"learning_rate": 0.004417592592592593,
"loss": 0.6091,
"step": 630
},
{
"epoch": 1.18,
"grad_norm": 1.634817123413086,
"learning_rate": 0.0044083333333333335,
"loss": 0.5903,
"step": 640
},
{
"epoch": 1.2,
"grad_norm": 1.8555738925933838,
"learning_rate": 0.004399074074074074,
"loss": 0.6029,
"step": 650
},
{
"epoch": 1.22,
"grad_norm": 1.2326161861419678,
"learning_rate": 0.004389814814814815,
"loss": 0.6358,
"step": 660
},
{
"epoch": 1.24,
"grad_norm": 1.5019110441207886,
"learning_rate": 0.004380555555555555,
"loss": 0.6457,
"step": 670
},
{
"epoch": 1.26,
"grad_norm": 2.464613199234009,
"learning_rate": 0.004371296296296297,
"loss": 0.6165,
"step": 680
},
{
"epoch": 1.28,
"grad_norm": 1.4740674495697021,
"learning_rate": 0.0043620370370370375,
"loss": 0.6324,
"step": 690
},
{
"epoch": 1.3,
"grad_norm": 2.2495017051696777,
"learning_rate": 0.0043527777777777775,
"loss": 0.6903,
"step": 700
},
{
"epoch": 1.31,
"grad_norm": 1.0371023416519165,
"learning_rate": 0.004343518518518519,
"loss": 0.7259,
"step": 710
},
{
"epoch": 1.33,
"grad_norm": 1.4419306516647339,
"learning_rate": 0.004334259259259259,
"loss": 0.5841,
"step": 720
},
{
"epoch": 1.35,
"grad_norm": 2.1435110569000244,
"learning_rate": 0.004325,
"loss": 0.5803,
"step": 730
},
{
"epoch": 1.37,
"grad_norm": 1.697944164276123,
"learning_rate": 0.004315740740740741,
"loss": 0.5959,
"step": 740
},
{
"epoch": 1.39,
"grad_norm": 2.058931589126587,
"learning_rate": 0.0043064814814814814,
"loss": 0.619,
"step": 750
},
{
"epoch": 1.41,
"grad_norm": 1.3956432342529297,
"learning_rate": 0.004297222222222222,
"loss": 0.6453,
"step": 760
},
{
"epoch": 1.42,
"grad_norm": 1.9578568935394287,
"learning_rate": 0.004287962962962963,
"loss": 0.5163,
"step": 770
},
{
"epoch": 1.44,
"grad_norm": 2.150716781616211,
"learning_rate": 0.004278703703703704,
"loss": 0.6397,
"step": 780
},
{
"epoch": 1.46,
"grad_norm": 2.469938278198242,
"learning_rate": 0.004269444444444445,
"loss": 0.6264,
"step": 790
},
{
"epoch": 1.48,
"grad_norm": 2.8132483959198,
"learning_rate": 0.004260185185185185,
"loss": 0.574,
"step": 800
},
{
"epoch": 1.5,
"grad_norm": 1.2079416513442993,
"learning_rate": 0.004250925925925926,
"loss": 0.5735,
"step": 810
},
{
"epoch": 1.52,
"grad_norm": 1.7141121625900269,
"learning_rate": 0.004241666666666667,
"loss": 0.6086,
"step": 820
},
{
"epoch": 1.54,
"grad_norm": 1.2220849990844727,
"learning_rate": 0.004232407407407408,
"loss": 0.6525,
"step": 830
},
{
"epoch": 1.55,
"grad_norm": 2.2290451526641846,
"learning_rate": 0.004223148148148149,
"loss": 0.6515,
"step": 840
},
{
"epoch": 1.57,
"grad_norm": 1.285662293434143,
"learning_rate": 0.0042138888888888885,
"loss": 0.6119,
"step": 850
},
{
"epoch": 1.59,
"grad_norm": 1.6702948808670044,
"learning_rate": 0.00420462962962963,
"loss": 0.5591,
"step": 860
},
{
"epoch": 1.61,
"grad_norm": 1.3915163278579712,
"learning_rate": 0.00419537037037037,
"loss": 0.5742,
"step": 870
},
{
"epoch": 1.63,
"grad_norm": 1.0722817182540894,
"learning_rate": 0.004186111111111111,
"loss": 0.5443,
"step": 880
},
{
"epoch": 1.65,
"grad_norm": 1.2701308727264404,
"learning_rate": 0.004176851851851852,
"loss": 0.5407,
"step": 890
},
{
"epoch": 1.67,
"grad_norm": 2.4829461574554443,
"learning_rate": 0.0041675925925925925,
"loss": 0.617,
"step": 900
},
{
"epoch": 1.68,
"grad_norm": 1.6107633113861084,
"learning_rate": 0.004158333333333333,
"loss": 0.6352,
"step": 910
},
{
"epoch": 1.7,
"grad_norm": 1.6750290393829346,
"learning_rate": 0.004149074074074074,
"loss": 0.6186,
"step": 920
},
{
"epoch": 1.72,
"grad_norm": 3.0813207626342773,
"learning_rate": 0.004139814814814815,
"loss": 0.6398,
"step": 930
},
{
"epoch": 1.74,
"grad_norm": 1.46220862865448,
"learning_rate": 0.004130555555555556,
"loss": 0.6248,
"step": 940
},
{
"epoch": 1.76,
"grad_norm": 1.8916534185409546,
"learning_rate": 0.0041212962962962965,
"loss": 0.5828,
"step": 950
},
{
"epoch": 1.78,
"grad_norm": 1.397637128829956,
"learning_rate": 0.004112037037037037,
"loss": 0.6127,
"step": 960
},
{
"epoch": 1.79,
"grad_norm": 1.5524698495864868,
"learning_rate": 0.004102777777777778,
"loss": 0.5434,
"step": 970
},
{
"epoch": 1.81,
"grad_norm": 1.357723355293274,
"learning_rate": 0.004093518518518519,
"loss": 0.565,
"step": 980
},
{
"epoch": 1.83,
"grad_norm": 2.0358312129974365,
"learning_rate": 0.004084259259259259,
"loss": 0.6351,
"step": 990
},
{
"epoch": 1.85,
"grad_norm": 2.148568868637085,
"learning_rate": 0.004075,
"loss": 0.6259,
"step": 1000
},
{
"epoch": 1.87,
"grad_norm": 1.5996747016906738,
"learning_rate": 0.004065740740740741,
"loss": 0.6821,
"step": 1010
},
{
"epoch": 1.89,
"grad_norm": 2.8041980266571045,
"learning_rate": 0.004056481481481481,
"loss": 0.6141,
"step": 1020
},
{
"epoch": 1.91,
"grad_norm": 2.5319840908050537,
"learning_rate": 0.004047222222222222,
"loss": 0.5978,
"step": 1030
},
{
"epoch": 1.92,
"grad_norm": 1.356850266456604,
"learning_rate": 0.004037962962962964,
"loss": 0.6444,
"step": 1040
},
{
"epoch": 1.94,
"grad_norm": 1.4726742506027222,
"learning_rate": 0.004028703703703704,
"loss": 0.5937,
"step": 1050
},
{
"epoch": 1.96,
"grad_norm": 2.525775194168091,
"learning_rate": 0.004019444444444444,
"loss": 0.5609,
"step": 1060
},
{
"epoch": 1.98,
"grad_norm": 2.2801449298858643,
"learning_rate": 0.004010185185185185,
"loss": 0.6315,
"step": 1070
},
{
"epoch": 2.0,
"grad_norm": 2.0325303077697754,
"learning_rate": 0.004000925925925926,
"loss": 0.6494,
"step": 1080
},
{
"epoch": 2.0,
"eval_accuracy": 0.9499306732398706,
"eval_f1": 0.9509024323746281,
"eval_loss": 0.14330759644508362,
"eval_precision": 0.9538743946507812,
"eval_recall": 0.9510137440302636,
"eval_runtime": 53.345,
"eval_samples_per_second": 121.68,
"eval_steps_per_second": 7.611,
"step": 1081
},
{
"epoch": 2.02,
"grad_norm": 1.2894580364227295,
"learning_rate": 0.003991666666666667,
"loss": 0.5896,
"step": 1090
},
{
"epoch": 2.04,
"grad_norm": 1.835950255393982,
"learning_rate": 0.003982407407407408,
"loss": 0.6027,
"step": 1100
},
{
"epoch": 2.05,
"grad_norm": 2.6905369758605957,
"learning_rate": 0.0039731481481481475,
"loss": 0.5896,
"step": 1110
},
{
"epoch": 2.07,
"grad_norm": 1.3313945531845093,
"learning_rate": 0.003963888888888889,
"loss": 0.5735,
"step": 1120
},
{
"epoch": 2.09,
"grad_norm": 1.584228754043579,
"learning_rate": 0.00395462962962963,
"loss": 0.5194,
"step": 1130
},
{
"epoch": 2.11,
"grad_norm": 1.3231537342071533,
"learning_rate": 0.00394537037037037,
"loss": 0.556,
"step": 1140
},
{
"epoch": 2.13,
"grad_norm": 2.2847185134887695,
"learning_rate": 0.003936111111111112,
"loss": 0.5564,
"step": 1150
},
{
"epoch": 2.15,
"grad_norm": 1.5955736637115479,
"learning_rate": 0.003926851851851852,
"loss": 0.6138,
"step": 1160
},
{
"epoch": 2.16,
"grad_norm": 1.2592884302139282,
"learning_rate": 0.003917592592592592,
"loss": 0.6048,
"step": 1170
},
{
"epoch": 2.18,
"grad_norm": 2.0293960571289062,
"learning_rate": 0.003908333333333333,
"loss": 0.5609,
"step": 1180
},
{
"epoch": 2.2,
"grad_norm": 1.5888597965240479,
"learning_rate": 0.0038990740740740743,
"loss": 0.6054,
"step": 1190
},
{
"epoch": 2.22,
"grad_norm": 2.2508180141448975,
"learning_rate": 0.003889814814814815,
"loss": 0.596,
"step": 1200
},
{
"epoch": 2.24,
"grad_norm": 1.29166841506958,
"learning_rate": 0.0038805555555555555,
"loss": 0.6829,
"step": 1210
},
{
"epoch": 2.26,
"grad_norm": 1.7769325971603394,
"learning_rate": 0.0038712962962962967,
"loss": 0.6646,
"step": 1220
},
{
"epoch": 2.28,
"grad_norm": 1.0019413232803345,
"learning_rate": 0.003862037037037037,
"loss": 0.563,
"step": 1230
},
{
"epoch": 2.29,
"grad_norm": 1.9900081157684326,
"learning_rate": 0.003852777777777778,
"loss": 0.5392,
"step": 1240
},
{
"epoch": 2.31,
"grad_norm": 2.0797948837280273,
"learning_rate": 0.0038435185185185182,
"loss": 0.5434,
"step": 1250
},
{
"epoch": 2.33,
"grad_norm": 1.445152997970581,
"learning_rate": 0.0038342592592592595,
"loss": 0.567,
"step": 1260
},
{
"epoch": 2.35,
"grad_norm": 1.6301777362823486,
"learning_rate": 0.0038250000000000003,
"loss": 0.6221,
"step": 1270
},
{
"epoch": 2.37,
"grad_norm": 1.7782893180847168,
"learning_rate": 0.0038157407407407406,
"loss": 0.5987,
"step": 1280
},
{
"epoch": 2.39,
"grad_norm": 1.4305975437164307,
"learning_rate": 0.0038064814814814814,
"loss": 0.5985,
"step": 1290
},
{
"epoch": 2.41,
"grad_norm": 1.4855763912200928,
"learning_rate": 0.0037972222222222227,
"loss": 0.608,
"step": 1300
},
{
"epoch": 2.42,
"grad_norm": 1.2296168804168701,
"learning_rate": 0.003787962962962963,
"loss": 0.6003,
"step": 1310
},
{
"epoch": 2.44,
"grad_norm": 1.654550313949585,
"learning_rate": 0.003778703703703704,
"loss": 0.5832,
"step": 1320
},
{
"epoch": 2.46,
"grad_norm": 1.3498139381408691,
"learning_rate": 0.0037694444444444446,
"loss": 0.5549,
"step": 1330
},
{
"epoch": 2.48,
"grad_norm": 1.7986209392547607,
"learning_rate": 0.0037601851851851854,
"loss": 0.6364,
"step": 1340
},
{
"epoch": 2.5,
"grad_norm": 2.0304336547851562,
"learning_rate": 0.0037509259259259258,
"loss": 0.5549,
"step": 1350
},
{
"epoch": 2.52,
"grad_norm": 1.7919527292251587,
"learning_rate": 0.0037416666666666666,
"loss": 0.6075,
"step": 1360
},
{
"epoch": 2.53,
"grad_norm": 0.9223549365997314,
"learning_rate": 0.003732407407407408,
"loss": 0.5013,
"step": 1370
},
{
"epoch": 2.55,
"grad_norm": 1.9208143949508667,
"learning_rate": 0.003723148148148148,
"loss": 0.596,
"step": 1380
},
{
"epoch": 2.57,
"grad_norm": 1.878339171409607,
"learning_rate": 0.003713888888888889,
"loss": 0.5878,
"step": 1390
},
{
"epoch": 2.59,
"grad_norm": 1.3851909637451172,
"learning_rate": 0.0037046296296296293,
"loss": 0.5433,
"step": 1400
},
{
"epoch": 2.61,
"grad_norm": 1.551100730895996,
"learning_rate": 0.0036953703703703706,
"loss": 0.5827,
"step": 1410
},
{
"epoch": 2.63,
"grad_norm": 1.2063732147216797,
"learning_rate": 0.0036861111111111114,
"loss": 0.5331,
"step": 1420
},
{
"epoch": 2.65,
"grad_norm": 1.514872431755066,
"learning_rate": 0.0036768518518518517,
"loss": 0.548,
"step": 1430
},
{
"epoch": 2.66,
"grad_norm": 1.643267035484314,
"learning_rate": 0.003667592592592593,
"loss": 0.598,
"step": 1440
},
{
"epoch": 2.68,
"grad_norm": 1.5592392683029175,
"learning_rate": 0.0036583333333333333,
"loss": 0.4851,
"step": 1450
},
{
"epoch": 2.7,
"grad_norm": 1.1929229497909546,
"learning_rate": 0.003649074074074074,
"loss": 0.5362,
"step": 1460
},
{
"epoch": 2.72,
"grad_norm": 1.610229253768921,
"learning_rate": 0.0036398148148148145,
"loss": 0.5826,
"step": 1470
},
{
"epoch": 2.74,
"grad_norm": 1.2494343519210815,
"learning_rate": 0.0036305555555555557,
"loss": 0.5206,
"step": 1480
},
{
"epoch": 2.76,
"grad_norm": 1.1670695543289185,
"learning_rate": 0.0036212962962962965,
"loss": 0.569,
"step": 1490
},
{
"epoch": 2.78,
"grad_norm": 1.3939517736434937,
"learning_rate": 0.003612037037037037,
"loss": 0.5018,
"step": 1500
},
{
"epoch": 2.79,
"grad_norm": 1.3553017377853394,
"learning_rate": 0.0036027777777777777,
"loss": 0.5167,
"step": 1510
},
{
"epoch": 2.81,
"grad_norm": 1.3546302318572998,
"learning_rate": 0.003593518518518519,
"loss": 0.4979,
"step": 1520
},
{
"epoch": 2.83,
"grad_norm": 1.2396551370620728,
"learning_rate": 0.0035842592592592593,
"loss": 0.4837,
"step": 1530
},
{
"epoch": 2.85,
"grad_norm": 1.103031873703003,
"learning_rate": 0.003575,
"loss": 0.6083,
"step": 1540
},
{
"epoch": 2.87,
"grad_norm": 1.642699122428894,
"learning_rate": 0.003565740740740741,
"loss": 0.5832,
"step": 1550
},
{
"epoch": 2.89,
"grad_norm": 1.550116777420044,
"learning_rate": 0.0035564814814814816,
"loss": 0.479,
"step": 1560
},
{
"epoch": 2.9,
"grad_norm": 1.745978832244873,
"learning_rate": 0.0035472222222222224,
"loss": 0.5643,
"step": 1570
},
{
"epoch": 2.92,
"grad_norm": 0.9975535273551941,
"learning_rate": 0.003537962962962963,
"loss": 0.5837,
"step": 1580
},
{
"epoch": 2.94,
"grad_norm": 1.8515207767486572,
"learning_rate": 0.003528703703703704,
"loss": 0.5637,
"step": 1590
},
{
"epoch": 2.96,
"grad_norm": 2.022219181060791,
"learning_rate": 0.0035194444444444444,
"loss": 0.5121,
"step": 1600
},
{
"epoch": 2.98,
"grad_norm": 1.8385446071624756,
"learning_rate": 0.003510185185185185,
"loss": 0.5244,
"step": 1610
},
{
"epoch": 3.0,
"grad_norm": 1.8141658306121826,
"learning_rate": 0.0035009259259259256,
"loss": 0.6059,
"step": 1620
},
{
"epoch": 3.0,
"eval_accuracy": 0.9562471113849946,
"eval_f1": 0.9592862858808702,
"eval_loss": 0.1171223372220993,
"eval_precision": 0.9658949301313363,
"eval_recall": 0.9569053041890571,
"eval_runtime": 53.6538,
"eval_samples_per_second": 120.979,
"eval_steps_per_second": 7.567,
"step": 1621
},
{
"epoch": 3.02,
"grad_norm": 1.811785340309143,
"learning_rate": 0.003491666666666667,
"loss": 0.5229,
"step": 1630
},
{
"epoch": 3.03,
"grad_norm": 0.8801301717758179,
"learning_rate": 0.0034824074074074076,
"loss": 0.5735,
"step": 1640
},
{
"epoch": 3.05,
"grad_norm": 1.5100030899047852,
"learning_rate": 0.003473148148148148,
"loss": 0.5581,
"step": 1650
},
{
"epoch": 3.07,
"grad_norm": 1.7307904958724976,
"learning_rate": 0.003463888888888889,
"loss": 0.4894,
"step": 1660
},
{
"epoch": 3.09,
"grad_norm": 2.264219045639038,
"learning_rate": 0.00345462962962963,
"loss": 0.5689,
"step": 1670
},
{
"epoch": 3.11,
"grad_norm": 1.2732802629470825,
"learning_rate": 0.0034453703703703703,
"loss": 0.5168,
"step": 1680
},
{
"epoch": 3.13,
"grad_norm": 1.5013095140457153,
"learning_rate": 0.003436111111111111,
"loss": 0.4907,
"step": 1690
},
{
"epoch": 3.15,
"grad_norm": 1.617488980293274,
"learning_rate": 0.003426851851851852,
"loss": 0.4795,
"step": 1700
},
{
"epoch": 3.16,
"grad_norm": 1.5595793724060059,
"learning_rate": 0.0034175925925925927,
"loss": 0.5816,
"step": 1710
},
{
"epoch": 3.18,
"grad_norm": 1.9643666744232178,
"learning_rate": 0.003408333333333333,
"loss": 0.5512,
"step": 1720
},
{
"epoch": 3.2,
"grad_norm": 1.1487085819244385,
"learning_rate": 0.003399074074074074,
"loss": 0.5253,
"step": 1730
},
{
"epoch": 3.22,
"grad_norm": 1.359946608543396,
"learning_rate": 0.003389814814814815,
"loss": 0.5056,
"step": 1740
},
{
"epoch": 3.24,
"grad_norm": 1.0156477689743042,
"learning_rate": 0.0033805555555555555,
"loss": 0.5154,
"step": 1750
},
{
"epoch": 3.26,
"grad_norm": 1.1285754442214966,
"learning_rate": 0.0033712962962962963,
"loss": 0.5344,
"step": 1760
},
{
"epoch": 3.27,
"grad_norm": 1.5709244012832642,
"learning_rate": 0.0033620370370370375,
"loss": 0.5373,
"step": 1770
},
{
"epoch": 3.29,
"grad_norm": 1.4874547719955444,
"learning_rate": 0.003352777777777778,
"loss": 0.4925,
"step": 1780
},
{
"epoch": 3.31,
"grad_norm": 2.197540044784546,
"learning_rate": 0.0033435185185185187,
"loss": 0.4636,
"step": 1790
},
{
"epoch": 3.33,
"grad_norm": 1.0891751050949097,
"learning_rate": 0.003334259259259259,
"loss": 0.5313,
"step": 1800
},
{
"epoch": 3.35,
"grad_norm": 1.4213520288467407,
"learning_rate": 0.0033250000000000003,
"loss": 0.5016,
"step": 1810
},
{
"epoch": 3.37,
"grad_norm": 1.420879602432251,
"learning_rate": 0.0033157407407407406,
"loss": 0.4622,
"step": 1820
},
{
"epoch": 3.39,
"grad_norm": 0.9189732074737549,
"learning_rate": 0.0033064814814814814,
"loss": 0.5148,
"step": 1830
},
{
"epoch": 3.4,
"grad_norm": 1.5656261444091797,
"learning_rate": 0.0032972222222222227,
"loss": 0.4722,
"step": 1840
},
{
"epoch": 3.42,
"grad_norm": 1.112702488899231,
"learning_rate": 0.003287962962962963,
"loss": 0.4968,
"step": 1850
},
{
"epoch": 3.44,
"grad_norm": 1.58233642578125,
"learning_rate": 0.003278703703703704,
"loss": 0.4366,
"step": 1860
},
{
"epoch": 3.46,
"grad_norm": 1.3384062051773071,
"learning_rate": 0.003269444444444444,
"loss": 0.4679,
"step": 1870
},
{
"epoch": 3.48,
"grad_norm": 1.0208711624145508,
"learning_rate": 0.0032601851851851854,
"loss": 0.5348,
"step": 1880
},
{
"epoch": 3.5,
"grad_norm": 0.9358342885971069,
"learning_rate": 0.003250925925925926,
"loss": 0.468,
"step": 1890
},
{
"epoch": 3.52,
"grad_norm": 1.3694192171096802,
"learning_rate": 0.0032416666666666666,
"loss": 0.4936,
"step": 1900
},
{
"epoch": 3.53,
"grad_norm": 1.218279242515564,
"learning_rate": 0.0032324074074074074,
"loss": 0.5086,
"step": 1910
},
{
"epoch": 3.55,
"grad_norm": 1.1621546745300293,
"learning_rate": 0.0032231481481481486,
"loss": 0.4918,
"step": 1920
},
{
"epoch": 3.57,
"grad_norm": 1.2146507501602173,
"learning_rate": 0.003213888888888889,
"loss": 0.488,
"step": 1930
},
{
"epoch": 3.59,
"grad_norm": 1.1717473268508911,
"learning_rate": 0.0032046296296296298,
"loss": 0.5438,
"step": 1940
},
{
"epoch": 3.61,
"grad_norm": 0.9260009527206421,
"learning_rate": 0.0031953703703703706,
"loss": 0.4683,
"step": 1950
},
{
"epoch": 3.63,
"grad_norm": 1.7548483610153198,
"learning_rate": 0.0031861111111111113,
"loss": 0.5116,
"step": 1960
},
{
"epoch": 3.64,
"grad_norm": 1.603769302368164,
"learning_rate": 0.0031768518518518517,
"loss": 0.506,
"step": 1970
},
{
"epoch": 3.66,
"grad_norm": 1.4770747423171997,
"learning_rate": 0.0031675925925925925,
"loss": 0.4877,
"step": 1980
},
{
"epoch": 3.68,
"grad_norm": 1.8213857412338257,
"learning_rate": 0.0031583333333333337,
"loss": 0.5239,
"step": 1990
},
{
"epoch": 3.7,
"grad_norm": 1.074977993965149,
"learning_rate": 0.003149074074074074,
"loss": 0.4655,
"step": 2000
},
{
"epoch": 3.72,
"grad_norm": 1.3410956859588623,
"learning_rate": 0.003139814814814815,
"loss": 0.4097,
"step": 2010
},
{
"epoch": 3.74,
"grad_norm": 0.6797301173210144,
"learning_rate": 0.0031305555555555553,
"loss": 0.5193,
"step": 2020
},
{
"epoch": 3.76,
"grad_norm": 1.1780236959457397,
"learning_rate": 0.0031212962962962965,
"loss": 0.438,
"step": 2030
},
{
"epoch": 3.77,
"grad_norm": 1.4561818838119507,
"learning_rate": 0.0031120370370370373,
"loss": 0.5156,
"step": 2040
},
{
"epoch": 3.79,
"grad_norm": 1.404532790184021,
"learning_rate": 0.0031027777777777777,
"loss": 0.4319,
"step": 2050
},
{
"epoch": 3.81,
"grad_norm": 1.453012466430664,
"learning_rate": 0.003093518518518519,
"loss": 0.4678,
"step": 2060
},
{
"epoch": 3.83,
"grad_norm": 1.7319668531417847,
"learning_rate": 0.0030842592592592592,
"loss": 0.4567,
"step": 2070
},
{
"epoch": 3.85,
"grad_norm": 1.718991994857788,
"learning_rate": 0.003075,
"loss": 0.5631,
"step": 2080
},
{
"epoch": 3.87,
"grad_norm": 1.4493403434753418,
"learning_rate": 0.0030657407407407404,
"loss": 0.5046,
"step": 2090
},
{
"epoch": 3.89,
"grad_norm": 1.5228378772735596,
"learning_rate": 0.0030564814814814816,
"loss": 0.4583,
"step": 2100
},
{
"epoch": 3.9,
"grad_norm": 1.3635989427566528,
"learning_rate": 0.0030472222222222224,
"loss": 0.4678,
"step": 2110
},
{
"epoch": 3.92,
"grad_norm": 1.7252169847488403,
"learning_rate": 0.003037962962962963,
"loss": 0.4817,
"step": 2120
},
{
"epoch": 3.94,
"grad_norm": 1.4143972396850586,
"learning_rate": 0.0030287037037037036,
"loss": 0.4663,
"step": 2130
},
{
"epoch": 3.96,
"grad_norm": 1.108224868774414,
"learning_rate": 0.003019444444444445,
"loss": 0.5028,
"step": 2140
},
{
"epoch": 3.98,
"grad_norm": 1.5944609642028809,
"learning_rate": 0.003010185185185185,
"loss": 0.5215,
"step": 2150
},
{
"epoch": 4.0,
"grad_norm": 0.9836990833282471,
"learning_rate": 0.003000925925925926,
"loss": 0.3547,
"step": 2160
},
{
"epoch": 4.0,
"eval_accuracy": 0.966569095670929,
"eval_f1": 0.9701817800594627,
"eval_loss": 0.09807376563549042,
"eval_precision": 0.970888445046177,
"eval_recall": 0.9711737953539892,
"eval_runtime": 53.6052,
"eval_samples_per_second": 121.089,
"eval_steps_per_second": 7.574,
"step": 2162
},
{
"epoch": 4.01,
"grad_norm": 1.2644659280776978,
"learning_rate": 0.0029916666666666668,
"loss": 0.5066,
"step": 2170
},
{
"epoch": 4.03,
"grad_norm": 1.5990442037582397,
"learning_rate": 0.0029824074074074076,
"loss": 0.5394,
"step": 2180
},
{
"epoch": 4.05,
"grad_norm": 1.1844220161437988,
"learning_rate": 0.0029731481481481484,
"loss": 0.5361,
"step": 2190
},
{
"epoch": 4.07,
"grad_norm": 1.003459095954895,
"learning_rate": 0.0029638888888888887,
"loss": 0.4965,
"step": 2200
},
{
"epoch": 4.09,
"grad_norm": 1.5128463506698608,
"learning_rate": 0.00295462962962963,
"loss": 0.4961,
"step": 2210
},
{
"epoch": 4.11,
"grad_norm": 1.4107407331466675,
"learning_rate": 0.0029453703703703703,
"loss": 0.4157,
"step": 2220
},
{
"epoch": 4.13,
"grad_norm": 1.0188244581222534,
"learning_rate": 0.002936111111111111,
"loss": 0.4571,
"step": 2230
},
{
"epoch": 4.14,
"grad_norm": 1.7941489219665527,
"learning_rate": 0.0029268518518518515,
"loss": 0.4037,
"step": 2240
},
{
"epoch": 4.16,
"grad_norm": 0.703614354133606,
"learning_rate": 0.0029175925925925927,
"loss": 0.469,
"step": 2250
},
{
"epoch": 4.18,
"grad_norm": 1.0244730710983276,
"learning_rate": 0.0029083333333333335,
"loss": 0.446,
"step": 2260
},
{
"epoch": 4.2,
"grad_norm": 1.940670132637024,
"learning_rate": 0.002899074074074074,
"loss": 0.4198,
"step": 2270
},
{
"epoch": 4.22,
"grad_norm": 1.3189454078674316,
"learning_rate": 0.002889814814814815,
"loss": 0.4627,
"step": 2280
},
{
"epoch": 4.24,
"grad_norm": 1.0493321418762207,
"learning_rate": 0.002880555555555556,
"loss": 0.4885,
"step": 2290
},
{
"epoch": 4.26,
"grad_norm": 1.0230928659439087,
"learning_rate": 0.0028712962962962963,
"loss": 0.4205,
"step": 2300
},
{
"epoch": 4.27,
"grad_norm": 0.7459888458251953,
"learning_rate": 0.002862037037037037,
"loss": 0.4316,
"step": 2310
},
{
"epoch": 4.29,
"grad_norm": 1.2074246406555176,
"learning_rate": 0.002852777777777778,
"loss": 0.4352,
"step": 2320
},
{
"epoch": 4.31,
"grad_norm": 1.7306902408599854,
"learning_rate": 0.0028435185185185187,
"loss": 0.391,
"step": 2330
},
{
"epoch": 4.33,
"grad_norm": 1.4361604452133179,
"learning_rate": 0.002834259259259259,
"loss": 0.4819,
"step": 2340
},
{
"epoch": 4.35,
"grad_norm": 1.3152241706848145,
"learning_rate": 0.002825,
"loss": 0.3861,
"step": 2350
},
{
"epoch": 4.37,
"grad_norm": 1.6985182762145996,
"learning_rate": 0.002815740740740741,
"loss": 0.4093,
"step": 2360
},
{
"epoch": 4.38,
"grad_norm": 1.1082671880722046,
"learning_rate": 0.0028064814814814814,
"loss": 0.5325,
"step": 2370
},
{
"epoch": 4.4,
"grad_norm": 1.39676833152771,
"learning_rate": 0.002797222222222222,
"loss": 0.4798,
"step": 2380
},
{
"epoch": 4.42,
"grad_norm": 1.0717923641204834,
"learning_rate": 0.0027879629629629634,
"loss": 0.3892,
"step": 2390
},
{
"epoch": 4.44,
"grad_norm": 0.82547926902771,
"learning_rate": 0.002778703703703704,
"loss": 0.4563,
"step": 2400
},
{
"epoch": 4.46,
"grad_norm": 1.1817727088928223,
"learning_rate": 0.0027694444444444446,
"loss": 0.4213,
"step": 2410
},
{
"epoch": 4.48,
"grad_norm": 1.127348780632019,
"learning_rate": 0.002760185185185185,
"loss": 0.4636,
"step": 2420
},
{
"epoch": 4.5,
"grad_norm": 1.2709828615188599,
"learning_rate": 0.002750925925925926,
"loss": 0.5099,
"step": 2430
},
{
"epoch": 4.51,
"grad_norm": 1.093279242515564,
"learning_rate": 0.0027416666666666666,
"loss": 0.439,
"step": 2440
},
{
"epoch": 4.53,
"grad_norm": 0.8145557045936584,
"learning_rate": 0.0027324074074074074,
"loss": 0.4192,
"step": 2450
},
{
"epoch": 4.55,
"grad_norm": 1.5339893102645874,
"learning_rate": 0.0027231481481481477,
"loss": 0.4591,
"step": 2460
},
{
"epoch": 4.57,
"grad_norm": 1.2502670288085938,
"learning_rate": 0.002713888888888889,
"loss": 0.4177,
"step": 2470
},
{
"epoch": 4.59,
"grad_norm": 0.951678991317749,
"learning_rate": 0.0027046296296296297,
"loss": 0.4206,
"step": 2480
},
{
"epoch": 4.61,
"grad_norm": 1.7269920110702515,
"learning_rate": 0.00269537037037037,
"loss": 0.4318,
"step": 2490
},
{
"epoch": 4.63,
"grad_norm": 1.3369319438934326,
"learning_rate": 0.0026861111111111113,
"loss": 0.3328,
"step": 2500
},
{
"epoch": 4.64,
"grad_norm": 1.1424506902694702,
"learning_rate": 0.002676851851851852,
"loss": 0.4531,
"step": 2510
},
{
"epoch": 4.66,
"grad_norm": 0.7604424953460693,
"learning_rate": 0.0026675925925925925,
"loss": 0.4192,
"step": 2520
},
{
"epoch": 4.68,
"grad_norm": 1.501299500465393,
"learning_rate": 0.0026583333333333333,
"loss": 0.4588,
"step": 2530
},
{
"epoch": 4.7,
"grad_norm": 0.7411832213401794,
"learning_rate": 0.002649074074074074,
"loss": 0.507,
"step": 2540
},
{
"epoch": 4.72,
"grad_norm": 0.9906969666481018,
"learning_rate": 0.002639814814814815,
"loss": 0.4733,
"step": 2550
},
{
"epoch": 4.74,
"grad_norm": 1.5065710544586182,
"learning_rate": 0.0026305555555555557,
"loss": 0.4134,
"step": 2560
},
{
"epoch": 4.75,
"grad_norm": 1.1822443008422852,
"learning_rate": 0.0026212962962962965,
"loss": 0.4283,
"step": 2570
},
{
"epoch": 4.77,
"grad_norm": 1.1529394388198853,
"learning_rate": 0.0026120370370370373,
"loss": 0.4447,
"step": 2580
},
{
"epoch": 4.79,
"grad_norm": 1.294115424156189,
"learning_rate": 0.0026027777777777776,
"loss": 0.4889,
"step": 2590
},
{
"epoch": 4.81,
"grad_norm": 0.7541661858558655,
"learning_rate": 0.0025935185185185184,
"loss": 0.4539,
"step": 2600
},
{
"epoch": 4.83,
"grad_norm": 1.0515060424804688,
"learning_rate": 0.0025842592592592597,
"loss": 0.4458,
"step": 2610
},
{
"epoch": 4.85,
"grad_norm": 1.2407331466674805,
"learning_rate": 0.002575,
"loss": 0.4479,
"step": 2620
},
{
"epoch": 4.87,
"grad_norm": 1.4178121089935303,
"learning_rate": 0.002565740740740741,
"loss": 0.451,
"step": 2630
},
{
"epoch": 4.88,
"grad_norm": 0.9205562472343445,
"learning_rate": 0.002556481481481481,
"loss": 0.4179,
"step": 2640
},
{
"epoch": 4.9,
"grad_norm": 1.106848120689392,
"learning_rate": 0.0025472222222222224,
"loss": 0.4341,
"step": 2650
},
{
"epoch": 4.92,
"grad_norm": 0.6212519407272339,
"learning_rate": 0.0025379629629629632,
"loss": 0.3167,
"step": 2660
},
{
"epoch": 4.94,
"grad_norm": 2.72360897064209,
"learning_rate": 0.0025287037037037036,
"loss": 0.4454,
"step": 2670
},
{
"epoch": 4.96,
"grad_norm": 1.535974144935608,
"learning_rate": 0.002519444444444445,
"loss": 0.4625,
"step": 2680
},
{
"epoch": 4.98,
"grad_norm": 0.6457435488700867,
"learning_rate": 0.002510185185185185,
"loss": 0.3886,
"step": 2690
},
{
"epoch": 5.0,
"grad_norm": 0.9673438668251038,
"learning_rate": 0.002500925925925926,
"loss": 0.4852,
"step": 2700
},
{
"epoch": 5.0,
"eval_accuracy": 0.9816669234324449,
"eval_f1": 0.9842159244197872,
"eval_loss": 0.0539265014231205,
"eval_precision": 0.9847615394086703,
"eval_recall": 0.9841734919436653,
"eval_runtime": 53.4958,
"eval_samples_per_second": 121.337,
"eval_steps_per_second": 7.589,
"step": 2702
},
{
"epoch": 5.01,
"grad_norm": 0.9143092036247253,
"learning_rate": 0.0024916666666666668,
"loss": 0.4017,
"step": 2710
},
{
"epoch": 5.03,
"grad_norm": 1.2292033433914185,
"learning_rate": 0.0024824074074074076,
"loss": 0.3677,
"step": 2720
},
{
"epoch": 5.05,
"grad_norm": 1.2112282514572144,
"learning_rate": 0.0024731481481481484,
"loss": 0.5256,
"step": 2730
},
{
"epoch": 5.07,
"grad_norm": 1.3062794208526611,
"learning_rate": 0.0024638888888888887,
"loss": 0.45,
"step": 2740
},
{
"epoch": 5.09,
"grad_norm": 1.280093789100647,
"learning_rate": 0.0024546296296296295,
"loss": 0.4144,
"step": 2750
},
{
"epoch": 5.11,
"grad_norm": 0.9576690196990967,
"learning_rate": 0.0024453703703703703,
"loss": 0.4183,
"step": 2760
},
{
"epoch": 5.12,
"grad_norm": 1.3858096599578857,
"learning_rate": 0.002436111111111111,
"loss": 0.4774,
"step": 2770
},
{
"epoch": 5.14,
"grad_norm": 0.9120557904243469,
"learning_rate": 0.002426851851851852,
"loss": 0.4282,
"step": 2780
},
{
"epoch": 5.16,
"grad_norm": 0.9812218546867371,
"learning_rate": 0.0024175925925925927,
"loss": 0.4386,
"step": 2790
},
{
"epoch": 5.18,
"grad_norm": 0.8847913146018982,
"learning_rate": 0.0024083333333333335,
"loss": 0.3769,
"step": 2800
},
{
"epoch": 5.2,
"grad_norm": 0.6346982717514038,
"learning_rate": 0.002399074074074074,
"loss": 0.404,
"step": 2810
},
{
"epoch": 5.22,
"grad_norm": 1.937452793121338,
"learning_rate": 0.002389814814814815,
"loss": 0.4098,
"step": 2820
},
{
"epoch": 5.24,
"grad_norm": 0.6616402864456177,
"learning_rate": 0.0023805555555555555,
"loss": 0.4038,
"step": 2830
},
{
"epoch": 5.25,
"grad_norm": 1.248590111732483,
"learning_rate": 0.0023712962962962963,
"loss": 0.4208,
"step": 2840
},
{
"epoch": 5.27,
"grad_norm": 1.5833156108856201,
"learning_rate": 0.002362037037037037,
"loss": 0.3919,
"step": 2850
},
{
"epoch": 5.29,
"grad_norm": 1.1102122068405151,
"learning_rate": 0.002352777777777778,
"loss": 0.4135,
"step": 2860
},
{
"epoch": 5.31,
"grad_norm": 1.2193430662155151,
"learning_rate": 0.0023435185185185187,
"loss": 0.4343,
"step": 2870
},
{
"epoch": 5.33,
"grad_norm": 1.5274019241333008,
"learning_rate": 0.0023342592592592594,
"loss": 0.397,
"step": 2880
},
{
"epoch": 5.35,
"grad_norm": 1.0362614393234253,
"learning_rate": 0.0023250000000000002,
"loss": 0.3789,
"step": 2890
},
{
"epoch": 5.37,
"grad_norm": 1.0950071811676025,
"learning_rate": 0.0023157407407407406,
"loss": 0.4146,
"step": 2900
},
{
"epoch": 5.38,
"grad_norm": 1.6324676275253296,
"learning_rate": 0.002306481481481482,
"loss": 0.4602,
"step": 2910
},
{
"epoch": 5.4,
"grad_norm": 1.0304063558578491,
"learning_rate": 0.002297222222222222,
"loss": 0.3736,
"step": 2920
},
{
"epoch": 5.42,
"grad_norm": 0.8070071339607239,
"learning_rate": 0.002287962962962963,
"loss": 0.4444,
"step": 2930
},
{
"epoch": 5.44,
"grad_norm": 1.5730412006378174,
"learning_rate": 0.002278703703703704,
"loss": 0.4247,
"step": 2940
},
{
"epoch": 5.46,
"grad_norm": 0.8822392821311951,
"learning_rate": 0.0022694444444444446,
"loss": 0.3523,
"step": 2950
},
{
"epoch": 5.48,
"grad_norm": 1.6417633295059204,
"learning_rate": 0.002260185185185185,
"loss": 0.428,
"step": 2960
},
{
"epoch": 5.49,
"grad_norm": 1.0473852157592773,
"learning_rate": 0.002250925925925926,
"loss": 0.3916,
"step": 2970
},
{
"epoch": 5.51,
"grad_norm": 1.1455719470977783,
"learning_rate": 0.0022416666666666665,
"loss": 0.3337,
"step": 2980
},
{
"epoch": 5.53,
"grad_norm": 0.9637939929962158,
"learning_rate": 0.0022324074074074073,
"loss": 0.4168,
"step": 2990
},
{
"epoch": 5.55,
"grad_norm": 1.650855541229248,
"learning_rate": 0.002223148148148148,
"loss": 0.3526,
"step": 3000
},
{
"epoch": 5.57,
"grad_norm": 0.8603165745735168,
"learning_rate": 0.002213888888888889,
"loss": 0.3343,
"step": 3010
},
{
"epoch": 5.59,
"grad_norm": 0.9890497922897339,
"learning_rate": 0.0022046296296296297,
"loss": 0.3577,
"step": 3020
},
{
"epoch": 5.61,
"grad_norm": 1.0613206624984741,
"learning_rate": 0.0021953703703703705,
"loss": 0.4288,
"step": 3030
},
{
"epoch": 5.62,
"grad_norm": 0.8673573136329651,
"learning_rate": 0.0021861111111111113,
"loss": 0.3787,
"step": 3040
},
{
"epoch": 5.64,
"grad_norm": 0.9730037450790405,
"learning_rate": 0.0021768518518518517,
"loss": 0.3901,
"step": 3050
},
{
"epoch": 5.66,
"grad_norm": 0.9955042004585266,
"learning_rate": 0.0021675925925925925,
"loss": 0.3267,
"step": 3060
},
{
"epoch": 5.68,
"grad_norm": 1.3361483812332153,
"learning_rate": 0.0021583333333333333,
"loss": 0.3821,
"step": 3070
},
{
"epoch": 5.7,
"grad_norm": 1.1696761846542358,
"learning_rate": 0.002149074074074074,
"loss": 0.4164,
"step": 3080
},
{
"epoch": 5.72,
"grad_norm": 0.9910469055175781,
"learning_rate": 0.002139814814814815,
"loss": 0.3488,
"step": 3090
},
{
"epoch": 5.74,
"grad_norm": 1.1704002618789673,
"learning_rate": 0.0021305555555555557,
"loss": 0.3773,
"step": 3100
},
{
"epoch": 5.75,
"grad_norm": 1.9228627681732178,
"learning_rate": 0.0021212962962962965,
"loss": 0.3609,
"step": 3110
},
{
"epoch": 5.77,
"grad_norm": 1.2759217023849487,
"learning_rate": 0.002112037037037037,
"loss": 0.339,
"step": 3120
},
{
"epoch": 5.79,
"grad_norm": 1.3884625434875488,
"learning_rate": 0.002102777777777778,
"loss": 0.3427,
"step": 3130
},
{
"epoch": 5.81,
"grad_norm": 1.0535517930984497,
"learning_rate": 0.0020935185185185184,
"loss": 0.3684,
"step": 3140
},
{
"epoch": 5.83,
"grad_norm": 0.7472217082977295,
"learning_rate": 0.0020842592592592592,
"loss": 0.3315,
"step": 3150
},
{
"epoch": 5.85,
"grad_norm": 0.9054597020149231,
"learning_rate": 0.002075,
"loss": 0.4055,
"step": 3160
},
{
"epoch": 5.86,
"grad_norm": 0.9936026930809021,
"learning_rate": 0.002065740740740741,
"loss": 0.3684,
"step": 3170
},
{
"epoch": 5.88,
"grad_norm": 0.924675464630127,
"learning_rate": 0.002056481481481481,
"loss": 0.4467,
"step": 3180
},
{
"epoch": 5.9,
"grad_norm": 0.9270643591880798,
"learning_rate": 0.0020472222222222224,
"loss": 0.4563,
"step": 3190
},
{
"epoch": 5.92,
"grad_norm": 1.1928519010543823,
"learning_rate": 0.002037962962962963,
"loss": 0.436,
"step": 3200
},
{
"epoch": 5.94,
"grad_norm": 0.7696529030799866,
"learning_rate": 0.0020287037037037036,
"loss": 0.3837,
"step": 3210
},
{
"epoch": 5.96,
"grad_norm": 0.787181556224823,
"learning_rate": 0.0020194444444444444,
"loss": 0.3855,
"step": 3220
},
{
"epoch": 5.98,
"grad_norm": 0.9738120436668396,
"learning_rate": 0.002010185185185185,
"loss": 0.4042,
"step": 3230
},
{
"epoch": 5.99,
"grad_norm": 0.7975121140480042,
"learning_rate": 0.002000925925925926,
"loss": 0.406,
"step": 3240
},
{
"epoch": 6.0,
"eval_accuracy": 0.9748883068864582,
"eval_f1": 0.9767778940213332,
"eval_loss": 0.08178560435771942,
"eval_precision": 0.9793037526964313,
"eval_recall": 0.9752347424170356,
"eval_runtime": 52.9882,
"eval_samples_per_second": 122.499,
"eval_steps_per_second": 7.662,
"step": 3243
},
{
"epoch": 6.01,
"grad_norm": 1.21721351146698,
"learning_rate": 0.0019916666666666668,
"loss": 0.3252,
"step": 3250
},
{
"epoch": 6.03,
"grad_norm": 1.1153512001037598,
"learning_rate": 0.0019824074074074076,
"loss": 0.364,
"step": 3260
},
{
"epoch": 6.05,
"grad_norm": 0.9769272208213806,
"learning_rate": 0.001973148148148148,
"loss": 0.3886,
"step": 3270
},
{
"epoch": 6.07,
"grad_norm": 1.260012149810791,
"learning_rate": 0.001963888888888889,
"loss": 0.3462,
"step": 3280
},
{
"epoch": 6.09,
"grad_norm": 1.2162351608276367,
"learning_rate": 0.0019546296296296295,
"loss": 0.3472,
"step": 3290
},
{
"epoch": 6.11,
"grad_norm": 1.2838939428329468,
"learning_rate": 0.0019453703703703703,
"loss": 0.3427,
"step": 3300
},
{
"epoch": 6.12,
"grad_norm": 0.850970447063446,
"learning_rate": 0.0019361111111111113,
"loss": 0.3793,
"step": 3310
},
{
"epoch": 6.14,
"grad_norm": 0.7692118883132935,
"learning_rate": 0.001926851851851852,
"loss": 0.3029,
"step": 3320
},
{
"epoch": 6.16,
"grad_norm": 1.337117314338684,
"learning_rate": 0.0019175925925925927,
"loss": 0.3526,
"step": 3330
},
{
"epoch": 6.18,
"grad_norm": 1.0097956657409668,
"learning_rate": 0.0019083333333333333,
"loss": 0.3544,
"step": 3340
},
{
"epoch": 6.2,
"grad_norm": 1.1597776412963867,
"learning_rate": 0.001899074074074074,
"loss": 0.3266,
"step": 3350
},
{
"epoch": 6.22,
"grad_norm": 0.8050183653831482,
"learning_rate": 0.0018898148148148149,
"loss": 0.352,
"step": 3360
},
{
"epoch": 6.23,
"grad_norm": 1.197143316268921,
"learning_rate": 0.0018805555555555557,
"loss": 0.3538,
"step": 3370
},
{
"epoch": 6.25,
"grad_norm": 1.5818617343902588,
"learning_rate": 0.0018712962962962962,
"loss": 0.3441,
"step": 3380
},
{
"epoch": 6.27,
"grad_norm": 0.6569982767105103,
"learning_rate": 0.001862037037037037,
"loss": 0.3436,
"step": 3390
},
{
"epoch": 6.29,
"grad_norm": 0.7630707025527954,
"learning_rate": 0.0018527777777777778,
"loss": 0.3789,
"step": 3400
},
{
"epoch": 6.31,
"grad_norm": 0.8499374389648438,
"learning_rate": 0.0018435185185185186,
"loss": 0.3351,
"step": 3410
},
{
"epoch": 6.33,
"grad_norm": 0.8909932374954224,
"learning_rate": 0.0018342592592592594,
"loss": 0.2914,
"step": 3420
},
{
"epoch": 6.35,
"grad_norm": 0.9815020561218262,
"learning_rate": 0.001825,
"loss": 0.3234,
"step": 3430
},
{
"epoch": 6.36,
"grad_norm": 0.9451941251754761,
"learning_rate": 0.0018157407407407408,
"loss": 0.3016,
"step": 3440
},
{
"epoch": 6.38,
"grad_norm": 1.1062078475952148,
"learning_rate": 0.0018064814814814814,
"loss": 0.3243,
"step": 3450
},
{
"epoch": 6.4,
"grad_norm": 1.119948387145996,
"learning_rate": 0.0017972222222222224,
"loss": 0.4004,
"step": 3460
},
{
"epoch": 6.42,
"grad_norm": 1.0116353034973145,
"learning_rate": 0.001787962962962963,
"loss": 0.3816,
"step": 3470
},
{
"epoch": 6.44,
"grad_norm": 1.270749568939209,
"learning_rate": 0.0017787037037037038,
"loss": 0.3961,
"step": 3480
},
{
"epoch": 6.46,
"grad_norm": 0.7347724437713623,
"learning_rate": 0.0017694444444444444,
"loss": 0.3183,
"step": 3490
},
{
"epoch": 6.48,
"grad_norm": 1.3908417224884033,
"learning_rate": 0.0017601851851851852,
"loss": 0.3717,
"step": 3500
},
{
"epoch": 6.49,
"grad_norm": 1.0049611330032349,
"learning_rate": 0.0017509259259259262,
"loss": 0.3804,
"step": 3510
},
{
"epoch": 6.51,
"grad_norm": 0.8181151747703552,
"learning_rate": 0.0017416666666666668,
"loss": 0.3683,
"step": 3520
},
{
"epoch": 6.53,
"grad_norm": 1.0311352014541626,
"learning_rate": 0.0017324074074074076,
"loss": 0.3813,
"step": 3530
},
{
"epoch": 6.55,
"grad_norm": 0.6164132356643677,
"learning_rate": 0.0017231481481481481,
"loss": 0.342,
"step": 3540
},
{
"epoch": 6.57,
"grad_norm": 0.7826759219169617,
"learning_rate": 0.001713888888888889,
"loss": 0.2858,
"step": 3550
},
{
"epoch": 6.59,
"grad_norm": 0.6946287155151367,
"learning_rate": 0.0017046296296296295,
"loss": 0.3245,
"step": 3560
},
{
"epoch": 6.6,
"grad_norm": 1.3883477449417114,
"learning_rate": 0.0016953703703703705,
"loss": 0.3492,
"step": 3570
},
{
"epoch": 6.62,
"grad_norm": 1.0445375442504883,
"learning_rate": 0.001686111111111111,
"loss": 0.2873,
"step": 3580
},
{
"epoch": 6.64,
"grad_norm": 1.155643105506897,
"learning_rate": 0.001676851851851852,
"loss": 0.3771,
"step": 3590
},
{
"epoch": 6.66,
"grad_norm": 0.8850951790809631,
"learning_rate": 0.0016675925925925925,
"loss": 0.3412,
"step": 3600
},
{
"epoch": 6.68,
"grad_norm": 0.8685129880905151,
"learning_rate": 0.0016583333333333333,
"loss": 0.3489,
"step": 3610
},
{
"epoch": 6.7,
"grad_norm": 1.20561683177948,
"learning_rate": 0.0016490740740740743,
"loss": 0.3253,
"step": 3620
},
{
"epoch": 6.72,
"grad_norm": 0.8918961882591248,
"learning_rate": 0.0016398148148148149,
"loss": 0.3279,
"step": 3630
},
{
"epoch": 6.73,
"grad_norm": 1.1090009212493896,
"learning_rate": 0.0016305555555555557,
"loss": 0.2887,
"step": 3640
},
{
"epoch": 6.75,
"grad_norm": 1.1889126300811768,
"learning_rate": 0.0016212962962962962,
"loss": 0.3391,
"step": 3650
},
{
"epoch": 6.77,
"grad_norm": 0.7774761915206909,
"learning_rate": 0.001612037037037037,
"loss": 0.354,
"step": 3660
},
{
"epoch": 6.79,
"grad_norm": 0.9942853450775146,
"learning_rate": 0.0016027777777777776,
"loss": 0.4198,
"step": 3670
},
{
"epoch": 6.81,
"grad_norm": 0.5383427143096924,
"learning_rate": 0.0015935185185185186,
"loss": 0.2784,
"step": 3680
},
{
"epoch": 6.83,
"grad_norm": 1.0659656524658203,
"learning_rate": 0.0015842592592592592,
"loss": 0.3369,
"step": 3690
},
{
"epoch": 6.85,
"grad_norm": 1.0167818069458008,
"learning_rate": 0.001575,
"loss": 0.3465,
"step": 3700
},
{
"epoch": 6.86,
"grad_norm": 0.9056137204170227,
"learning_rate": 0.0015657407407407408,
"loss": 0.3111,
"step": 3710
},
{
"epoch": 6.88,
"grad_norm": 0.9691112041473389,
"learning_rate": 0.0015564814814814816,
"loss": 0.2901,
"step": 3720
},
{
"epoch": 6.9,
"grad_norm": 1.0727977752685547,
"learning_rate": 0.0015472222222222224,
"loss": 0.3614,
"step": 3730
},
{
"epoch": 6.92,
"grad_norm": 0.8301049470901489,
"learning_rate": 0.001537962962962963,
"loss": 0.3251,
"step": 3740
},
{
"epoch": 6.94,
"grad_norm": 0.6486156582832336,
"learning_rate": 0.0015287037037037038,
"loss": 0.3135,
"step": 3750
},
{
"epoch": 6.96,
"grad_norm": 1.2031179666519165,
"learning_rate": 0.0015194444444444444,
"loss": 0.3371,
"step": 3760
},
{
"epoch": 6.98,
"grad_norm": 1.1333447694778442,
"learning_rate": 0.0015101851851851854,
"loss": 0.3242,
"step": 3770
},
{
"epoch": 6.99,
"grad_norm": 1.0811736583709717,
"learning_rate": 0.001500925925925926,
"loss": 0.3074,
"step": 3780
},
{
"epoch": 7.0,
"eval_accuracy": 0.966569095670929,
"eval_f1": 0.9782929900155292,
"eval_loss": 0.1289132982492447,
"eval_precision": 0.9815165094060785,
"eval_recall": 0.977802007605107,
"eval_runtime": 52.9121,
"eval_samples_per_second": 122.675,
"eval_steps_per_second": 7.673,
"step": 3783
},
{
"epoch": 7.01,
"grad_norm": 0.5184229016304016,
"learning_rate": 0.0014916666666666667,
"loss": 0.3532,
"step": 3790
},
{
"epoch": 7.03,
"grad_norm": 0.9822409152984619,
"learning_rate": 0.0014824074074074073,
"loss": 0.3701,
"step": 3800
},
{
"epoch": 7.05,
"grad_norm": 0.6723494529724121,
"learning_rate": 0.0014731481481481481,
"loss": 0.3553,
"step": 3810
},
{
"epoch": 7.07,
"grad_norm": 1.0363885164260864,
"learning_rate": 0.0014638888888888891,
"loss": 0.2526,
"step": 3820
},
{
"epoch": 7.09,
"grad_norm": 0.9155850410461426,
"learning_rate": 0.0014546296296296297,
"loss": 0.3045,
"step": 3830
},
{
"epoch": 7.1,
"grad_norm": 1.412392497062683,
"learning_rate": 0.0014453703703703705,
"loss": 0.2868,
"step": 3840
},
{
"epoch": 7.12,
"grad_norm": 0.9707776308059692,
"learning_rate": 0.001436111111111111,
"loss": 0.3537,
"step": 3850
},
{
"epoch": 7.14,
"grad_norm": 1.2895119190216064,
"learning_rate": 0.001426851851851852,
"loss": 0.2961,
"step": 3860
},
{
"epoch": 7.16,
"grad_norm": 0.8239867687225342,
"learning_rate": 0.0014175925925925925,
"loss": 0.2836,
"step": 3870
},
{
"epoch": 7.18,
"grad_norm": 1.1721490621566772,
"learning_rate": 0.0014083333333333335,
"loss": 0.3331,
"step": 3880
},
{
"epoch": 7.2,
"grad_norm": 0.6656754612922668,
"learning_rate": 0.001399074074074074,
"loss": 0.3221,
"step": 3890
},
{
"epoch": 7.22,
"grad_norm": 0.8847745656967163,
"learning_rate": 0.0013898148148148149,
"loss": 0.3361,
"step": 3900
},
{
"epoch": 7.23,
"grad_norm": 1.1900001764297485,
"learning_rate": 0.0013805555555555554,
"loss": 0.278,
"step": 3910
},
{
"epoch": 7.25,
"grad_norm": 0.6611624956130981,
"learning_rate": 0.0013712962962962962,
"loss": 0.3078,
"step": 3920
},
{
"epoch": 7.27,
"grad_norm": 0.8776438236236572,
"learning_rate": 0.0013620370370370373,
"loss": 0.3361,
"step": 3930
},
{
"epoch": 7.29,
"grad_norm": 0.8815858960151672,
"learning_rate": 0.0013527777777777778,
"loss": 0.2727,
"step": 3940
},
{
"epoch": 7.31,
"grad_norm": 0.8579031825065613,
"learning_rate": 0.0013435185185185186,
"loss": 0.2954,
"step": 3950
},
{
"epoch": 7.33,
"grad_norm": 0.9951643943786621,
"learning_rate": 0.0013342592592592592,
"loss": 0.3186,
"step": 3960
},
{
"epoch": 7.35,
"grad_norm": 0.7032579779624939,
"learning_rate": 0.001325,
"loss": 0.3374,
"step": 3970
},
{
"epoch": 7.36,
"grad_norm": 0.8467724919319153,
"learning_rate": 0.0013157407407407406,
"loss": 0.3459,
"step": 3980
},
{
"epoch": 7.38,
"grad_norm": 1.0427024364471436,
"learning_rate": 0.0013064814814814816,
"loss": 0.3223,
"step": 3990
},
{
"epoch": 7.4,
"grad_norm": 1.081141710281372,
"learning_rate": 0.0012972222222222222,
"loss": 0.3421,
"step": 4000
},
{
"epoch": 7.42,
"grad_norm": 1.1036946773529053,
"learning_rate": 0.001287962962962963,
"loss": 0.3357,
"step": 4010
},
{
"epoch": 7.44,
"grad_norm": 0.47026312351226807,
"learning_rate": 0.0012787037037037038,
"loss": 0.3413,
"step": 4020
},
{
"epoch": 7.46,
"grad_norm": 0.6322646141052246,
"learning_rate": 0.0012694444444444444,
"loss": 0.3384,
"step": 4030
},
{
"epoch": 7.47,
"grad_norm": 1.1694568395614624,
"learning_rate": 0.0012601851851851854,
"loss": 0.2964,
"step": 4040
},
{
"epoch": 7.49,
"grad_norm": 0.6389946341514587,
"learning_rate": 0.001250925925925926,
"loss": 0.2363,
"step": 4050
},
{
"epoch": 7.51,
"grad_norm": 0.8997761011123657,
"learning_rate": 0.0012416666666666667,
"loss": 0.3002,
"step": 4060
},
{
"epoch": 7.53,
"grad_norm": 1.0065665245056152,
"learning_rate": 0.0012324074074074073,
"loss": 0.2559,
"step": 4070
},
{
"epoch": 7.55,
"grad_norm": 1.022987723350525,
"learning_rate": 0.0012231481481481483,
"loss": 0.3338,
"step": 4080
},
{
"epoch": 7.57,
"grad_norm": 1.0679833889007568,
"learning_rate": 0.001213888888888889,
"loss": 0.284,
"step": 4090
},
{
"epoch": 7.59,
"grad_norm": 0.607288122177124,
"learning_rate": 0.0012046296296296297,
"loss": 0.2509,
"step": 4100
},
{
"epoch": 7.6,
"grad_norm": 0.5742871165275574,
"learning_rate": 0.0011953703703703705,
"loss": 0.2537,
"step": 4110
},
{
"epoch": 7.62,
"grad_norm": 1.2387137413024902,
"learning_rate": 0.001186111111111111,
"loss": 0.2565,
"step": 4120
},
{
"epoch": 7.64,
"grad_norm": 1.174902319908142,
"learning_rate": 0.0011768518518518519,
"loss": 0.2997,
"step": 4130
},
{
"epoch": 7.66,
"grad_norm": 0.9305229783058167,
"learning_rate": 0.0011675925925925927,
"loss": 0.3119,
"step": 4140
},
{
"epoch": 7.68,
"grad_norm": 0.8137575387954712,
"learning_rate": 0.0011583333333333333,
"loss": 0.3212,
"step": 4150
},
{
"epoch": 7.7,
"grad_norm": 0.8960317373275757,
"learning_rate": 0.001149074074074074,
"loss": 0.2598,
"step": 4160
},
{
"epoch": 7.72,
"grad_norm": 0.9751859903335571,
"learning_rate": 0.0011398148148148149,
"loss": 0.2888,
"step": 4170
},
{
"epoch": 7.73,
"grad_norm": 1.0055298805236816,
"learning_rate": 0.0011305555555555557,
"loss": 0.3002,
"step": 4180
},
{
"epoch": 7.75,
"grad_norm": 0.8062968254089355,
"learning_rate": 0.0011212962962962965,
"loss": 0.3344,
"step": 4190
},
{
"epoch": 7.77,
"grad_norm": 0.95457923412323,
"learning_rate": 0.001112037037037037,
"loss": 0.3063,
"step": 4200
},
{
"epoch": 7.79,
"grad_norm": 1.3791453838348389,
"learning_rate": 0.0011027777777777778,
"loss": 0.317,
"step": 4210
},
{
"epoch": 7.81,
"grad_norm": 0.874679684638977,
"learning_rate": 0.0010935185185185186,
"loss": 0.3217,
"step": 4220
},
{
"epoch": 7.83,
"grad_norm": 0.5453481078147888,
"learning_rate": 0.0010842592592592592,
"loss": 0.2493,
"step": 4230
},
{
"epoch": 7.84,
"grad_norm": 1.0427902936935425,
"learning_rate": 0.001075,
"loss": 0.2792,
"step": 4240
},
{
"epoch": 7.86,
"grad_norm": 1.1764284372329712,
"learning_rate": 0.0010657407407407408,
"loss": 0.2898,
"step": 4250
},
{
"epoch": 7.88,
"grad_norm": 0.6771812438964844,
"learning_rate": 0.0010564814814814814,
"loss": 0.373,
"step": 4260
},
{
"epoch": 7.9,
"grad_norm": 0.8747534155845642,
"learning_rate": 0.0010472222222222222,
"loss": 0.268,
"step": 4270
},
{
"epoch": 7.92,
"grad_norm": 1.2883553504943848,
"learning_rate": 0.001037962962962963,
"loss": 0.2717,
"step": 4280
},
{
"epoch": 7.94,
"grad_norm": 0.7051785588264465,
"learning_rate": 0.0010287037037037038,
"loss": 0.3066,
"step": 4290
},
{
"epoch": 7.96,
"grad_norm": 0.7398721575737,
"learning_rate": 0.0010194444444444446,
"loss": 0.2995,
"step": 4300
},
{
"epoch": 7.97,
"grad_norm": 1.145930290222168,
"learning_rate": 0.0010101851851851851,
"loss": 0.3045,
"step": 4310
},
{
"epoch": 7.99,
"grad_norm": 1.0502551794052124,
"learning_rate": 0.001000925925925926,
"loss": 0.2679,
"step": 4320
},
{
"epoch": 8.0,
"eval_accuracy": 0.9899861346479741,
"eval_f1": 0.9912451005847,
"eval_loss": 0.031062940135598183,
"eval_precision": 0.9916388059857394,
"eval_recall": 0.9909443527430603,
"eval_runtime": 52.8878,
"eval_samples_per_second": 122.731,
"eval_steps_per_second": 7.677,
"step": 4324
},
{
"epoch": 8.01,
"grad_norm": 1.0673542022705078,
"learning_rate": 0.0009916666666666667,
"loss": 0.3395,
"step": 4330
},
{
"epoch": 8.03,
"grad_norm": 0.7167376279830933,
"learning_rate": 0.0009824074074074073,
"loss": 0.2648,
"step": 4340
},
{
"epoch": 8.05,
"grad_norm": 0.7471597194671631,
"learning_rate": 0.0009731481481481481,
"loss": 0.3121,
"step": 4350
},
{
"epoch": 8.07,
"grad_norm": 1.22334623336792,
"learning_rate": 0.0009638888888888889,
"loss": 0.2867,
"step": 4360
},
{
"epoch": 8.09,
"grad_norm": 1.0853790044784546,
"learning_rate": 0.0009546296296296296,
"loss": 0.2639,
"step": 4370
},
{
"epoch": 8.1,
"grad_norm": 0.9974374771118164,
"learning_rate": 0.0009453703703703703,
"loss": 0.2679,
"step": 4380
},
{
"epoch": 8.12,
"grad_norm": 0.5487973093986511,
"learning_rate": 0.0009361111111111112,
"loss": 0.2552,
"step": 4390
},
{
"epoch": 8.14,
"grad_norm": 1.0462454557418823,
"learning_rate": 0.0009268518518518519,
"loss": 0.2371,
"step": 4400
},
{
"epoch": 8.16,
"grad_norm": 0.9754990935325623,
"learning_rate": 0.0009175925925925927,
"loss": 0.2792,
"step": 4410
},
{
"epoch": 8.18,
"grad_norm": 1.371444821357727,
"learning_rate": 0.0009083333333333334,
"loss": 0.2508,
"step": 4420
},
{
"epoch": 8.2,
"grad_norm": 1.4113060235977173,
"learning_rate": 0.0008990740740740741,
"loss": 0.2875,
"step": 4430
},
{
"epoch": 8.21,
"grad_norm": 0.7861378788948059,
"learning_rate": 0.0008898148148148149,
"loss": 0.3163,
"step": 4440
},
{
"epoch": 8.23,
"grad_norm": 0.6464210152626038,
"learning_rate": 0.0008805555555555555,
"loss": 0.2845,
"step": 4450
},
{
"epoch": 8.25,
"grad_norm": 1.1672011613845825,
"learning_rate": 0.0008712962962962962,
"loss": 0.2807,
"step": 4460
},
{
"epoch": 8.27,
"grad_norm": 0.8441992402076721,
"learning_rate": 0.000862037037037037,
"loss": 0.3093,
"step": 4470
},
{
"epoch": 8.29,
"grad_norm": 0.6474927663803101,
"learning_rate": 0.0008527777777777777,
"loss": 0.2609,
"step": 4480
},
{
"epoch": 8.31,
"grad_norm": 0.5043164491653442,
"learning_rate": 0.0008435185185185186,
"loss": 0.3138,
"step": 4490
},
{
"epoch": 8.33,
"grad_norm": 0.9660760164260864,
"learning_rate": 0.0008342592592592593,
"loss": 0.3032,
"step": 4500
},
{
"epoch": 8.34,
"grad_norm": 1.2470887899398804,
"learning_rate": 0.0008250000000000001,
"loss": 0.3015,
"step": 4510
},
{
"epoch": 8.36,
"grad_norm": 1.4032260179519653,
"learning_rate": 0.0008157407407407408,
"loss": 0.3132,
"step": 4520
},
{
"epoch": 8.38,
"grad_norm": 0.9354479312896729,
"learning_rate": 0.0008064814814814815,
"loss": 0.2862,
"step": 4530
},
{
"epoch": 8.4,
"grad_norm": 0.8781515955924988,
"learning_rate": 0.0007972222222222223,
"loss": 0.2636,
"step": 4540
},
{
"epoch": 8.42,
"grad_norm": 0.8106943964958191,
"learning_rate": 0.000787962962962963,
"loss": 0.3125,
"step": 4550
},
{
"epoch": 8.44,
"grad_norm": 0.7355839610099792,
"learning_rate": 0.0007787037037037037,
"loss": 0.2896,
"step": 4560
},
{
"epoch": 8.46,
"grad_norm": 0.6883030533790588,
"learning_rate": 0.0007694444444444445,
"loss": 0.2534,
"step": 4570
},
{
"epoch": 8.47,
"grad_norm": 0.7917162179946899,
"learning_rate": 0.0007601851851851851,
"loss": 0.2627,
"step": 4580
},
{
"epoch": 8.49,
"grad_norm": 0.8958062529563904,
"learning_rate": 0.0007509259259259258,
"loss": 0.2573,
"step": 4590
},
{
"epoch": 8.51,
"grad_norm": 0.8534148335456848,
"learning_rate": 0.0007416666666666667,
"loss": 0.2715,
"step": 4600
},
{
"epoch": 8.53,
"grad_norm": 0.8119780421257019,
"learning_rate": 0.0007324074074074074,
"loss": 0.2455,
"step": 4610
},
{
"epoch": 8.55,
"grad_norm": 1.1178597211837769,
"learning_rate": 0.0007231481481481482,
"loss": 0.2798,
"step": 4620
},
{
"epoch": 8.57,
"grad_norm": 0.8982829451560974,
"learning_rate": 0.0007138888888888889,
"loss": 0.2816,
"step": 4630
},
{
"epoch": 8.58,
"grad_norm": 0.8414255976676941,
"learning_rate": 0.0007046296296296296,
"loss": 0.263,
"step": 4640
},
{
"epoch": 8.6,
"grad_norm": 0.6894321441650391,
"learning_rate": 0.0006953703703703704,
"loss": 0.2551,
"step": 4650
},
{
"epoch": 8.62,
"grad_norm": 0.9369879364967346,
"learning_rate": 0.0006861111111111111,
"loss": 0.234,
"step": 4660
},
{
"epoch": 8.64,
"grad_norm": 0.7685711979866028,
"learning_rate": 0.0006768518518518519,
"loss": 0.2669,
"step": 4670
},
{
"epoch": 8.66,
"grad_norm": 0.866146981716156,
"learning_rate": 0.0006675925925925926,
"loss": 0.2613,
"step": 4680
},
{
"epoch": 8.68,
"grad_norm": 0.7694200873374939,
"learning_rate": 0.0006583333333333333,
"loss": 0.3076,
"step": 4690
},
{
"epoch": 8.7,
"grad_norm": 0.7113105058670044,
"learning_rate": 0.0006490740740740742,
"loss": 0.2818,
"step": 4700
},
{
"epoch": 8.71,
"grad_norm": 0.6957096457481384,
"learning_rate": 0.0006398148148148148,
"loss": 0.2123,
"step": 4710
},
{
"epoch": 8.73,
"grad_norm": 0.7736373543739319,
"learning_rate": 0.0006305555555555556,
"loss": 0.2896,
"step": 4720
},
{
"epoch": 8.75,
"grad_norm": 0.7972449660301208,
"learning_rate": 0.0006212962962962963,
"loss": 0.2606,
"step": 4730
},
{
"epoch": 8.77,
"grad_norm": 1.4942100048065186,
"learning_rate": 0.000612037037037037,
"loss": 0.2688,
"step": 4740
},
{
"epoch": 8.79,
"grad_norm": 0.5527027249336243,
"learning_rate": 0.0006027777777777778,
"loss": 0.2311,
"step": 4750
},
{
"epoch": 8.81,
"grad_norm": 0.7232396006584167,
"learning_rate": 0.0005935185185185185,
"loss": 0.2102,
"step": 4760
},
{
"epoch": 8.83,
"grad_norm": 0.8936424851417542,
"learning_rate": 0.0005842592592592592,
"loss": 0.2185,
"step": 4770
},
{
"epoch": 8.84,
"grad_norm": 0.5523704290390015,
"learning_rate": 0.000575,
"loss": 0.2443,
"step": 4780
},
{
"epoch": 8.86,
"grad_norm": 0.6891120672225952,
"learning_rate": 0.0005657407407407408,
"loss": 0.2722,
"step": 4790
},
{
"epoch": 8.88,
"grad_norm": 0.8618466854095459,
"learning_rate": 0.0005564814814814815,
"loss": 0.2782,
"step": 4800
},
{
"epoch": 8.9,
"grad_norm": 0.601006269454956,
"learning_rate": 0.0005472222222222223,
"loss": 0.3289,
"step": 4810
},
{
"epoch": 8.92,
"grad_norm": 0.5365596413612366,
"learning_rate": 0.000537962962962963,
"loss": 0.2511,
"step": 4820
},
{
"epoch": 8.94,
"grad_norm": 1.0211495161056519,
"learning_rate": 0.0005287037037037038,
"loss": 0.2964,
"step": 4830
},
{
"epoch": 8.95,
"grad_norm": 0.838763952255249,
"learning_rate": 0.0005194444444444444,
"loss": 0.2373,
"step": 4840
},
{
"epoch": 8.97,
"grad_norm": 0.9094264507293701,
"learning_rate": 0.0005101851851851852,
"loss": 0.2447,
"step": 4850
},
{
"epoch": 8.99,
"grad_norm": 0.5573397874832153,
"learning_rate": 0.0005009259259259259,
"loss": 0.2439,
"step": 4860
},
{
"epoch": 9.0,
"eval_accuracy": 0.9850562317054383,
"eval_f1": 0.9881355002597147,
"eval_loss": 0.05773022025823593,
"eval_precision": 0.9885823118322556,
"eval_recall": 0.9880245106891419,
"eval_runtime": 52.821,
"eval_samples_per_second": 122.887,
"eval_steps_per_second": 7.686,
"step": 4864
},
{
"epoch": 9.01,
"grad_norm": 0.5047478675842285,
"learning_rate": 0.0004916666666666666,
"loss": 0.2301,
"step": 4870
},
{
"epoch": 9.03,
"grad_norm": 0.8074342012405396,
"learning_rate": 0.00048240740740740747,
"loss": 0.2168,
"step": 4880
},
{
"epoch": 9.05,
"grad_norm": 0.9957315921783447,
"learning_rate": 0.00047314814814814816,
"loss": 0.2546,
"step": 4890
},
{
"epoch": 9.07,
"grad_norm": 1.1737314462661743,
"learning_rate": 0.0004638888888888889,
"loss": 0.2526,
"step": 4900
},
{
"epoch": 9.08,
"grad_norm": 0.47173938155174255,
"learning_rate": 0.00045462962962962964,
"loss": 0.281,
"step": 4910
},
{
"epoch": 9.1,
"grad_norm": 0.7715136408805847,
"learning_rate": 0.00044537037037037033,
"loss": 0.2679,
"step": 4920
},
{
"epoch": 9.12,
"grad_norm": 1.0114617347717285,
"learning_rate": 0.00043611111111111113,
"loss": 0.2568,
"step": 4930
},
{
"epoch": 9.14,
"grad_norm": 0.7581719160079956,
"learning_rate": 0.00042685185185185187,
"loss": 0.1955,
"step": 4940
},
{
"epoch": 9.16,
"grad_norm": 0.9538590908050537,
"learning_rate": 0.0004175925925925926,
"loss": 0.2764,
"step": 4950
},
{
"epoch": 9.18,
"grad_norm": 0.9304882884025574,
"learning_rate": 0.00040833333333333336,
"loss": 0.2225,
"step": 4960
},
{
"epoch": 9.2,
"grad_norm": 1.7399593591690063,
"learning_rate": 0.00039907407407407404,
"loss": 0.2194,
"step": 4970
},
{
"epoch": 9.21,
"grad_norm": 1.2996737957000732,
"learning_rate": 0.0003898148148148148,
"loss": 0.2552,
"step": 4980
},
{
"epoch": 9.23,
"grad_norm": 0.9532219767570496,
"learning_rate": 0.0003805555555555556,
"loss": 0.2371,
"step": 4990
},
{
"epoch": 9.25,
"grad_norm": 0.5695217251777649,
"learning_rate": 0.0003712962962962963,
"loss": 0.1966,
"step": 5000
},
{
"epoch": 9.27,
"grad_norm": 0.8924893140792847,
"learning_rate": 0.000362037037037037,
"loss": 0.2367,
"step": 5010
},
{
"epoch": 9.29,
"grad_norm": 0.4599965214729309,
"learning_rate": 0.00035277777777777776,
"loss": 0.2338,
"step": 5020
},
{
"epoch": 9.31,
"grad_norm": 1.544420838356018,
"learning_rate": 0.0003435185185185185,
"loss": 0.2474,
"step": 5030
},
{
"epoch": 9.32,
"grad_norm": 0.7134206295013428,
"learning_rate": 0.0003342592592592593,
"loss": 0.2406,
"step": 5040
},
{
"epoch": 9.34,
"grad_norm": 0.510735273361206,
"learning_rate": 0.00032500000000000004,
"loss": 0.2181,
"step": 5050
},
{
"epoch": 9.36,
"grad_norm": 0.7918537855148315,
"learning_rate": 0.00031574074074074073,
"loss": 0.2582,
"step": 5060
},
{
"epoch": 9.38,
"grad_norm": 1.1104782819747925,
"learning_rate": 0.00030648148148148147,
"loss": 0.2531,
"step": 5070
},
{
"epoch": 9.4,
"grad_norm": 0.8858848810195923,
"learning_rate": 0.0002972222222222222,
"loss": 0.256,
"step": 5080
},
{
"epoch": 9.42,
"grad_norm": 0.9814337491989136,
"learning_rate": 0.00028796296296296296,
"loss": 0.2301,
"step": 5090
},
{
"epoch": 9.44,
"grad_norm": 0.8091614246368408,
"learning_rate": 0.0002787037037037037,
"loss": 0.243,
"step": 5100
},
{
"epoch": 9.45,
"grad_norm": 0.8779675960540771,
"learning_rate": 0.00026944444444444444,
"loss": 0.2248,
"step": 5110
},
{
"epoch": 9.47,
"grad_norm": 0.7793891429901123,
"learning_rate": 0.0002601851851851852,
"loss": 0.2431,
"step": 5120
},
{
"epoch": 9.49,
"grad_norm": 0.7594252824783325,
"learning_rate": 0.0002509259259259259,
"loss": 0.2766,
"step": 5130
},
{
"epoch": 9.51,
"grad_norm": 0.7500623464584351,
"learning_rate": 0.00024166666666666667,
"loss": 0.2608,
"step": 5140
},
{
"epoch": 9.53,
"grad_norm": 1.316781997680664,
"learning_rate": 0.00023240740740740744,
"loss": 0.2924,
"step": 5150
},
{
"epoch": 9.55,
"grad_norm": 0.6205342411994934,
"learning_rate": 0.00022314814814814815,
"loss": 0.2192,
"step": 5160
},
{
"epoch": 9.57,
"grad_norm": 0.4915425181388855,
"learning_rate": 0.0002138888888888889,
"loss": 0.2284,
"step": 5170
},
{
"epoch": 9.58,
"grad_norm": 0.5837624669075012,
"learning_rate": 0.00020462962962962964,
"loss": 0.2481,
"step": 5180
},
{
"epoch": 9.6,
"grad_norm": 1.0567339658737183,
"learning_rate": 0.00019537037037037038,
"loss": 0.2289,
"step": 5190
},
{
"epoch": 9.62,
"grad_norm": 0.548608660697937,
"learning_rate": 0.0001861111111111111,
"loss": 0.2723,
"step": 5200
},
{
"epoch": 9.64,
"grad_norm": 0.46330851316452026,
"learning_rate": 0.00017685185185185187,
"loss": 0.2482,
"step": 5210
},
{
"epoch": 9.66,
"grad_norm": 0.5314125418663025,
"learning_rate": 0.00016759259259259258,
"loss": 0.2305,
"step": 5220
},
{
"epoch": 9.68,
"grad_norm": 1.2373839616775513,
"learning_rate": 0.00015833333333333335,
"loss": 0.2508,
"step": 5230
},
{
"epoch": 9.69,
"grad_norm": 0.63730788230896,
"learning_rate": 0.00014907407407407407,
"loss": 0.2366,
"step": 5240
},
{
"epoch": 9.71,
"grad_norm": 0.8189606666564941,
"learning_rate": 0.0001398148148148148,
"loss": 0.2473,
"step": 5250
},
{
"epoch": 9.73,
"grad_norm": 0.4223102629184723,
"learning_rate": 0.00013055555555555558,
"loss": 0.1921,
"step": 5260
},
{
"epoch": 9.75,
"grad_norm": 0.8108687996864319,
"learning_rate": 0.0001212962962962963,
"loss": 0.211,
"step": 5270
},
{
"epoch": 9.77,
"grad_norm": 1.0115864276885986,
"learning_rate": 0.00011203703703703704,
"loss": 0.2199,
"step": 5280
},
{
"epoch": 9.79,
"grad_norm": 0.578581690788269,
"learning_rate": 0.00010277777777777778,
"loss": 0.2079,
"step": 5290
},
{
"epoch": 9.81,
"grad_norm": 0.7213098406791687,
"learning_rate": 9.351851851851852e-05,
"loss": 0.2709,
"step": 5300
},
{
"epoch": 9.82,
"grad_norm": 0.9571123123168945,
"learning_rate": 8.425925925925925e-05,
"loss": 0.2925,
"step": 5310
},
{
"epoch": 9.84,
"grad_norm": 1.0856187343597412,
"learning_rate": 7.5e-05,
"loss": 0.2708,
"step": 5320
},
{
"epoch": 9.86,
"grad_norm": 0.4601670801639557,
"learning_rate": 6.574074074074075e-05,
"loss": 0.1895,
"step": 5330
},
{
"epoch": 9.88,
"grad_norm": 0.7248308062553406,
"learning_rate": 5.648148148148148e-05,
"loss": 0.2639,
"step": 5340
},
{
"epoch": 9.9,
"grad_norm": 0.8658019304275513,
"learning_rate": 4.722222222222222e-05,
"loss": 0.2721,
"step": 5350
},
{
"epoch": 9.92,
"grad_norm": 0.877480685710907,
"learning_rate": 3.7962962962962964e-05,
"loss": 0.2026,
"step": 5360
},
{
"epoch": 9.94,
"grad_norm": 0.7867253422737122,
"learning_rate": 2.8703703703703703e-05,
"loss": 0.2431,
"step": 5370
},
{
"epoch": 9.95,
"grad_norm": 0.5923830270767212,
"learning_rate": 1.9444444444444445e-05,
"loss": 0.2207,
"step": 5380
},
{
"epoch": 9.97,
"grad_norm": 0.9413737058639526,
"learning_rate": 1.0185185185185185e-05,
"loss": 0.223,
"step": 5390
},
{
"epoch": 9.99,
"grad_norm": 0.630954921245575,
"learning_rate": 9.259259259259259e-07,
"loss": 0.2169,
"step": 5400
},
{
"epoch": 9.99,
"eval_accuracy": 0.9835156370358958,
"eval_f1": 0.9881773422996485,
"eval_loss": 0.0720474049448967,
"eval_precision": 0.9887528399741967,
"eval_recall": 0.988172197902048,
"eval_runtime": 52.921,
"eval_samples_per_second": 122.654,
"eval_steps_per_second": 7.672,
"step": 5400
},
{
"epoch": 9.99,
"step": 5400,
"total_flos": 2.6962411056639013e+19,
"train_loss": 0.442498734637543,
"train_runtime": 5918.7974,
"train_samples_per_second": 58.426,
"train_steps_per_second": 0.912
}
],
"logging_steps": 10,
"max_steps": 5400,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 2.6962411056639013e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}