{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.2795905310300704,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006397952655150352,
      "grad_norm": null,
      "learning_rate": 4.9893367455747496e-05,
      "loss": 0.0,
      "step": 10
    },
|
|
{
|
|
"epoch": 0.012795905310300703,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.978673491149499e-05,
|
|
"loss": 0.0,
|
|
"step": 20
|
|
},
|
|
{
|
|
"epoch": 0.019193857965451054,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.968010236724249e-05,
|
|
"loss": 0.0,
|
|
"step": 30
|
|
},
|
|
{
|
|
"epoch": 0.025591810620601407,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.9573469822989975e-05,
|
|
"loss": 0.0,
|
|
"step": 40
|
|
},
|
|
{
|
|
"epoch": 0.03198976327575176,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.946683727873747e-05,
|
|
"loss": 0.0,
|
|
"step": 50
|
|
},
|
|
{
|
|
"epoch": 0.03838771593090211,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.936020473448497e-05,
|
|
"loss": 0.0,
|
|
"step": 60
|
|
},
|
|
{
|
|
"epoch": 0.044785668586052464,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.925357219023246e-05,
|
|
"loss": 0.0,
|
|
"step": 70
|
|
},
|
|
{
|
|
"epoch": 0.05118362124120281,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.9146939645979955e-05,
|
|
"loss": 0.0,
|
|
"step": 80
|
|
},
|
|
{
|
|
"epoch": 0.05758157389635317,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.904030710172745e-05,
|
|
"loss": 0.0,
|
|
"step": 90
|
|
},
|
|
{
|
|
"epoch": 0.06397952655150352,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.893367455747495e-05,
|
|
"loss": 0.0,
|
|
"step": 100
|
|
},
|
|
{
|
|
"epoch": 0.07037747920665387,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.882704201322244e-05,
|
|
"loss": 0.0,
|
|
"step": 110
|
|
},
|
|
{
|
|
"epoch": 0.07677543186180422,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.872040946896993e-05,
|
|
"loss": 0.0,
|
|
"step": 120
|
|
},
|
|
{
|
|
"epoch": 0.08317338451695458,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.861377692471742e-05,
|
|
"loss": 0.0,
|
|
"step": 130
|
|
},
|
|
{
|
|
"epoch": 0.08957133717210493,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.850714438046492e-05,
|
|
"loss": 0.0,
|
|
"step": 140
|
|
},
|
|
{
|
|
"epoch": 0.09596928982725528,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.8400511836212415e-05,
|
|
"loss": 0.0,
|
|
"step": 150
|
|
},
|
|
{
|
|
"epoch": 0.10236724248240563,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.829387929195991e-05,
|
|
"loss": 0.0,
|
|
"step": 160
|
|
},
|
|
{
|
|
"epoch": 0.10876519513755598,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.81872467477074e-05,
|
|
"loss": 0.0,
|
|
"step": 170
|
|
},
|
|
{
|
|
"epoch": 0.11516314779270634,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.80806142034549e-05,
|
|
"loss": 0.0,
|
|
"step": 180
|
|
},
|
|
{
|
|
"epoch": 0.12156110044785669,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.797398165920239e-05,
|
|
"loss": 0.0,
|
|
"step": 190
|
|
},
|
|
{
|
|
"epoch": 0.12795905310300704,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.786734911494988e-05,
|
|
"loss": 0.0,
|
|
"step": 200
|
|
},
|
|
{
|
|
"epoch": 0.1343570057581574,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.7760716570697375e-05,
|
|
"loss": 0.0,
|
|
"step": 210
|
|
},
|
|
{
|
|
"epoch": 0.14075495841330773,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.7654084026444875e-05,
|
|
"loss": 0.0,
|
|
"step": 220
|
|
},
|
|
{
|
|
"epoch": 0.1471529110684581,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.754745148219237e-05,
|
|
"loss": 0.0,
|
|
"step": 230
|
|
},
|
|
{
|
|
"epoch": 0.15355086372360843,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.744081893793986e-05,
|
|
"loss": 0.0,
|
|
"step": 240
|
|
},
|
|
{
|
|
"epoch": 0.1599488163787588,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.7334186393687355e-05,
|
|
"loss": 0.0,
|
|
"step": 250
|
|
},
|
|
{
|
|
"epoch": 0.16634676903390916,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.7227553849434855e-05,
|
|
"loss": 0.0,
|
|
"step": 260
|
|
},
|
|
{
|
|
"epoch": 0.1727447216890595,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.712092130518234e-05,
|
|
"loss": 0.0,
|
|
"step": 270
|
|
},
|
|
{
|
|
"epoch": 0.17914267434420986,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.7014288760929834e-05,
|
|
"loss": 0.0,
|
|
"step": 280
|
|
},
|
|
{
|
|
"epoch": 0.1855406269993602,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.6907656216677334e-05,
|
|
"loss": 0.0,
|
|
"step": 290
|
|
},
|
|
{
|
|
"epoch": 0.19193857965451055,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.680102367242483e-05,
|
|
"loss": 0.0,
|
|
"step": 300
|
|
},
|
|
{
|
|
"epoch": 0.19833653230966092,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.669439112817232e-05,
|
|
"loss": 0.0,
|
|
"step": 310
|
|
},
|
|
{
|
|
"epoch": 0.20473448496481125,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.6587758583919814e-05,
|
|
"loss": 0.0,
|
|
"step": 320
|
|
},
|
|
{
|
|
"epoch": 0.21113243761996162,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.6481126039667314e-05,
|
|
"loss": 0.0,
|
|
"step": 330
|
|
},
|
|
{
|
|
"epoch": 0.21753039027511195,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.63744934954148e-05,
|
|
"loss": 0.0,
|
|
"step": 340
|
|
},
|
|
{
|
|
"epoch": 0.22392834293026231,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.6267860951162294e-05,
|
|
"loss": 0.0,
|
|
"step": 350
|
|
},
|
|
{
|
|
"epoch": 0.23032629558541268,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.616122840690979e-05,
|
|
"loss": 0.0,
|
|
"step": 360
|
|
},
|
|
{
|
|
"epoch": 0.236724248240563,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.605459586265729e-05,
|
|
"loss": 0.0,
|
|
"step": 370
|
|
},
|
|
{
|
|
"epoch": 0.24312220089571338,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.594796331840478e-05,
|
|
"loss": 0.0,
|
|
"step": 380
|
|
},
|
|
{
|
|
"epoch": 0.2495201535508637,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.5841330774152274e-05,
|
|
"loss": 0.0,
|
|
"step": 390
|
|
},
|
|
{
|
|
"epoch": 0.2559181062060141,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.573469822989977e-05,
|
|
"loss": 0.0,
|
|
"step": 400
|
|
},
|
|
{
|
|
"epoch": 0.26231605886116444,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.562806568564727e-05,
|
|
"loss": 0.0,
|
|
"step": 410
|
|
},
|
|
{
|
|
"epoch": 0.2687140115163148,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.5521433141394754e-05,
|
|
"loss": 0.0,
|
|
"step": 420
|
|
},
|
|
{
|
|
"epoch": 0.2751119641714651,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.541480059714225e-05,
|
|
"loss": 0.0,
|
|
"step": 430
|
|
},
|
|
{
|
|
"epoch": 0.28150991682661547,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.530816805288974e-05,
|
|
"loss": 0.0,
|
|
"step": 440
|
|
},
|
|
{
|
|
"epoch": 0.28790786948176583,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.520153550863724e-05,
|
|
"loss": 0.0,
|
|
"step": 450
|
|
},
|
|
{
|
|
"epoch": 0.2943058221369162,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.5094902964384734e-05,
|
|
"loss": 0.0,
|
|
"step": 460
|
|
},
|
|
{
|
|
"epoch": 0.30070377479206656,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.498827042013223e-05,
|
|
"loss": 0.0,
|
|
"step": 470
|
|
},
|
|
{
|
|
"epoch": 0.30710172744721687,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.488163787587972e-05,
|
|
"loss": 0.0,
|
|
"step": 480
|
|
},
|
|
{
|
|
"epoch": 0.31349968010236723,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.477500533162721e-05,
|
|
"loss": 0.0,
|
|
"step": 490
|
|
},
|
|
{
|
|
"epoch": 0.3198976327575176,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.466837278737471e-05,
|
|
"loss": 0.0,
|
|
"step": 500
|
|
},
|
|
{
|
|
"epoch": 0.32629558541266795,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.45617402431222e-05,
|
|
"loss": 0.0,
|
|
"step": 510
|
|
},
|
|
{
|
|
"epoch": 0.3326935380678183,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.44551076988697e-05,
|
|
"loss": 0.0,
|
|
"step": 520
|
|
},
|
|
{
|
|
"epoch": 0.3390914907229686,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.434847515461719e-05,
|
|
"loss": 0.0,
|
|
"step": 530
|
|
},
|
|
{
|
|
"epoch": 0.345489443378119,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.4241842610364687e-05,
|
|
"loss": 0.0,
|
|
"step": 540
|
|
},
|
|
{
|
|
"epoch": 0.35188739603326935,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.413521006611218e-05,
|
|
"loss": 0.0,
|
|
"step": 550
|
|
},
|
|
{
|
|
"epoch": 0.3582853486884197,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.402857752185967e-05,
|
|
"loss": 0.0,
|
|
"step": 560
|
|
},
|
|
{
|
|
"epoch": 0.3646833013435701,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.3921944977607166e-05,
|
|
"loss": 0.0,
|
|
"step": 570
|
|
},
|
|
{
|
|
"epoch": 0.3710812539987204,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.381531243335466e-05,
|
|
"loss": 0.0,
|
|
"step": 580
|
|
},
|
|
{
|
|
"epoch": 0.37747920665387075,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.370867988910215e-05,
|
|
"loss": 0.0,
|
|
"step": 590
|
|
},
|
|
{
|
|
"epoch": 0.3838771593090211,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.360204734484965e-05,
|
|
"loss": 0.0,
|
|
"step": 600
|
|
},
|
|
{
|
|
"epoch": 0.3902751119641715,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.3495414800597146e-05,
|
|
"loss": 0.0,
|
|
"step": 610
|
|
},
|
|
{
|
|
"epoch": 0.39667306461932184,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.338878225634464e-05,
|
|
"loss": 0.0,
|
|
"step": 620
|
|
},
|
|
{
|
|
"epoch": 0.40307101727447214,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.328214971209213e-05,
|
|
"loss": 0.0,
|
|
"step": 630
|
|
},
|
|
{
|
|
"epoch": 0.4094689699296225,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.3175517167839626e-05,
|
|
"loss": 0.0,
|
|
"step": 640
|
|
},
|
|
{
|
|
"epoch": 0.41586692258477287,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.306888462358712e-05,
|
|
"loss": 0.0,
|
|
"step": 650
|
|
},
|
|
{
|
|
"epoch": 0.42226487523992323,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.296225207933461e-05,
|
|
"loss": 0.0,
|
|
"step": 660
|
|
},
|
|
{
|
|
"epoch": 0.4286628278950736,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.2855619535082106e-05,
|
|
"loss": 0.0,
|
|
"step": 670
|
|
},
|
|
{
|
|
"epoch": 0.4350607805502239,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.2748986990829606e-05,
|
|
"loss": 0.0,
|
|
"step": 680
|
|
},
|
|
{
|
|
"epoch": 0.44145873320537427,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.26423544465771e-05,
|
|
"loss": 0.0,
|
|
"step": 690
|
|
},
|
|
{
|
|
"epoch": 0.44785668586052463,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.253572190232459e-05,
|
|
"loss": 0.0,
|
|
"step": 700
|
|
},
|
|
{
|
|
"epoch": 0.454254638515675,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.2429089358072086e-05,
|
|
"loss": 0.0,
|
|
"step": 710
|
|
},
|
|
{
|
|
"epoch": 0.46065259117082535,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.232245681381958e-05,
|
|
"loss": 0.0,
|
|
"step": 720
|
|
},
|
|
{
|
|
"epoch": 0.46705054382597566,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.221582426956707e-05,
|
|
"loss": 0.0,
|
|
"step": 730
|
|
},
|
|
{
|
|
"epoch": 0.473448496481126,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.2109191725314565e-05,
|
|
"loss": 0.0,
|
|
"step": 740
|
|
},
|
|
{
|
|
"epoch": 0.4798464491362764,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.2002559181062066e-05,
|
|
"loss": 0.0,
|
|
"step": 750
|
|
},
|
|
{
|
|
"epoch": 0.48624440179142675,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.189592663680956e-05,
|
|
"loss": 0.0,
|
|
"step": 760
|
|
},
|
|
{
|
|
"epoch": 0.4926423544465771,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.178929409255705e-05,
|
|
"loss": 0.0,
|
|
"step": 770
|
|
},
|
|
{
|
|
"epoch": 0.4990403071017274,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.1682661548304545e-05,
|
|
"loss": 0.0,
|
|
"step": 780
|
|
},
|
|
{
|
|
"epoch": 0.5054382597568778,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.157602900405204e-05,
|
|
"loss": 0.0,
|
|
"step": 790
|
|
},
|
|
{
|
|
"epoch": 0.5118362124120281,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.146939645979953e-05,
|
|
"loss": 0.0,
|
|
"step": 800
|
|
},
|
|
{
|
|
"epoch": 0.5182341650671785,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.1362763915547025e-05,
|
|
"loss": 0.0,
|
|
"step": 810
|
|
},
|
|
{
|
|
"epoch": 0.5246321177223289,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.125613137129452e-05,
|
|
"loss": 0.0,
|
|
"step": 820
|
|
},
|
|
{
|
|
"epoch": 0.5310300703774792,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.114949882704202e-05,
|
|
"loss": 0.0,
|
|
"step": 830
|
|
},
|
|
{
|
|
"epoch": 0.5374280230326296,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.104286628278951e-05,
|
|
"loss": 0.0,
|
|
"step": 840
|
|
},
|
|
{
|
|
"epoch": 0.5438259756877799,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.0936233738537005e-05,
|
|
"loss": 0.0,
|
|
"step": 850
|
|
},
|
|
{
|
|
"epoch": 0.5502239283429302,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.08296011942845e-05,
|
|
"loss": 0.0,
|
|
"step": 860
|
|
},
|
|
{
|
|
"epoch": 0.5566218809980806,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.072296865003199e-05,
|
|
"loss": 0.0,
|
|
"step": 870
|
|
},
|
|
{
|
|
"epoch": 0.5630198336532309,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.0616336105779485e-05,
|
|
"loss": 0.0,
|
|
"step": 880
|
|
},
|
|
{
|
|
"epoch": 0.5694177863083814,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.050970356152698e-05,
|
|
"loss": 0.0,
|
|
"step": 890
|
|
},
|
|
{
|
|
"epoch": 0.5758157389635317,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.040307101727447e-05,
|
|
"loss": 0.0,
|
|
"step": 900
|
|
},
|
|
{
|
|
"epoch": 0.582213691618682,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.029643847302197e-05,
|
|
"loss": 0.0,
|
|
"step": 910
|
|
},
|
|
{
|
|
"epoch": 0.5886116442738324,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.0189805928769465e-05,
|
|
"loss": 0.0,
|
|
"step": 920
|
|
},
|
|
{
|
|
"epoch": 0.5950095969289827,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 4.008317338451696e-05,
|
|
"loss": 0.0,
|
|
"step": 930
|
|
},
|
|
{
|
|
"epoch": 0.6014075495841331,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.997654084026445e-05,
|
|
"loss": 0.0,
|
|
"step": 940
|
|
},
|
|
{
|
|
"epoch": 0.6078055022392834,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.9869908296011945e-05,
|
|
"loss": 0.0,
|
|
"step": 950
|
|
},
|
|
{
|
|
"epoch": 0.6142034548944337,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.976327575175944e-05,
|
|
"loss": 0.0,
|
|
"step": 960
|
|
},
|
|
{
|
|
"epoch": 0.6206014075495841,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.965664320750693e-05,
|
|
"loss": 0.0,
|
|
"step": 970
|
|
},
|
|
{
|
|
"epoch": 0.6269993602047345,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.9550010663254424e-05,
|
|
"loss": 0.0,
|
|
"step": 980
|
|
},
|
|
{
|
|
"epoch": 0.6333973128598849,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.9443378119001924e-05,
|
|
"loss": 0.0,
|
|
"step": 990
|
|
},
|
|
{
|
|
"epoch": 0.6397952655150352,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.933674557474942e-05,
|
|
"loss": 0.0,
|
|
"step": 1000
|
|
},
|
|
{
|
|
"epoch": 0.6461932181701855,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.923011303049691e-05,
|
|
"loss": 0.0,
|
|
"step": 1010
|
|
},
|
|
{
|
|
"epoch": 0.6525911708253359,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.9123480486244404e-05,
|
|
"loss": 0.0,
|
|
"step": 1020
|
|
},
|
|
{
|
|
"epoch": 0.6589891234804862,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.90168479419919e-05,
|
|
"loss": 0.0,
|
|
"step": 1030
|
|
},
|
|
{
|
|
"epoch": 0.6653870761356366,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.891021539773939e-05,
|
|
"loss": 0.0,
|
|
"step": 1040
|
|
},
|
|
{
|
|
"epoch": 0.6717850287907869,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.8803582853486884e-05,
|
|
"loss": 0.0,
|
|
"step": 1050
|
|
},
|
|
{
|
|
"epoch": 0.6781829814459372,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.8696950309234384e-05,
|
|
"loss": 0.0,
|
|
"step": 1060
|
|
},
|
|
{
|
|
"epoch": 0.6845809341010877,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.859031776498188e-05,
|
|
"loss": 0.0,
|
|
"step": 1070
|
|
},
|
|
{
|
|
"epoch": 0.690978886756238,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.848368522072937e-05,
|
|
"loss": 0.0,
|
|
"step": 1080
|
|
},
|
|
{
|
|
"epoch": 0.6973768394113884,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.837705267647686e-05,
|
|
"loss": 0.0,
|
|
"step": 1090
|
|
},
|
|
{
|
|
"epoch": 0.7037747920665387,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.827042013222436e-05,
|
|
"loss": 0.0,
|
|
"step": 1100
|
|
},
|
|
{
|
|
"epoch": 0.710172744721689,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.816378758797185e-05,
|
|
"loss": 0.0,
|
|
"step": 1110
|
|
},
|
|
{
|
|
"epoch": 0.7165706973768394,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.8057155043719344e-05,
|
|
"loss": 0.0,
|
|
"step": 1120
|
|
},
|
|
{
|
|
"epoch": 0.7229686500319897,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.795052249946684e-05,
|
|
"loss": 0.0,
|
|
"step": 1130
|
|
},
|
|
{
|
|
"epoch": 0.7293666026871402,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.784388995521434e-05,
|
|
"loss": 0.0,
|
|
"step": 1140
|
|
},
|
|
{
|
|
"epoch": 0.7357645553422905,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.773725741096183e-05,
|
|
"loss": 0.0,
|
|
"step": 1150
|
|
},
|
|
{
|
|
"epoch": 0.7421625079974408,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.7630624866709324e-05,
|
|
"loss": 0.0,
|
|
"step": 1160
|
|
},
|
|
{
|
|
"epoch": 0.7485604606525912,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.752399232245682e-05,
|
|
"loss": 0.0,
|
|
"step": 1170
|
|
},
|
|
{
|
|
"epoch": 0.7549584133077415,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.741735977820431e-05,
|
|
"loss": 0.0,
|
|
"step": 1180
|
|
},
|
|
{
|
|
"epoch": 0.7613563659628919,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.73107272339518e-05,
|
|
"loss": 0.0,
|
|
"step": 1190
|
|
},
|
|
{
|
|
"epoch": 0.7677543186180422,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.72040946896993e-05,
|
|
"loss": 0.0,
|
|
"step": 1200
|
|
},
|
|
{
|
|
"epoch": 0.7741522712731925,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.709746214544679e-05,
|
|
"loss": 0.0,
|
|
"step": 1210
|
|
},
|
|
{
|
|
"epoch": 0.780550223928343,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.699082960119429e-05,
|
|
"loss": 0.0,
|
|
"step": 1220
|
|
},
|
|
{
|
|
"epoch": 0.7869481765834933,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.688419705694178e-05,
|
|
"loss": 0.0,
|
|
"step": 1230
|
|
},
|
|
{
|
|
"epoch": 0.7933461292386437,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.677756451268927e-05,
|
|
"loss": 0.0,
|
|
"step": 1240
|
|
},
|
|
{
|
|
"epoch": 0.799744081893794,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.667093196843677e-05,
|
|
"loss": 0.0,
|
|
"step": 1250
|
|
},
|
|
{
|
|
"epoch": 0.8061420345489443,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.656429942418426e-05,
|
|
"loss": 0.0,
|
|
"step": 1260
|
|
},
|
|
{
|
|
"epoch": 0.8125399872040947,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.6457666879931756e-05,
|
|
"loss": 0.0,
|
|
"step": 1270
|
|
},
|
|
{
|
|
"epoch": 0.818937939859245,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.635103433567925e-05,
|
|
"loss": 0.0,
|
|
"step": 1280
|
|
},
|
|
{
|
|
"epoch": 0.8253358925143954,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.624440179142675e-05,
|
|
"loss": 0.0,
|
|
"step": 1290
|
|
},
|
|
{
|
|
"epoch": 0.8317338451695457,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.613776924717424e-05,
|
|
"loss": 0.0,
|
|
"step": 1300
|
|
},
|
|
{
|
|
"epoch": 0.838131797824696,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.6031136702921736e-05,
|
|
"loss": 0.0,
|
|
"step": 1310
|
|
},
|
|
{
|
|
"epoch": 0.8445297504798465,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.592450415866922e-05,
|
|
"loss": 0.0,
|
|
"step": 1320
|
|
},
|
|
{
|
|
"epoch": 0.8509277031349968,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.581787161441672e-05,
|
|
"loss": 0.0,
|
|
"step": 1330
|
|
},
|
|
{
|
|
"epoch": 0.8573256557901472,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.5711239070164216e-05,
|
|
"loss": 0.0,
|
|
"step": 1340
|
|
},
|
|
{
|
|
"epoch": 0.8637236084452975,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.560460652591171e-05,
|
|
"loss": 0.0,
|
|
"step": 1350
|
|
},
|
|
{
|
|
"epoch": 0.8701215611004478,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.54979739816592e-05,
|
|
"loss": 0.0,
|
|
"step": 1360
|
|
},
|
|
{
|
|
"epoch": 0.8765195137555982,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.53913414374067e-05,
|
|
"loss": 0.0,
|
|
"step": 1370
|
|
},
|
|
{
|
|
"epoch": 0.8829174664107485,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.5284708893154196e-05,
|
|
"loss": 0.0,
|
|
"step": 1380
|
|
},
|
|
{
|
|
"epoch": 0.889315419065899,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.517807634890168e-05,
|
|
"loss": 0.0,
|
|
"step": 1390
|
|
},
|
|
{
|
|
"epoch": 0.8957133717210493,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.5071443804649176e-05,
|
|
"loss": 0.0,
|
|
"step": 1400
|
|
},
|
|
{
|
|
"epoch": 0.9021113243761996,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.4964811260396676e-05,
|
|
"loss": 0.0,
|
|
"step": 1410
|
|
},
|
|
{
|
|
"epoch": 0.90850927703135,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.485817871614417e-05,
|
|
"loss": 0.0,
|
|
"step": 1420
|
|
},
|
|
{
|
|
"epoch": 0.9149072296865003,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.475154617189166e-05,
|
|
"loss": 0.0,
|
|
"step": 1430
|
|
},
|
|
{
|
|
"epoch": 0.9213051823416507,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.4644913627639155e-05,
|
|
"loss": 0.0,
|
|
"step": 1440
|
|
},
|
|
{
|
|
"epoch": 0.927703134996801,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.4538281083386656e-05,
|
|
"loss": 0.0,
|
|
"step": 1450
|
|
},
|
|
{
|
|
"epoch": 0.9341010876519513,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.443164853913415e-05,
|
|
"loss": 0.0,
|
|
"step": 1460
|
|
},
|
|
{
|
|
"epoch": 0.9404990403071017,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.4325015994881635e-05,
|
|
"loss": 0.0,
|
|
"step": 1470
|
|
},
|
|
{
|
|
"epoch": 0.946896992962252,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.4218383450629135e-05,
|
|
"loss": 0.0,
|
|
"step": 1480
|
|
},
|
|
{
|
|
"epoch": 0.9532949456174025,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.411175090637663e-05,
|
|
"loss": 0.0,
|
|
"step": 1490
|
|
},
|
|
{
|
|
"epoch": 0.9596928982725528,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.400511836212412e-05,
|
|
"loss": 0.0,
|
|
"step": 1500
|
|
},
|
|
{
|
|
"epoch": 0.9660908509277031,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.3898485817871615e-05,
|
|
"loss": 0.0,
|
|
"step": 1510
|
|
},
|
|
{
|
|
"epoch": 0.9724888035828535,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.379185327361911e-05,
|
|
"loss": 0.0,
|
|
"step": 1520
|
|
},
|
|
{
|
|
"epoch": 0.9788867562380038,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.368522072936661e-05,
|
|
"loss": 0.0,
|
|
"step": 1530
|
|
},
|
|
{
|
|
"epoch": 0.9852847088931542,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.3578588185114095e-05,
|
|
"loss": 0.0,
|
|
"step": 1540
|
|
},
|
|
{
|
|
"epoch": 0.9916826615483045,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.347195564086159e-05,
|
|
"loss": 0.0,
|
|
"step": 1550
|
|
},
|
|
{
|
|
"epoch": 0.9980806142034548,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.336532309660909e-05,
|
|
"loss": 0.0,
|
|
"step": 1560
|
|
},
|
|
{
|
|
"epoch": 1.0044785668586051,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.325869055235658e-05,
|
|
"loss": 0.0,
|
|
"step": 1570
|
|
},
|
|
{
|
|
"epoch": 1.0108765195137557,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.3152058008104075e-05,
|
|
"loss": 0.0,
|
|
"step": 1580
|
|
},
|
|
{
|
|
"epoch": 1.017274472168906,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.304542546385157e-05,
|
|
"loss": 0.0,
|
|
"step": 1590
|
|
},
|
|
{
|
|
"epoch": 1.0236724248240563,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.293879291959907e-05,
|
|
"loss": 0.0,
|
|
"step": 1600
|
|
},
|
|
{
|
|
"epoch": 1.0300703774792066,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.283216037534656e-05,
|
|
"loss": 0.0,
|
|
"step": 1610
|
|
},
|
|
{
|
|
"epoch": 1.036468330134357,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.272552783109405e-05,
|
|
"loss": 0.0,
|
|
"step": 1620
|
|
},
|
|
{
|
|
"epoch": 1.0428662827895074,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.261889528684154e-05,
|
|
"loss": 0.0,
|
|
"step": 1630
|
|
},
|
|
{
|
|
"epoch": 1.0492642354446577,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.251226274258904e-05,
|
|
"loss": 0.0,
|
|
"step": 1640
|
|
},
|
|
{
|
|
"epoch": 1.055662188099808,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.2405630198336535e-05,
|
|
"loss": 0.0,
|
|
"step": 1650
|
|
},
|
|
{
|
|
"epoch": 1.0620601407549584,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.229899765408403e-05,
|
|
"loss": 0.0,
|
|
"step": 1660
|
|
},
|
|
{
|
|
"epoch": 1.0684580934101087,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.219236510983152e-05,
|
|
"loss": 0.0,
|
|
"step": 1670
|
|
},
|
|
{
|
|
"epoch": 1.0748560460652592,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.208573256557902e-05,
|
|
"loss": 0.0,
|
|
"step": 1680
|
|
},
|
|
{
|
|
"epoch": 1.0812539987204095,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.197910002132651e-05,
|
|
"loss": 0.0,
|
|
"step": 1690
|
|
},
|
|
{
|
|
"epoch": 1.0876519513755598,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.1872467477074e-05,
|
|
"loss": 0.0,
|
|
"step": 1700
|
|
},
|
|
{
|
|
"epoch": 1.0940499040307101,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.17658349328215e-05,
|
|
"loss": 0.0,
|
|
"step": 1710
|
|
},
|
|
{
|
|
"epoch": 1.1004478566858604,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.1659202388568994e-05,
|
|
"loss": 0.0,
|
|
"step": 1720
|
|
},
|
|
{
|
|
"epoch": 1.106845809341011,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.155256984431649e-05,
|
|
"loss": 0.0,
|
|
"step": 1730
|
|
},
|
|
{
|
|
"epoch": 1.1132437619961613,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.144593730006398e-05,
|
|
"loss": 0.0,
|
|
"step": 1740
|
|
},
|
|
{
|
|
"epoch": 1.1196417146513116,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.1339304755811474e-05,
|
|
"loss": 0.0,
|
|
"step": 1750
|
|
},
|
|
{
|
|
"epoch": 1.1260396673064619,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.1232672211558974e-05,
|
|
"loss": 0.0,
|
|
"step": 1760
|
|
},
|
|
{
|
|
"epoch": 1.1324376199616122,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.112603966730646e-05,
|
|
"loss": 0.0,
|
|
"step": 1770
|
|
},
|
|
{
|
|
"epoch": 1.1388355726167627,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.1019407123053954e-05,
|
|
"loss": 0.0,
|
|
"step": 1780
|
|
},
|
|
{
|
|
"epoch": 1.145233525271913,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.0912774578801454e-05,
|
|
"loss": 0.0,
|
|
"step": 1790
|
|
},
|
|
{
|
|
"epoch": 1.1516314779270633,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.080614203454895e-05,
|
|
"loss": 0.0,
|
|
"step": 1800
|
|
},
|
|
{
|
|
"epoch": 1.1580294305822136,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.069950949029644e-05,
|
|
"loss": 0.0,
|
|
"step": 1810
|
|
},
|
|
{
|
|
"epoch": 1.164427383237364,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.0592876946043934e-05,
|
|
"loss": 0.0,
|
|
"step": 1820
|
|
},
|
|
{
|
|
"epoch": 1.1708253358925145,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.048624440179143e-05,
|
|
"loss": 0.0,
|
|
"step": 1830
|
|
},
|
|
{
|
|
"epoch": 1.1772232885476648,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.037961185753892e-05,
|
|
"loss": 0.0,
|
|
"step": 1840
|
|
},
|
|
{
|
|
"epoch": 1.183621241202815,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.0272979313286414e-05,
|
|
"loss": 0.0,
|
|
"step": 1850
|
|
},
|
|
{
|
|
"epoch": 1.1900191938579654,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.016634676903391e-05,
|
|
"loss": 0.0,
|
|
"step": 1860
|
|
},
|
|
{
|
|
"epoch": 1.1964171465131157,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 3.0059714224781403e-05,
|
|
"loss": 0.0,
|
|
"step": 1870
|
|
},
|
|
{
|
|
"epoch": 1.2028150991682662,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.99530816805289e-05,
|
|
"loss": 0.0,
|
|
"step": 1880
|
|
},
|
|
{
|
|
"epoch": 1.2092130518234165,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.9846449136276393e-05,
|
|
"loss": 0.0,
|
|
"step": 1890
|
|
},
|
|
{
|
|
"epoch": 1.2156110044785668,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.973981659202389e-05,
|
|
"loss": 0.0,
|
|
"step": 1900
|
|
},
|
|
{
|
|
"epoch": 1.2220089571337172,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.9633184047771383e-05,
|
|
"loss": 0.0,
|
|
"step": 1910
|
|
},
|
|
{
|
|
"epoch": 1.2284069097888675,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.9526551503518873e-05,
|
|
"loss": 0.0,
|
|
"step": 1920
|
|
},
|
|
{
|
|
"epoch": 1.234804862444018,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.941991895926637e-05,
|
|
"loss": 0.0,
|
|
"step": 1930
|
|
},
|
|
{
|
|
"epoch": 1.2412028150991683,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.9313286415013863e-05,
|
|
"loss": 0.0,
|
|
"step": 1940
|
|
},
|
|
{
|
|
"epoch": 1.2476007677543186,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.9206653870761356e-05,
|
|
"loss": 0.0,
|
|
"step": 1950
|
|
},
|
|
{
|
|
"epoch": 1.253998720409469,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.9100021326508853e-05,
|
|
"loss": 0.0,
|
|
"step": 1960
|
|
},
|
|
{
|
|
"epoch": 1.2603966730646192,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.8993388782256346e-05,
|
|
"loss": 0.0,
|
|
"step": 1970
|
|
},
|
|
{
|
|
"epoch": 1.2667946257197698,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.8886756238003843e-05,
|
|
"loss": 0.0,
|
|
"step": 1980
|
|
},
|
|
{
|
|
"epoch": 1.27319257837492,
|
|
"grad_norm": NaN,
|
|
"learning_rate": 2.8780123693751333e-05,
|
|
"loss": 0.0,
|
|
"step": 1990
|
|
},
|
|
    {
      "epoch": 1.2795905310300704,
      "grad_norm": null,
      "learning_rate": 2.8673491149498826e-05,
      "loss": 0.0,
      "step": 2000
    }
  ],
  "logging_steps": 10,
  "max_steps": 4689,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}