{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.1244668476153548,
"global_step": 2900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 0.0001999867761371633,
"loss": 1.0435,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 0.00019993306018843102,
"loss": 0.8918,
"step": 100
},
{
"epoch": 0.06,
"learning_rate": 0.00019983804784290833,
"loss": 0.8874,
"step": 150
},
{
"epoch": 0.08,
"learning_rate": 0.00019970177836355307,
"loss": 0.8839,
"step": 200
},
{
"epoch": 0.09,
"learning_rate": 0.00019961818913082012,
"loss": 0.8801,
"step": 225
},
{
"epoch": 0.1,
"learning_rate": 0.00019952430806244534,
"loss": 0.8753,
"step": 250
},
{
"epoch": 0.11,
"learning_rate": 0.00019942014485754635,
"loss": 0.8754,
"step": 275
},
{
"epoch": 0.12,
"learning_rate": 0.00019930571027751713,
"loss": 0.8751,
"step": 300
},
{
"epoch": 0.13,
"learning_rate": 0.0001991810161449164,
"loss": 0.8819,
"step": 325
},
{
"epoch": 0.14,
"learning_rate": 0.00019904607534224612,
"loss": 0.8744,
"step": 350
},
{
"epoch": 0.15,
"learning_rate": 0.00019890090181062063,
"loss": 0.8735,
"step": 375
},
{
"epoch": 0.16,
"learning_rate": 0.00019874551054832625,
"loss": 0.8703,
"step": 400
},
{
"epoch": 0.16,
"learning_rate": 0.00019857991760927193,
"loss": 0.8715,
"step": 425
},
{
"epoch": 0.17,
"learning_rate": 0.00019840414010133045,
"loss": 0.8714,
"step": 450
},
{
"epoch": 0.18,
"learning_rate": 0.00019821819618457114,
"loss": 0.8653,
"step": 475
},
{
"epoch": 0.19,
"learning_rate": 0.0001980221050693837,
"loss": 0.8716,
"step": 500
},
{
"epoch": 0.2,
"learning_rate": 0.00019781588701449338,
"loss": 0.8695,
"step": 525
},
{
"epoch": 0.21,
"learning_rate": 0.0001975995633248682,
"loss": 0.8746,
"step": 550
},
{
"epoch": 0.22,
"learning_rate": 0.00019737315634951762,
"loss": 0.8731,
"step": 575
},
{
"epoch": 0.23,
"learning_rate": 0.00019713668947918386,
"loss": 0.867,
"step": 600
},
{
"epoch": 0.24,
"learning_rate": 0.0001968901871439252,
"loss": 0.8706,
"step": 625
},
{
"epoch": 0.25,
"learning_rate": 0.000196633674810592,
"loss": 0.8595,
"step": 650
},
{
"epoch": 0.26,
"learning_rate": 0.0001963671789801958,
"loss": 0.8627,
"step": 675
},
{
"epoch": 0.27,
"learning_rate": 0.0001960907271851712,
"loss": 0.8607,
"step": 700
},
{
"epoch": 0.28,
"learning_rate": 0.00019580434798653173,
"loss": 0.858,
"step": 725
},
{
"epoch": 0.29,
"learning_rate": 0.00019550807097091876,
"loss": 0.8589,
"step": 750
},
{
"epoch": 0.3,
"learning_rate": 0.00019520192674754515,
"loss": 0.8561,
"step": 775
},
{
"epoch": 0.31,
"learning_rate": 0.00019488594694503264,
"loss": 0.8576,
"step": 800
},
{
"epoch": 0.32,
"learning_rate": 0.00019456016420814446,
"loss": 0.8597,
"step": 825
},
{
"epoch": 0.33,
"learning_rate": 0.00019422461219441254,
"loss": 0.862,
"step": 850
},
{
"epoch": 0.34,
"learning_rate": 0.00019387932557066035,
"loss": 0.8577,
"step": 875
},
{
"epoch": 0.35,
"learning_rate": 0.00019352434000942127,
"loss": 0.8632,
"step": 900
},
{
"epoch": 0.36,
"learning_rate": 0.00019315969218525333,
"loss": 0.8567,
"step": 925
},
{
"epoch": 0.37,
"learning_rate": 0.00019278541977095005,
"loss": 0.8501,
"step": 950
},
{
"epoch": 0.38,
"learning_rate": 0.00019240156143364844,
"loss": 0.8596,
"step": 975
},
{
"epoch": 0.39,
"learning_rate": 0.00019200815683083434,
"loss": 0.8556,
"step": 1000
},
{
"epoch": 0.39,
"eval_loss": 0.8521950244903564,
"eval_runtime": 59.8838,
"eval_samples_per_second": 12.19,
"eval_steps_per_second": 0.885,
"step": 1000
},
{
"epoch": 0.4,
"learning_rate": 0.00019160524660624505,
"loss": 0.8531,
"step": 1025
},
{
"epoch": 0.41,
"learning_rate": 0.00019119287238567045,
"loss": 0.8513,
"step": 1050
},
{
"epoch": 0.42,
"learning_rate": 0.00019077107677265253,
"loss": 0.8502,
"step": 1075
},
{
"epoch": 0.43,
"learning_rate": 0.00019033990334408384,
"loss": 0.8469,
"step": 1100
},
{
"epoch": 0.44,
"learning_rate": 0.00018989939664570545,
"loss": 0.8495,
"step": 1125
},
{
"epoch": 0.45,
"learning_rate": 0.00018944960218750484,
"loss": 0.8485,
"step": 1150
},
{
"epoch": 0.46,
"learning_rate": 0.00018899056643901404,
"loss": 0.8534,
"step": 1175
},
{
"epoch": 0.47,
"learning_rate": 0.00018852233682450893,
"loss": 0.8531,
"step": 1200
},
{
"epoch": 0.47,
"learning_rate": 0.00018804496171810948,
"loss": 0.8509,
"step": 1225
},
{
"epoch": 0.48,
"learning_rate": 0.00018755849043878222,
"loss": 0.8445,
"step": 1250
},
{
"epoch": 0.49,
"learning_rate": 0.0001870629732452449,
"loss": 0.8548,
"step": 1275
},
{
"epoch": 0.5,
"learning_rate": 0.00018655846133077417,
"loss": 0.8441,
"step": 1300
},
{
"epoch": 0.51,
"learning_rate": 0.00018604500681791656,
"loss": 0.8533,
"step": 1325
},
{
"epoch": 0.52,
"learning_rate": 0.00018552266275310373,
"loss": 0.8505,
"step": 1350
},
{
"epoch": 0.53,
"learning_rate": 0.0001849914831011719,
"loss": 0.8544,
"step": 1375
},
{
"epoch": 0.54,
"learning_rate": 0.00018445152273978668,
"loss": 0.845,
"step": 1400
},
{
"epoch": 0.55,
"learning_rate": 0.00018390283745377354,
"loss": 0.8376,
"step": 1425
},
{
"epoch": 0.56,
"learning_rate": 0.0001833454839293545,
"loss": 0.847,
"step": 1450
},
{
"epoch": 0.57,
"learning_rate": 0.00018277951974829163,
"loss": 0.8473,
"step": 1475
},
{
"epoch": 0.58,
"learning_rate": 0.0001822050033819382,
"loss": 0.8438,
"step": 1500
},
{
"epoch": 0.59,
"learning_rate": 0.00018162199418519785,
"loss": 0.8418,
"step": 1525
},
{
"epoch": 0.6,
"learning_rate": 0.00018103055239039243,
"loss": 0.842,
"step": 1550
},
{
"epoch": 0.61,
"learning_rate": 0.0001804307391010393,
"loss": 0.8435,
"step": 1575
},
{
"epoch": 0.62,
"learning_rate": 0.00017982261628553842,
"loss": 0.8349,
"step": 1600
},
{
"epoch": 0.63,
"learning_rate": 0.0001792062467707703,
"loss": 0.8483,
"step": 1625
},
{
"epoch": 0.64,
"learning_rate": 0.0001785816942356052,
"loss": 0.8387,
"step": 1650
},
{
"epoch": 0.65,
"learning_rate": 0.00017794902320432429,
"loss": 0.843,
"step": 1675
},
{
"epoch": 0.66,
"learning_rate": 0.00017730829903995333,
"loss": 0.8424,
"step": 1700
},
{
"epoch": 0.67,
"learning_rate": 0.00017665958793751006,
"loss": 0.8418,
"step": 1725
},
{
"epoch": 0.68,
"learning_rate": 0.00017600295691716522,
"loss": 0.8384,
"step": 1750
},
{
"epoch": 0.69,
"learning_rate": 0.00017533847381731856,
"loss": 0.8445,
"step": 1775
},
{
"epoch": 0.7,
"learning_rate": 0.00017466620728759033,
"loss": 0.8446,
"step": 1800
},
{
"epoch": 0.71,
"learning_rate": 0.00017398622678172878,
"loss": 0.838,
"step": 1825
},
{
"epoch": 0.72,
"learning_rate": 0.0001732986025504348,
"loss": 0.8415,
"step": 1850
},
{
"epoch": 0.73,
"learning_rate": 0.000172603405634104,
"loss": 0.8357,
"step": 1875
},
{
"epoch": 0.74,
"learning_rate": 0.00017190070785548755,
"loss": 0.8311,
"step": 1900
},
{
"epoch": 0.75,
"learning_rate": 0.0001711905818122717,
"loss": 0.8333,
"step": 1925
},
{
"epoch": 0.76,
"learning_rate": 0.0001704731008695777,
"loss": 0.8387,
"step": 1950
},
{
"epoch": 0.77,
"learning_rate": 0.0001697483391523821,
"loss": 0.8442,
"step": 1975
},
{
"epoch": 0.78,
"learning_rate": 0.00016901637153785885,
"loss": 0.8399,
"step": 2000
},
{
"epoch": 0.78,
"eval_loss": 0.8339959383010864,
"eval_runtime": 58.5829,
"eval_samples_per_second": 12.461,
"eval_steps_per_second": 0.905,
"step": 2000
},
{
"epoch": 0.79,
"learning_rate": 0.0001682772736476434,
"loss": 0.8334,
"step": 2025
},
{
"epoch": 0.79,
"learning_rate": 0.0001675311218400201,
"loss": 0.835,
"step": 2050
},
{
"epoch": 0.8,
"learning_rate": 0.00016677799320203332,
"loss": 0.8368,
"step": 2075
},
{
"epoch": 0.81,
"learning_rate": 0.00016601796554152344,
"loss": 0.8278,
"step": 2100
},
{
"epoch": 0.82,
"learning_rate": 0.00016525111737908827,
"loss": 0.8334,
"step": 2125
},
{
"epoch": 0.83,
"learning_rate": 0.00016447752793997096,
"loss": 0.8416,
"step": 2150
},
{
"epoch": 0.84,
"learning_rate": 0.00016369727714587483,
"loss": 0.8297,
"step": 2175
},
{
"epoch": 0.85,
"learning_rate": 0.0001629104456067066,
"loss": 0.8327,
"step": 2200
},
{
"epoch": 0.86,
"learning_rate": 0.00016211711461224825,
"loss": 0.8324,
"step": 2225
},
{
"epoch": 0.87,
"learning_rate": 0.0001613173661237589,
"loss": 0.8313,
"step": 2250
},
{
"epoch": 0.88,
"learning_rate": 0.0001605112827655069,
"loss": 0.8292,
"step": 2275
},
{
"epoch": 0.89,
"learning_rate": 0.0001596989478162339,
"loss": 0.8334,
"step": 2300
},
{
"epoch": 0.9,
"learning_rate": 0.00015888044520055106,
"loss": 0.8352,
"step": 2325
},
{
"epoch": 0.91,
"learning_rate": 0.00015805585948026852,
"loss": 0.823,
"step": 2350
},
{
"epoch": 0.92,
"learning_rate": 0.000157225275845659,
"loss": 0.8293,
"step": 2375
},
{
"epoch": 0.93,
"learning_rate": 0.00015638878010665672,
"loss": 0.8289,
"step": 2400
},
{
"epoch": 0.94,
"learning_rate": 0.00015554645868399205,
"loss": 0.832,
"step": 2425
},
{
"epoch": 0.95,
"learning_rate": 0.00015469839860026308,
"loss": 0.8294,
"step": 2450
},
{
"epoch": 0.96,
"learning_rate": 0.0001538446874709452,
"loss": 0.8281,
"step": 2475
},
{
"epoch": 0.97,
"learning_rate": 0.00015298541349533925,
"loss": 0.8314,
"step": 2500
},
{
"epoch": 0.98,
"learning_rate": 0.00015212066544745926,
"loss": 0.831,
"step": 2525
},
{
"epoch": 0.99,
"learning_rate": 0.00015125053266686124,
"loss": 0.8319,
"step": 2550
},
{
"epoch": 1.0,
"learning_rate": 0.00015037510504941303,
"loss": 0.8259,
"step": 2575
},
{
"epoch": 1.01,
"learning_rate": 0.00014949447303800695,
"loss": 0.8133,
"step": 2600
},
{
"epoch": 1.02,
"learning_rate": 0.00014860872761321593,
"loss": 0.8139,
"step": 2625
},
{
"epoch": 1.03,
"learning_rate": 0.00014771796028389405,
"loss": 0.804,
"step": 2650
},
{
"epoch": 1.04,
"learning_rate": 0.0001468222630777225,
"loss": 0.8011,
"step": 2675
},
{
"epoch": 1.05,
"learning_rate": 0.00014592172853170193,
"loss": 0.8037,
"step": 2700
},
{
"epoch": 1.06,
"learning_rate": 0.00014501644968259212,
"loss": 0.8063,
"step": 2725
},
{
"epoch": 1.07,
"learning_rate": 0.00014410652005730025,
"loss": 0.8155,
"step": 2750
},
{
"epoch": 1.08,
"learning_rate": 0.00014319203366321826,
"loss": 0.8066,
"step": 2775
},
{
"epoch": 1.09,
"learning_rate": 0.0001422730849785107,
"loss": 0.8091,
"step": 2800
},
{
"epoch": 1.1,
"learning_rate": 0.0001413497689423539,
"loss": 0.8067,
"step": 2825
},
{
"epoch": 1.11,
"learning_rate": 0.00014042218094512755,
"loss": 0.8046,
"step": 2850
},
{
"epoch": 1.11,
"learning_rate": 0.00013949041681855985,
"loss": 0.8053,
"step": 2875
},
{
"epoch": 1.12,
"learning_rate": 0.0001385545728258264,
"loss": 0.8075,
"step": 2900
}
],
"max_steps": 7737,
"num_train_epochs": 3,
"total_flos": 1.248869156726086e+19,
"trial_name": null,
"trial_params": null
}