{
"best_metric": 1.4853540658950806,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 2.26628895184136,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0113314447592068,
"grad_norm": 0.3300837278366089,
"learning_rate": 5e-06,
"loss": 1.8331,
"step": 1
},
{
"epoch": 0.0113314447592068,
"eval_loss": 1.8883453607559204,
"eval_runtime": 4.1388,
"eval_samples_per_second": 36.001,
"eval_steps_per_second": 18.121,
"step": 1
},
{
"epoch": 0.0226628895184136,
"grad_norm": 0.32841190695762634,
"learning_rate": 1e-05,
"loss": 1.7121,
"step": 2
},
{
"epoch": 0.0339943342776204,
"grad_norm": 0.35538652539253235,
"learning_rate": 1.5e-05,
"loss": 1.7913,
"step": 3
},
{
"epoch": 0.0453257790368272,
"grad_norm": 0.42074787616729736,
"learning_rate": 2e-05,
"loss": 1.8042,
"step": 4
},
{
"epoch": 0.056657223796033995,
"grad_norm": 0.3599228262901306,
"learning_rate": 2.5e-05,
"loss": 1.7287,
"step": 5
},
{
"epoch": 0.0679886685552408,
"grad_norm": 0.36890357732772827,
"learning_rate": 3e-05,
"loss": 1.7747,
"step": 6
},
{
"epoch": 0.07932011331444759,
"grad_norm": 0.3220349848270416,
"learning_rate": 3.5e-05,
"loss": 1.6599,
"step": 7
},
{
"epoch": 0.0906515580736544,
"grad_norm": 0.3235204815864563,
"learning_rate": 4e-05,
"loss": 1.7932,
"step": 8
},
{
"epoch": 0.10198300283286119,
"grad_norm": 0.3298715353012085,
"learning_rate": 4.5e-05,
"loss": 1.79,
"step": 9
},
{
"epoch": 0.11331444759206799,
"grad_norm": 0.3283751308917999,
"learning_rate": 5e-05,
"loss": 1.7863,
"step": 10
},
{
"epoch": 0.12464589235127478,
"grad_norm": 0.26808610558509827,
"learning_rate": 5.500000000000001e-05,
"loss": 1.7071,
"step": 11
},
{
"epoch": 0.1359773371104816,
"grad_norm": 0.2770124673843384,
"learning_rate": 6e-05,
"loss": 1.7944,
"step": 12
},
{
"epoch": 0.14730878186968838,
"grad_norm": 0.28054431080818176,
"learning_rate": 6.500000000000001e-05,
"loss": 1.7242,
"step": 13
},
{
"epoch": 0.15864022662889518,
"grad_norm": 0.32576805353164673,
"learning_rate": 7e-05,
"loss": 1.8406,
"step": 14
},
{
"epoch": 0.16997167138810199,
"grad_norm": 0.2758873999118805,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8154,
"step": 15
},
{
"epoch": 0.1813031161473088,
"grad_norm": 0.2741672694683075,
"learning_rate": 8e-05,
"loss": 1.6475,
"step": 16
},
{
"epoch": 0.19263456090651557,
"grad_norm": 0.3316316306591034,
"learning_rate": 8.5e-05,
"loss": 1.9773,
"step": 17
},
{
"epoch": 0.20396600566572237,
"grad_norm": 0.3196592926979065,
"learning_rate": 9e-05,
"loss": 1.8618,
"step": 18
},
{
"epoch": 0.21529745042492918,
"grad_norm": 0.3236810863018036,
"learning_rate": 9.5e-05,
"loss": 1.9181,
"step": 19
},
{
"epoch": 0.22662889518413598,
"grad_norm": 0.3469908535480499,
"learning_rate": 0.0001,
"loss": 1.8947,
"step": 20
},
{
"epoch": 0.23796033994334279,
"grad_norm": 0.3735293447971344,
"learning_rate": 9.999238475781957e-05,
"loss": 1.9484,
"step": 21
},
{
"epoch": 0.24929178470254956,
"grad_norm": 0.39644795656204224,
"learning_rate": 9.99695413509548e-05,
"loss": 1.8885,
"step": 22
},
{
"epoch": 0.26062322946175637,
"grad_norm": 0.2631559669971466,
"learning_rate": 9.99314767377287e-05,
"loss": 1.6607,
"step": 23
},
{
"epoch": 0.2719546742209632,
"grad_norm": 0.2586005628108978,
"learning_rate": 9.987820251299122e-05,
"loss": 1.6638,
"step": 24
},
{
"epoch": 0.28328611898017,
"grad_norm": 0.26188376545906067,
"learning_rate": 9.980973490458728e-05,
"loss": 1.6913,
"step": 25
},
{
"epoch": 0.29461756373937675,
"grad_norm": 0.2730318009853363,
"learning_rate": 9.972609476841367e-05,
"loss": 1.7569,
"step": 26
},
{
"epoch": 0.3059490084985836,
"grad_norm": 0.25958484411239624,
"learning_rate": 9.962730758206611e-05,
"loss": 1.5928,
"step": 27
},
{
"epoch": 0.31728045325779036,
"grad_norm": 0.23184828460216522,
"learning_rate": 9.951340343707852e-05,
"loss": 1.5851,
"step": 28
},
{
"epoch": 0.3286118980169972,
"grad_norm": 0.2501068115234375,
"learning_rate": 9.938441702975689e-05,
"loss": 1.5588,
"step": 29
},
{
"epoch": 0.33994334277620397,
"grad_norm": 0.23349833488464355,
"learning_rate": 9.924038765061042e-05,
"loss": 1.6563,
"step": 30
},
{
"epoch": 0.35127478753541075,
"grad_norm": 0.2508915364742279,
"learning_rate": 9.908135917238321e-05,
"loss": 1.7599,
"step": 31
},
{
"epoch": 0.3626062322946176,
"grad_norm": 0.20294737815856934,
"learning_rate": 9.890738003669029e-05,
"loss": 1.6546,
"step": 32
},
{
"epoch": 0.37393767705382436,
"grad_norm": 0.2152402102947235,
"learning_rate": 9.871850323926177e-05,
"loss": 1.6345,
"step": 33
},
{
"epoch": 0.38526912181303113,
"grad_norm": 0.21918921172618866,
"learning_rate": 9.851478631379982e-05,
"loss": 1.6768,
"step": 34
},
{
"epoch": 0.39660056657223797,
"grad_norm": 0.2138763815164566,
"learning_rate": 9.829629131445342e-05,
"loss": 1.6876,
"step": 35
},
{
"epoch": 0.40793201133144474,
"grad_norm": 0.2552635371685028,
"learning_rate": 9.806308479691595e-05,
"loss": 1.6482,
"step": 36
},
{
"epoch": 0.4192634560906516,
"grad_norm": 0.2674317955970764,
"learning_rate": 9.781523779815179e-05,
"loss": 1.5806,
"step": 37
},
{
"epoch": 0.43059490084985835,
"grad_norm": 0.2603086531162262,
"learning_rate": 9.755282581475769e-05,
"loss": 1.7077,
"step": 38
},
{
"epoch": 0.44192634560906513,
"grad_norm": 0.28181904554367065,
"learning_rate": 9.727592877996585e-05,
"loss": 1.8476,
"step": 39
},
{
"epoch": 0.45325779036827196,
"grad_norm": 0.231564462184906,
"learning_rate": 9.698463103929542e-05,
"loss": 1.8184,
"step": 40
},
{
"epoch": 0.46458923512747874,
"grad_norm": 0.3045034408569336,
"learning_rate": 9.667902132486009e-05,
"loss": 1.7776,
"step": 41
},
{
"epoch": 0.47592067988668557,
"grad_norm": 0.2897624373435974,
"learning_rate": 9.635919272833938e-05,
"loss": 1.7272,
"step": 42
},
{
"epoch": 0.48725212464589235,
"grad_norm": 0.3477663993835449,
"learning_rate": 9.602524267262203e-05,
"loss": 1.7398,
"step": 43
},
{
"epoch": 0.4985835694050991,
"grad_norm": 0.38361623883247375,
"learning_rate": 9.567727288213005e-05,
"loss": 1.6758,
"step": 44
},
{
"epoch": 0.509915014164306,
"grad_norm": 0.18529650568962097,
"learning_rate": 9.53153893518325e-05,
"loss": 1.7981,
"step": 45
},
{
"epoch": 0.5212464589235127,
"grad_norm": 0.21174763143062592,
"learning_rate": 9.493970231495835e-05,
"loss": 1.5973,
"step": 46
},
{
"epoch": 0.5325779036827195,
"grad_norm": 0.2123510092496872,
"learning_rate": 9.45503262094184e-05,
"loss": 1.5274,
"step": 47
},
{
"epoch": 0.5439093484419264,
"grad_norm": 0.2224183976650238,
"learning_rate": 9.414737964294636e-05,
"loss": 1.681,
"step": 48
},
{
"epoch": 0.5552407932011332,
"grad_norm": 0.19995258748531342,
"learning_rate": 9.373098535696979e-05,
"loss": 1.5011,
"step": 49
},
{
"epoch": 0.56657223796034,
"grad_norm": 0.20688362419605255,
"learning_rate": 9.330127018922194e-05,
"loss": 1.5265,
"step": 50
},
{
"epoch": 0.56657223796034,
"eval_loss": 1.673092007637024,
"eval_runtime": 4.1345,
"eval_samples_per_second": 36.038,
"eval_steps_per_second": 18.14,
"step": 50
},
{
"epoch": 0.5779036827195467,
"grad_norm": 0.22919419407844543,
"learning_rate": 9.285836503510562e-05,
"loss": 1.5773,
"step": 51
},
{
"epoch": 0.5892351274787535,
"grad_norm": 0.2083871066570282,
"learning_rate": 9.24024048078213e-05,
"loss": 1.6264,
"step": 52
},
{
"epoch": 0.6005665722379604,
"grad_norm": 0.23099252581596375,
"learning_rate": 9.193352839727121e-05,
"loss": 1.6245,
"step": 53
},
{
"epoch": 0.6118980169971672,
"grad_norm": 0.18274392187595367,
"learning_rate": 9.145187862775209e-05,
"loss": 1.5639,
"step": 54
},
{
"epoch": 0.623229461756374,
"grad_norm": 0.21337157487869263,
"learning_rate": 9.09576022144496e-05,
"loss": 1.5712,
"step": 55
},
{
"epoch": 0.6345609065155807,
"grad_norm": 0.22220197319984436,
"learning_rate": 9.045084971874738e-05,
"loss": 1.7288,
"step": 56
},
{
"epoch": 0.6458923512747875,
"grad_norm": 0.22244270145893097,
"learning_rate": 8.993177550236464e-05,
"loss": 1.5586,
"step": 57
},
{
"epoch": 0.6572237960339944,
"grad_norm": 0.2518632411956787,
"learning_rate": 8.940053768033609e-05,
"loss": 1.6115,
"step": 58
},
{
"epoch": 0.6685552407932012,
"grad_norm": 0.29246291518211365,
"learning_rate": 8.885729807284856e-05,
"loss": 1.5243,
"step": 59
},
{
"epoch": 0.6798866855524079,
"grad_norm": 0.25210002064704895,
"learning_rate": 8.83022221559489e-05,
"loss": 1.5797,
"step": 60
},
{
"epoch": 0.6912181303116147,
"grad_norm": 0.25076404213905334,
"learning_rate": 8.773547901113862e-05,
"loss": 1.5713,
"step": 61
},
{
"epoch": 0.7025495750708215,
"grad_norm": 0.25076547265052795,
"learning_rate": 8.715724127386972e-05,
"loss": 1.6577,
"step": 62
},
{
"epoch": 0.7138810198300283,
"grad_norm": 0.30937209725379944,
"learning_rate": 8.656768508095853e-05,
"loss": 1.718,
"step": 63
},
{
"epoch": 0.7252124645892352,
"grad_norm": 0.26684439182281494,
"learning_rate": 8.596699001693255e-05,
"loss": 1.6358,
"step": 64
},
{
"epoch": 0.7365439093484419,
"grad_norm": 0.30793294310569763,
"learning_rate": 8.535533905932738e-05,
"loss": 1.6784,
"step": 65
},
{
"epoch": 0.7478753541076487,
"grad_norm": 0.3234283924102783,
"learning_rate": 8.473291852294987e-05,
"loss": 1.6097,
"step": 66
},
{
"epoch": 0.7592067988668555,
"grad_norm": 0.1931600272655487,
"learning_rate": 8.409991800312493e-05,
"loss": 1.594,
"step": 67
},
{
"epoch": 0.7705382436260623,
"grad_norm": 0.20462581515312195,
"learning_rate": 8.345653031794292e-05,
"loss": 1.4693,
"step": 68
},
{
"epoch": 0.7818696883852692,
"grad_norm": 0.24034181237220764,
"learning_rate": 8.280295144952536e-05,
"loss": 1.6061,
"step": 69
},
{
"epoch": 0.7932011331444759,
"grad_norm": 0.26657557487487793,
"learning_rate": 8.213938048432697e-05,
"loss": 1.6939,
"step": 70
},
{
"epoch": 0.8045325779036827,
"grad_norm": 0.23830075562000275,
"learning_rate": 8.146601955249188e-05,
"loss": 1.4996,
"step": 71
},
{
"epoch": 0.8158640226628895,
"grad_norm": 0.26126208901405334,
"learning_rate": 8.07830737662829e-05,
"loss": 1.6027,
"step": 72
},
{
"epoch": 0.8271954674220963,
"grad_norm": 0.22615301609039307,
"learning_rate": 8.009075115760243e-05,
"loss": 1.4696,
"step": 73
},
{
"epoch": 0.8385269121813032,
"grad_norm": 0.20603279769420624,
"learning_rate": 7.938926261462366e-05,
"loss": 1.5004,
"step": 74
},
{
"epoch": 0.8498583569405099,
"grad_norm": 0.2174687385559082,
"learning_rate": 7.86788218175523e-05,
"loss": 1.6208,
"step": 75
},
{
"epoch": 0.8611898016997167,
"grad_norm": 0.26254647970199585,
"learning_rate": 7.795964517353735e-05,
"loss": 1.5828,
"step": 76
},
{
"epoch": 0.8725212464589235,
"grad_norm": 0.23399892449378967,
"learning_rate": 7.723195175075136e-05,
"loss": 1.4614,
"step": 77
},
{
"epoch": 0.8838526912181303,
"grad_norm": 0.27953198552131653,
"learning_rate": 7.649596321166024e-05,
"loss": 1.5498,
"step": 78
},
{
"epoch": 0.8951841359773371,
"grad_norm": 0.23206909000873566,
"learning_rate": 7.575190374550272e-05,
"loss": 1.5383,
"step": 79
},
{
"epoch": 0.9065155807365439,
"grad_norm": 0.2643072307109833,
"learning_rate": 7.500000000000001e-05,
"loss": 1.4548,
"step": 80
},
{
"epoch": 0.9178470254957507,
"grad_norm": 0.27832943201065063,
"learning_rate": 7.424048101231686e-05,
"loss": 1.5341,
"step": 81
},
{
"epoch": 0.9291784702549575,
"grad_norm": 0.26693248748779297,
"learning_rate": 7.347357813929454e-05,
"loss": 1.6302,
"step": 82
},
{
"epoch": 0.9405099150141643,
"grad_norm": 0.3323819935321808,
"learning_rate": 7.269952498697734e-05,
"loss": 1.6596,
"step": 83
},
{
"epoch": 0.9518413597733711,
"grad_norm": 0.26522237062454224,
"learning_rate": 7.191855733945387e-05,
"loss": 1.6442,
"step": 84
},
{
"epoch": 0.9631728045325779,
"grad_norm": 0.32830557227134705,
"learning_rate": 7.113091308703498e-05,
"loss": 1.6136,
"step": 85
},
{
"epoch": 0.9745042492917847,
"grad_norm": 0.33412694931030273,
"learning_rate": 7.033683215379002e-05,
"loss": 1.6131,
"step": 86
},
{
"epoch": 0.9858356940509915,
"grad_norm": 0.3402051329612732,
"learning_rate": 6.953655642446368e-05,
"loss": 1.5409,
"step": 87
},
{
"epoch": 0.9971671388101983,
"grad_norm": 0.3674866259098053,
"learning_rate": 6.873032967079561e-05,
"loss": 1.5472,
"step": 88
},
{
"epoch": 1.0084985835694051,
"grad_norm": 0.4021819233894348,
"learning_rate": 6.7918397477265e-05,
"loss": 2.9136,
"step": 89
},
{
"epoch": 1.019830028328612,
"grad_norm": 0.1946014165878296,
"learning_rate": 6.710100716628344e-05,
"loss": 1.4662,
"step": 90
},
{
"epoch": 1.0311614730878187,
"grad_norm": 0.23185749351978302,
"learning_rate": 6.627840772285784e-05,
"loss": 1.4939,
"step": 91
},
{
"epoch": 1.0424929178470255,
"grad_norm": 0.23428942263126373,
"learning_rate": 6.545084971874738e-05,
"loss": 1.6243,
"step": 92
},
{
"epoch": 1.0538243626062322,
"grad_norm": 0.20384444296360016,
"learning_rate": 6.461858523613684e-05,
"loss": 1.4134,
"step": 93
},
{
"epoch": 1.065155807365439,
"grad_norm": 0.2560741901397705,
"learning_rate": 6.378186779084995e-05,
"loss": 1.5125,
"step": 94
},
{
"epoch": 1.0764872521246458,
"grad_norm": 0.26942193508148193,
"learning_rate": 6.294095225512603e-05,
"loss": 1.395,
"step": 95
},
{
"epoch": 1.0878186968838528,
"grad_norm": 0.24636012315750122,
"learning_rate": 6.209609477998338e-05,
"loss": 1.4569,
"step": 96
},
{
"epoch": 1.0991501416430596,
"grad_norm": 0.22894567251205444,
"learning_rate": 6.124755271719325e-05,
"loss": 1.5568,
"step": 97
},
{
"epoch": 1.1104815864022664,
"grad_norm": 0.21801277995109558,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.4842,
"step": 98
},
{
"epoch": 1.1218130311614731,
"grad_norm": 0.24690905213356018,
"learning_rate": 5.9540449768827246e-05,
"loss": 1.4222,
"step": 99
},
{
"epoch": 1.13314447592068,
"grad_norm": 0.23386448621749878,
"learning_rate": 5.868240888334653e-05,
"loss": 1.5666,
"step": 100
},
{
"epoch": 1.13314447592068,
"eval_loss": 1.5487265586853027,
"eval_runtime": 4.1386,
"eval_samples_per_second": 36.002,
"eval_steps_per_second": 18.122,
"step": 100
},
{
"epoch": 1.1444759206798867,
"grad_norm": 0.2477850615978241,
"learning_rate": 5.782172325201155e-05,
"loss": 1.4764,
"step": 101
},
{
"epoch": 1.1558073654390935,
"grad_norm": 0.30012187361717224,
"learning_rate": 5.695865504800327e-05,
"loss": 1.3427,
"step": 102
},
{
"epoch": 1.1671388101983002,
"grad_norm": 0.2721880376338959,
"learning_rate": 5.6093467170257374e-05,
"loss": 1.4597,
"step": 103
},
{
"epoch": 1.178470254957507,
"grad_norm": 0.30785053968429565,
"learning_rate": 5.522642316338268e-05,
"loss": 1.4804,
"step": 104
},
{
"epoch": 1.1898016997167138,
"grad_norm": 0.34216204285621643,
"learning_rate": 5.435778713738292e-05,
"loss": 1.6197,
"step": 105
},
{
"epoch": 1.2011331444759206,
"grad_norm": 0.3311666250228882,
"learning_rate": 5.348782368720626e-05,
"loss": 1.6815,
"step": 106
},
{
"epoch": 1.2124645892351276,
"grad_norm": 0.30523356795310974,
"learning_rate": 5.26167978121472e-05,
"loss": 1.5614,
"step": 107
},
{
"epoch": 1.2237960339943343,
"grad_norm": 0.34505927562713623,
"learning_rate": 5.174497483512506e-05,
"loss": 1.4657,
"step": 108
},
{
"epoch": 1.2351274787535411,
"grad_norm": 0.36857518553733826,
"learning_rate": 5.0872620321864185e-05,
"loss": 1.539,
"step": 109
},
{
"epoch": 1.246458923512748,
"grad_norm": 0.4266189932823181,
"learning_rate": 5e-05,
"loss": 1.473,
"step": 110
},
{
"epoch": 1.2577903682719547,
"grad_norm": 0.2451467663049698,
"learning_rate": 4.912737967813583e-05,
"loss": 1.5531,
"step": 111
},
{
"epoch": 1.2691218130311614,
"grad_norm": 0.21580742299556732,
"learning_rate": 4.825502516487497e-05,
"loss": 1.4198,
"step": 112
},
{
"epoch": 1.2804532577903682,
"grad_norm": 0.24123479425907135,
"learning_rate": 4.738320218785281e-05,
"loss": 1.4452,
"step": 113
},
{
"epoch": 1.291784702549575,
"grad_norm": 0.2361007034778595,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.5802,
"step": 114
},
{
"epoch": 1.3031161473087818,
"grad_norm": 0.23018887639045715,
"learning_rate": 4.564221286261709e-05,
"loss": 1.4528,
"step": 115
},
{
"epoch": 1.3144475920679888,
"grad_norm": 0.2591703534126282,
"learning_rate": 4.477357683661734e-05,
"loss": 1.4171,
"step": 116
},
{
"epoch": 1.3257790368271953,
"grad_norm": 0.22410644590854645,
"learning_rate": 4.390653282974264e-05,
"loss": 1.4474,
"step": 117
},
{
"epoch": 1.3371104815864023,
"grad_norm": 0.25192803144454956,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.4302,
"step": 118
},
{
"epoch": 1.348441926345609,
"grad_norm": 0.2527298331260681,
"learning_rate": 4.2178276747988446e-05,
"loss": 1.4655,
"step": 119
},
{
"epoch": 1.3597733711048159,
"grad_norm": 0.2601375877857208,
"learning_rate": 4.131759111665349e-05,
"loss": 1.4757,
"step": 120
},
{
"epoch": 1.3711048158640227,
"grad_norm": 0.23596332967281342,
"learning_rate": 4.045955023117276e-05,
"loss": 1.4582,
"step": 121
},
{
"epoch": 1.3824362606232294,
"grad_norm": 0.24836811423301697,
"learning_rate": 3.960441545911204e-05,
"loss": 1.4937,
"step": 122
},
{
"epoch": 1.3937677053824362,
"grad_norm": 0.3072097897529602,
"learning_rate": 3.875244728280676e-05,
"loss": 1.4214,
"step": 123
},
{
"epoch": 1.405099150141643,
"grad_norm": 0.28567826747894287,
"learning_rate": 3.790390522001662e-05,
"loss": 1.4633,
"step": 124
},
{
"epoch": 1.41643059490085,
"grad_norm": 0.2974615693092346,
"learning_rate": 3.705904774487396e-05,
"loss": 1.4091,
"step": 125
},
{
"epoch": 1.4277620396600565,
"grad_norm": 0.28619298338890076,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.3751,
"step": 126
},
{
"epoch": 1.4390934844192635,
"grad_norm": 0.2666260600090027,
"learning_rate": 3.5381414763863166e-05,
"loss": 1.4802,
"step": 127
},
{
"epoch": 1.4504249291784703,
"grad_norm": 0.3570886552333832,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.3981,
"step": 128
},
{
"epoch": 1.461756373937677,
"grad_norm": 0.3145367503166199,
"learning_rate": 3.372159227714218e-05,
"loss": 1.5997,
"step": 129
},
{
"epoch": 1.4730878186968839,
"grad_norm": 0.3386094868183136,
"learning_rate": 3.289899283371657e-05,
"loss": 1.4661,
"step": 130
},
{
"epoch": 1.4844192634560907,
"grad_norm": 0.3534667491912842,
"learning_rate": 3.2081602522734986e-05,
"loss": 1.4785,
"step": 131
},
{
"epoch": 1.4957507082152974,
"grad_norm": 0.3897930383682251,
"learning_rate": 3.12696703292044e-05,
"loss": 1.415,
"step": 132
},
{
"epoch": 1.5070821529745042,
"grad_norm": 0.23832640051841736,
"learning_rate": 3.046344357553632e-05,
"loss": 1.4344,
"step": 133
},
{
"epoch": 1.5184135977337112,
"grad_norm": 0.23151782155036926,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.4103,
"step": 134
},
{
"epoch": 1.5297450424929178,
"grad_norm": 0.22246257960796356,
"learning_rate": 2.886908691296504e-05,
"loss": 1.4704,
"step": 135
},
{
"epoch": 1.5410764872521248,
"grad_norm": 0.20327092707157135,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.5477,
"step": 136
},
{
"epoch": 1.5524079320113313,
"grad_norm": 0.2647717297077179,
"learning_rate": 2.7300475013022663e-05,
"loss": 1.4549,
"step": 137
},
{
"epoch": 1.5637393767705383,
"grad_norm": 0.28701451420783997,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.3741,
"step": 138
},
{
"epoch": 1.575070821529745,
"grad_norm": 0.23964330554008484,
"learning_rate": 2.575951898768315e-05,
"loss": 1.355,
"step": 139
},
{
"epoch": 1.5864022662889519,
"grad_norm": 0.2663789689540863,
"learning_rate": 2.500000000000001e-05,
"loss": 1.348,
"step": 140
},
{
"epoch": 1.5977337110481586,
"grad_norm": 0.26375189423561096,
"learning_rate": 2.4248096254497288e-05,
"loss": 1.5205,
"step": 141
},
{
"epoch": 1.6090651558073654,
"grad_norm": 0.2846963107585907,
"learning_rate": 2.350403678833976e-05,
"loss": 1.5513,
"step": 142
},
{
"epoch": 1.6203966005665722,
"grad_norm": 0.2618456184864044,
"learning_rate": 2.2768048249248648e-05,
"loss": 1.4012,
"step": 143
},
{
"epoch": 1.631728045325779,
"grad_norm": 0.2383047491312027,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.3704,
"step": 144
},
{
"epoch": 1.643059490084986,
"grad_norm": 0.30709442496299744,
"learning_rate": 2.132117818244771e-05,
"loss": 1.525,
"step": 145
},
{
"epoch": 1.6543909348441925,
"grad_norm": 0.2546956539154053,
"learning_rate": 2.061073738537635e-05,
"loss": 1.4563,
"step": 146
},
{
"epoch": 1.6657223796033995,
"grad_norm": 0.27411651611328125,
"learning_rate": 1.9909248842397584e-05,
"loss": 1.3791,
"step": 147
},
{
"epoch": 1.677053824362606,
"grad_norm": 0.24941976368427277,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.4491,
"step": 148
},
{
"epoch": 1.688385269121813,
"grad_norm": 0.3089727461338043,
"learning_rate": 1.8533980447508137e-05,
"loss": 1.5384,
"step": 149
},
{
"epoch": 1.6997167138810199,
"grad_norm": 0.2955828309059143,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.5034,
"step": 150
},
{
"epoch": 1.6997167138810199,
"eval_loss": 1.4917516708374023,
"eval_runtime": 4.1358,
"eval_samples_per_second": 36.027,
"eval_steps_per_second": 18.135,
"step": 150
},
{
"epoch": 1.7110481586402266,
"grad_norm": 0.35035616159439087,
"learning_rate": 1.7197048550474643e-05,
"loss": 1.4943,
"step": 151
},
{
"epoch": 1.7223796033994334,
"grad_norm": 0.3925711512565613,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.4122,
"step": 152
},
{
"epoch": 1.7337110481586402,
"grad_norm": 0.3977547287940979,
"learning_rate": 1.5900081996875083e-05,
"loss": 1.4111,
"step": 153
},
{
"epoch": 1.7450424929178472,
"grad_norm": 0.3666156530380249,
"learning_rate": 1.526708147705013e-05,
"loss": 1.3458,
"step": 154
},
{
"epoch": 1.7563739376770537,
"grad_norm": 0.2597617506980896,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.7857,
"step": 155
},
{
"epoch": 1.7677053824362607,
"grad_norm": 0.23084954917430878,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.3318,
"step": 156
},
{
"epoch": 1.7790368271954673,
"grad_norm": 0.2235339879989624,
"learning_rate": 1.3432314919041478e-05,
"loss": 1.4747,
"step": 157
},
{
"epoch": 1.7903682719546743,
"grad_norm": 0.2879646420478821,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.4755,
"step": 158
},
{
"epoch": 1.801699716713881,
"grad_norm": 0.2572542726993561,
"learning_rate": 1.22645209888614e-05,
"loss": 1.4433,
"step": 159
},
{
"epoch": 1.8130311614730878,
"grad_norm": 0.2513543963432312,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.3965,
"step": 160
},
{
"epoch": 1.8243626062322946,
"grad_norm": 0.21489116549491882,
"learning_rate": 1.1142701927151456e-05,
"loss": 1.33,
"step": 161
},
{
"epoch": 1.8356940509915014,
"grad_norm": 0.22866599261760712,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.394,
"step": 162
},
{
"epoch": 1.8470254957507082,
"grad_norm": 0.26061880588531494,
"learning_rate": 1.006822449763537e-05,
"loss": 1.5896,
"step": 163
},
{
"epoch": 1.858356940509915,
"grad_norm": 0.282439261674881,
"learning_rate": 9.549150281252633e-06,
"loss": 1.3966,
"step": 164
},
{
"epoch": 1.869688385269122,
"grad_norm": 0.24080446362495422,
"learning_rate": 9.042397785550405e-06,
"loss": 1.362,
"step": 165
},
{
"epoch": 1.8810198300283285,
"grad_norm": 0.25717172026634216,
"learning_rate": 8.548121372247918e-06,
"loss": 1.4998,
"step": 166
},
{
"epoch": 1.8923512747875355,
"grad_norm": 0.2697215676307678,
"learning_rate": 8.066471602728803e-06,
"loss": 1.3807,
"step": 167
},
{
"epoch": 1.903682719546742,
"grad_norm": 0.310925155878067,
"learning_rate": 7.597595192178702e-06,
"loss": 1.4482,
"step": 168
},
{
"epoch": 1.915014164305949,
"grad_norm": 0.29516807198524475,
"learning_rate": 7.1416349648943894e-06,
"loss": 1.4043,
"step": 169
},
{
"epoch": 1.9263456090651558,
"grad_norm": 0.296526163816452,
"learning_rate": 6.698729810778065e-06,
"loss": 1.2728,
"step": 170
},
{
"epoch": 1.9376770538243626,
"grad_norm": 0.29434874653816223,
"learning_rate": 6.269014643030213e-06,
"loss": 1.5259,
"step": 171
},
{
"epoch": 1.9490084985835694,
"grad_norm": 0.33363068103790283,
"learning_rate": 5.852620357053651e-06,
"loss": 1.5351,
"step": 172
},
{
"epoch": 1.9603399433427762,
"grad_norm": 0.316654235124588,
"learning_rate": 5.449673790581611e-06,
"loss": 1.461,
"step": 173
},
{
"epoch": 1.9716713881019832,
"grad_norm": 0.32432806491851807,
"learning_rate": 5.060297685041659e-06,
"loss": 1.4102,
"step": 174
},
{
"epoch": 1.9830028328611897,
"grad_norm": 0.42273372411727905,
"learning_rate": 4.684610648167503e-06,
"loss": 1.4292,
"step": 175
},
{
"epoch": 1.9943342776203967,
"grad_norm": 0.4274376332759857,
"learning_rate": 4.322727117869951e-06,
"loss": 1.3414,
"step": 176
},
{
"epoch": 2.0056657223796033,
"grad_norm": 0.5224671959877014,
"learning_rate": 3.974757327377981e-06,
"loss": 2.7612,
"step": 177
},
{
"epoch": 2.0169971671388103,
"grad_norm": 0.21540716290473938,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.3881,
"step": 178
},
{
"epoch": 2.028328611898017,
"grad_norm": 0.23689907789230347,
"learning_rate": 3.3209786751399187e-06,
"loss": 1.4468,
"step": 179
},
{
"epoch": 2.039660056657224,
"grad_norm": 0.20983755588531494,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.5177,
"step": 180
},
{
"epoch": 2.0509915014164304,
"grad_norm": 0.2480371594429016,
"learning_rate": 2.724071220034158e-06,
"loss": 1.4568,
"step": 181
},
{
"epoch": 2.0623229461756374,
"grad_norm": 0.22640255093574524,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.3294,
"step": 182
},
{
"epoch": 2.0736543909348444,
"grad_norm": 0.2093873769044876,
"learning_rate": 2.1847622018482283e-06,
"loss": 1.3515,
"step": 183
},
{
"epoch": 2.084985835694051,
"grad_norm": 0.26255685091018677,
"learning_rate": 1.9369152030840556e-06,
"loss": 1.3853,
"step": 184
},
{
"epoch": 2.096317280453258,
"grad_norm": 0.28129681944847107,
"learning_rate": 1.70370868554659e-06,
"loss": 1.4554,
"step": 185
},
{
"epoch": 2.1076487252124645,
"grad_norm": 0.24436384439468384,
"learning_rate": 1.4852136862001764e-06,
"loss": 1.3988,
"step": 186
},
{
"epoch": 2.1189801699716715,
"grad_norm": 0.2415541410446167,
"learning_rate": 1.2814967607382432e-06,
"loss": 1.397,
"step": 187
},
{
"epoch": 2.130311614730878,
"grad_norm": 0.2871330678462982,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.4888,
"step": 188
},
{
"epoch": 2.141643059490085,
"grad_norm": 0.2677474915981293,
"learning_rate": 9.186408276168013e-07,
"loss": 1.352,
"step": 189
},
{
"epoch": 2.1529745042492916,
"grad_norm": 0.2391105592250824,
"learning_rate": 7.596123493895991e-07,
"loss": 1.4341,
"step": 190
},
{
"epoch": 2.1643059490084986,
"grad_norm": 0.29239320755004883,
"learning_rate": 6.15582970243117e-07,
"loss": 1.3461,
"step": 191
},
{
"epoch": 2.1756373937677056,
"grad_norm": 0.29971420764923096,
"learning_rate": 4.865965629214819e-07,
"loss": 1.3182,
"step": 192
},
{
"epoch": 2.186968838526912,
"grad_norm": 0.2861343026161194,
"learning_rate": 3.7269241793390085e-07,
"loss": 1.5046,
"step": 193
},
{
"epoch": 2.198300283286119,
"grad_norm": 0.35035207867622375,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.4593,
"step": 194
},
{
"epoch": 2.2096317280453257,
"grad_norm": 0.3201359212398529,
"learning_rate": 1.9026509541272275e-07,
"loss": 1.4772,
"step": 195
},
{
"epoch": 2.2209631728045327,
"grad_norm": 0.3481467068195343,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.4378,
"step": 196
},
{
"epoch": 2.2322946175637393,
"grad_norm": 0.3610140383243561,
"learning_rate": 6.852326227130834e-08,
"loss": 1.3915,
"step": 197
},
{
"epoch": 2.2436260623229463,
"grad_norm": 0.4310694634914398,
"learning_rate": 3.04586490452119e-08,
"loss": 1.3193,
"step": 198
},
{
"epoch": 2.254957507082153,
"grad_norm": 0.2926948070526123,
"learning_rate": 7.615242180436522e-09,
"loss": 1.4718,
"step": 199
},
{
"epoch": 2.26628895184136,
"grad_norm": 0.21256959438323975,
"learning_rate": 0.0,
"loss": 1.2755,
"step": 200
},
{
"epoch": 2.26628895184136,
"eval_loss": 1.4853540658950806,
"eval_runtime": 4.1391,
"eval_samples_per_second": 35.998,
"eval_steps_per_second": 18.12,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9879433520873472.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}