{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.06291899179806,
"eval_steps": 42,
"global_step": 420,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00014980712332871428,
"eval_loss": 1.160882592201233,
"eval_runtime": 311.16,
"eval_samples_per_second": 36.133,
"eval_steps_per_second": 4.519,
"step": 1
},
{
"epoch": 0.00044942136998614283,
"grad_norm": 0.12129199504852295,
"learning_rate": 1.5e-05,
"loss": 1.1193,
"step": 3
},
{
"epoch": 0.0008988427399722857,
"grad_norm": 0.11971050500869751,
"learning_rate": 3e-05,
"loss": 1.103,
"step": 6
},
{
"epoch": 0.0013482641099584285,
"grad_norm": 0.10862895101308823,
"learning_rate": 4.5e-05,
"loss": 1.0659,
"step": 9
},
{
"epoch": 0.0017976854799445713,
"grad_norm": 0.11977853626012802,
"learning_rate": 4.9997944716957985e-05,
"loss": 1.0914,
"step": 12
},
{
"epoch": 0.0022471068499307144,
"grad_norm": 0.12554779648780823,
"learning_rate": 4.99871554050172e-05,
"loss": 1.1725,
"step": 15
},
{
"epoch": 0.002696528219916857,
"grad_norm": 0.13287784159183502,
"learning_rate": 4.996712222958461e-05,
"loss": 1.1145,
"step": 18
},
{
"epoch": 0.003145949589903,
"grad_norm": 0.1167389452457428,
"learning_rate": 4.993785260182552e-05,
"loss": 1.0956,
"step": 21
},
{
"epoch": 0.0035953709598891426,
"grad_norm": 0.11548212915658951,
"learning_rate": 4.989935734988098e-05,
"loss": 1.12,
"step": 24
},
{
"epoch": 0.004044792329875285,
"grad_norm": 0.12251826375722885,
"learning_rate": 4.9851650714862006e-05,
"loss": 1.0895,
"step": 27
},
{
"epoch": 0.004494213699861429,
"grad_norm": 0.12304849177598953,
"learning_rate": 4.979475034558115e-05,
"loss": 1.1302,
"step": 30
},
{
"epoch": 0.004943635069847571,
"grad_norm": 0.12594439089298248,
"learning_rate": 4.9728677292023405e-05,
"loss": 1.0825,
"step": 33
},
{
"epoch": 0.005393056439833714,
"grad_norm": 0.11802031844854355,
"learning_rate": 4.965345599755887e-05,
"loss": 1.0949,
"step": 36
},
{
"epoch": 0.005842477809819857,
"grad_norm": 0.12052260339260101,
"learning_rate": 4.95691142899001e-05,
"loss": 1.2106,
"step": 39
},
{
"epoch": 0.006291899179806,
"grad_norm": 0.14151117205619812,
"learning_rate": 4.9475683370807326e-05,
"loss": 1.0855,
"step": 42
},
{
"epoch": 0.006291899179806,
"eval_loss": 1.1297273635864258,
"eval_runtime": 313.2801,
"eval_samples_per_second": 35.888,
"eval_steps_per_second": 4.488,
"step": 42
},
{
"epoch": 0.006741320549792143,
"grad_norm": 0.13759616017341614,
"learning_rate": 4.937319780454559e-05,
"loss": 1.1133,
"step": 45
},
{
"epoch": 0.007190741919778285,
"grad_norm": 0.14887671172618866,
"learning_rate": 4.926169550509787e-05,
"loss": 1.1339,
"step": 48
},
{
"epoch": 0.007640163289764428,
"grad_norm": 0.13235358893871307,
"learning_rate": 4.914121772213898e-05,
"loss": 1.0057,
"step": 51
},
{
"epoch": 0.00808958465975057,
"grad_norm": 0.14951473474502563,
"learning_rate": 4.9011809025775486e-05,
"loss": 1.0919,
"step": 54
},
{
"epoch": 0.008539006029736713,
"grad_norm": 0.1409119963645935,
"learning_rate": 4.887351729005726e-05,
"loss": 1.143,
"step": 57
},
{
"epoch": 0.008988427399722857,
"grad_norm": 0.13198496401309967,
"learning_rate": 4.8726393675266716e-05,
"loss": 1.0853,
"step": 60
},
{
"epoch": 0.009437848769709,
"grad_norm": 0.15319645404815674,
"learning_rate": 4.8570492608992325e-05,
"loss": 1.0534,
"step": 63
},
{
"epoch": 0.009887270139695143,
"grad_norm": 0.1532219797372818,
"learning_rate": 4.8405871765993433e-05,
"loss": 1.1016,
"step": 66
},
{
"epoch": 0.010336691509681285,
"grad_norm": 0.14198264479637146,
"learning_rate": 4.82325920468638e-05,
"loss": 1.029,
"step": 69
},
{
"epoch": 0.010786112879667428,
"grad_norm": 0.12415461987257004,
"learning_rate": 4.805071755550177e-05,
"loss": 1.1565,
"step": 72
},
{
"epoch": 0.01123553424965357,
"grad_norm": 0.1350441873073578,
"learning_rate": 4.7860315575395316e-05,
"loss": 1.1276,
"step": 75
},
{
"epoch": 0.011684955619639713,
"grad_norm": 0.12075574696063995,
"learning_rate": 4.766145654473095e-05,
"loss": 1.0753,
"step": 78
},
{
"epoch": 0.012134376989625857,
"grad_norm": 0.11550460755825043,
"learning_rate": 4.745421403033548e-05,
"loss": 1.0078,
"step": 81
},
{
"epoch": 0.012583798359612,
"grad_norm": 0.12455905228853226,
"learning_rate": 4.72386647004603e-05,
"loss": 1.0373,
"step": 84
},
{
"epoch": 0.012583798359612,
"eval_loss": 1.1157194375991821,
"eval_runtime": 312.8234,
"eval_samples_per_second": 35.94,
"eval_steps_per_second": 4.495,
"step": 84
},
{
"epoch": 0.013033219729598143,
"grad_norm": 0.12993893027305603,
"learning_rate": 4.701488829641845e-05,
"loss": 1.1313,
"step": 87
},
{
"epoch": 0.013482641099584285,
"grad_norm": 0.18705669045448303,
"learning_rate": 4.678296760308474e-05,
"loss": 1.2752,
"step": 90
},
{
"epoch": 0.013932062469570428,
"grad_norm": 0.14135660231113434,
"learning_rate": 4.6542988418269876e-05,
"loss": 1.1628,
"step": 93
},
{
"epoch": 0.01438148383955657,
"grad_norm": 0.11707276850938797,
"learning_rate": 4.629503952098011e-05,
"loss": 1.0801,
"step": 96
},
{
"epoch": 0.014830905209542713,
"grad_norm": 0.14167702198028564,
"learning_rate": 4.6039212638573833e-05,
"loss": 1.0452,
"step": 99
},
{
"epoch": 0.015280326579528856,
"grad_norm": 0.15983764827251434,
"learning_rate": 4.5775602412827604e-05,
"loss": 1.1844,
"step": 102
},
{
"epoch": 0.015729747949515,
"grad_norm": 0.12750358879566193,
"learning_rate": 4.55043063649239e-05,
"loss": 1.0877,
"step": 105
},
{
"epoch": 0.01617916931950114,
"grad_norm": 0.1487520933151245,
"learning_rate": 4.522542485937369e-05,
"loss": 1.1373,
"step": 108
},
{
"epoch": 0.016628590689487285,
"grad_norm": 0.16089919209480286,
"learning_rate": 4.493906106688712e-05,
"loss": 1.1806,
"step": 111
},
{
"epoch": 0.017078012059473426,
"grad_norm": 0.158865287899971,
"learning_rate": 4.4645320926206064e-05,
"loss": 1.0689,
"step": 114
},
{
"epoch": 0.01752743342945957,
"grad_norm": 0.16464003920555115,
"learning_rate": 4.434431310491267e-05,
"loss": 1.1319,
"step": 117
},
{
"epoch": 0.017976854799445715,
"grad_norm": 0.12468679249286652,
"learning_rate": 4.4036148959228365e-05,
"loss": 1.0237,
"step": 120
},
{
"epoch": 0.018426276169431856,
"grad_norm": 0.14475342631340027,
"learning_rate": 4.372094249281821e-05,
"loss": 1.1352,
"step": 123
},
{
"epoch": 0.018875697539418,
"grad_norm": 0.1386341154575348,
"learning_rate": 4.3398810314615876e-05,
"loss": 1.177,
"step": 126
},
{
"epoch": 0.018875697539418,
"eval_loss": 1.1068843603134155,
"eval_runtime": 312.5635,
"eval_samples_per_second": 35.97,
"eval_steps_per_second": 4.498,
"step": 126
},
{
"epoch": 0.01932511890940414,
"grad_norm": 0.13838274776935577,
"learning_rate": 4.306987159568479e-05,
"loss": 0.9996,
"step": 129
},
{
"epoch": 0.019774540279390285,
"grad_norm": 0.14386090636253357,
"learning_rate": 4.273424802513145e-05,
"loss": 0.9947,
"step": 132
},
{
"epoch": 0.020223961649376426,
"grad_norm": 0.14473505318164825,
"learning_rate": 4.239206376508717e-05,
"loss": 1.1779,
"step": 135
},
{
"epoch": 0.02067338301936257,
"grad_norm": 0.15975573658943176,
"learning_rate": 4.204344540477499e-05,
"loss": 1.2125,
"step": 138
},
{
"epoch": 0.021122804389348715,
"grad_norm": 0.12576735019683838,
"learning_rate": 4.16885219136787e-05,
"loss": 1.1312,
"step": 141
},
{
"epoch": 0.021572225759334856,
"grad_norm": 0.16916967928409576,
"learning_rate": 4.132742459383122e-05,
"loss": 1.0823,
"step": 144
},
{
"epoch": 0.022021647129321,
"grad_norm": 0.17954471707344055,
"learning_rate": 4.096028703124014e-05,
"loss": 1.0728,
"step": 147
},
{
"epoch": 0.02247106849930714,
"grad_norm": 0.11972087621688843,
"learning_rate": 4.058724504646834e-05,
"loss": 1.0886,
"step": 150
},
{
"epoch": 0.022920489869293285,
"grad_norm": 0.14640676975250244,
"learning_rate": 4.0208436644387834e-05,
"loss": 1.0472,
"step": 153
},
{
"epoch": 0.023369911239279426,
"grad_norm": 0.13775284588336945,
"learning_rate": 3.982400196312564e-05,
"loss": 1.0984,
"step": 156
},
{
"epoch": 0.02381933260926557,
"grad_norm": 0.14184071123600006,
"learning_rate": 3.943408322222049e-05,
"loss": 1.1431,
"step": 159
},
{
"epoch": 0.024268753979251715,
"grad_norm": 0.1685701608657837,
"learning_rate": 3.903882467000937e-05,
"loss": 1.1531,
"step": 162
},
{
"epoch": 0.024718175349237856,
"grad_norm": 0.1468999981880188,
"learning_rate": 3.8638372530263715e-05,
"loss": 1.1069,
"step": 165
},
{
"epoch": 0.025167596719224,
"grad_norm": 0.13278649747371674,
"learning_rate": 3.823287494809469e-05,
"loss": 1.1124,
"step": 168
},
{
"epoch": 0.025167596719224,
"eval_loss": 1.1012423038482666,
"eval_runtime": 312.1008,
"eval_samples_per_second": 36.024,
"eval_steps_per_second": 4.505,
"step": 168
},
{
"epoch": 0.02561701808921014,
"grad_norm": 0.180728018283844,
"learning_rate": 3.782248193514766e-05,
"loss": 1.0867,
"step": 171
},
{
"epoch": 0.026066439459196285,
"grad_norm": 0.15069565176963806,
"learning_rate": 3.740734531410626e-05,
"loss": 1.0624,
"step": 174
},
{
"epoch": 0.026515860829182426,
"grad_norm": 0.13892242312431335,
"learning_rate": 3.698761866252635e-05,
"loss": 1.0351,
"step": 177
},
{
"epoch": 0.02696528219916857,
"grad_norm": 0.1399199515581131,
"learning_rate": 3.656345725602089e-05,
"loss": 1.1609,
"step": 180
},
{
"epoch": 0.027414703569154715,
"grad_norm": 0.14930486679077148,
"learning_rate": 3.6135018010816477e-05,
"loss": 1.1117,
"step": 183
},
{
"epoch": 0.027864124939140856,
"grad_norm": 0.15556196868419647,
"learning_rate": 3.570245942570315e-05,
"loss": 1.1169,
"step": 186
},
{
"epoch": 0.028313546309127,
"grad_norm": 0.17272590100765228,
"learning_rate": 3.526594152339845e-05,
"loss": 1.115,
"step": 189
},
{
"epoch": 0.02876296767911314,
"grad_norm": 0.17533355951309204,
"learning_rate": 3.4825625791348096e-05,
"loss": 1.1298,
"step": 192
},
{
"epoch": 0.029212389049099285,
"grad_norm": 0.14778710901737213,
"learning_rate": 3.438167512198436e-05,
"loss": 1.1183,
"step": 195
},
{
"epoch": 0.029661810419085426,
"grad_norm": 0.14693984389305115,
"learning_rate": 3.393425375246503e-05,
"loss": 1.0647,
"step": 198
},
{
"epoch": 0.03011123178907157,
"grad_norm": 0.14994005858898163,
"learning_rate": 3.348352720391469e-05,
"loss": 1.0008,
"step": 201
},
{
"epoch": 0.03056065315905771,
"grad_norm": 0.1611510068178177,
"learning_rate": 3.3029662220191144e-05,
"loss": 1.094,
"step": 204
},
{
"epoch": 0.031010074529043856,
"grad_norm": 0.19615799188613892,
"learning_rate": 3.2572826706199305e-05,
"loss": 1.051,
"step": 207
},
{
"epoch": 0.03145949589903,
"grad_norm": 0.15789468586444855,
"learning_rate": 3.211318966577581e-05,
"loss": 1.0302,
"step": 210
},
{
"epoch": 0.03145949589903,
"eval_loss": 1.0971506834030151,
"eval_runtime": 312.684,
"eval_samples_per_second": 35.956,
"eval_steps_per_second": 4.497,
"step": 210
},
{
"epoch": 0.031908917269016145,
"grad_norm": 0.15568239986896515,
"learning_rate": 3.165092113916688e-05,
"loss": 1.12,
"step": 213
},
{
"epoch": 0.03235833863900228,
"grad_norm": 0.16250278055667877,
"learning_rate": 3.118619214012286e-05,
"loss": 1.0651,
"step": 216
},
{
"epoch": 0.032807760008988426,
"grad_norm": 0.16043943166732788,
"learning_rate": 3.071917459263264e-05,
"loss": 1.1254,
"step": 219
},
{
"epoch": 0.03325718137897457,
"grad_norm": 0.19213679432868958,
"learning_rate": 3.0250041267321232e-05,
"loss": 1.1082,
"step": 222
},
{
"epoch": 0.033706602748960715,
"grad_norm": 0.17149952054023743,
"learning_rate": 2.9778965717534313e-05,
"loss": 1.0345,
"step": 225
},
{
"epoch": 0.03415602411894685,
"grad_norm": 0.19161058962345123,
"learning_rate": 2.9306122215132976e-05,
"loss": 1.2631,
"step": 228
},
{
"epoch": 0.034605445488933,
"grad_norm": 0.12983594834804535,
"learning_rate": 2.8831685686022897e-05,
"loss": 1.0125,
"step": 231
},
{
"epoch": 0.03505486685891914,
"grad_norm": 0.1923818439245224,
"learning_rate": 2.8355831645441388e-05,
"loss": 1.1152,
"step": 234
},
{
"epoch": 0.035504288228905286,
"grad_norm": 0.17539535462856293,
"learning_rate": 2.787873613302649e-05,
"loss": 1.0698,
"step": 237
},
{
"epoch": 0.03595370959889143,
"grad_norm": 0.20096057653427124,
"learning_rate": 2.7400575647692046e-05,
"loss": 1.0742,
"step": 240
},
{
"epoch": 0.03640313096887757,
"grad_norm": 0.1583949774503708,
"learning_rate": 2.692152708233292e-05,
"loss": 1.0255,
"step": 243
},
{
"epoch": 0.03685255233886371,
"grad_norm": 0.13673090934753418,
"learning_rate": 2.6441767658384366e-05,
"loss": 1.1167,
"step": 246
},
{
"epoch": 0.037301973708849856,
"grad_norm": 0.15132947266101837,
"learning_rate": 2.596147486025996e-05,
"loss": 1.053,
"step": 249
},
{
"epoch": 0.037751395078836,
"grad_norm": 0.1649513989686966,
"learning_rate": 2.5480826369692178e-05,
"loss": 1.1041,
"step": 252
},
{
"epoch": 0.037751395078836,
"eval_loss": 1.0942788124084473,
"eval_runtime": 312.947,
"eval_samples_per_second": 35.926,
"eval_steps_per_second": 4.493,
"step": 252
},
{
"epoch": 0.038200816448822145,
"grad_norm": 0.19113826751708984,
"learning_rate": 2.5e-05,
"loss": 1.0117,
"step": 255
},
{
"epoch": 0.03865023781880828,
"grad_norm": 0.17212168872356415,
"learning_rate": 2.4519173630307825e-05,
"loss": 1.136,
"step": 258
},
{
"epoch": 0.039099659188794426,
"grad_norm": 0.16413848102092743,
"learning_rate": 2.403852513974004e-05,
"loss": 1.0806,
"step": 261
},
{
"epoch": 0.03954908055878057,
"grad_norm": 0.15564818680286407,
"learning_rate": 2.3558232341615643e-05,
"loss": 1.0233,
"step": 264
},
{
"epoch": 0.039998501928766715,
"grad_norm": 0.15014681220054626,
"learning_rate": 2.3078472917667092e-05,
"loss": 1.09,
"step": 267
},
{
"epoch": 0.04044792329875285,
"grad_norm": 0.1784486323595047,
"learning_rate": 2.2599424352307957e-05,
"loss": 1.0867,
"step": 270
},
{
"epoch": 0.040897344668739,
"grad_norm": 0.1629609763622284,
"learning_rate": 2.212126386697352e-05,
"loss": 1.0916,
"step": 273
},
{
"epoch": 0.04134676603872514,
"grad_norm": 0.1595894992351532,
"learning_rate": 2.164416835455862e-05,
"loss": 1.0537,
"step": 276
},
{
"epoch": 0.041796187408711286,
"grad_norm": 0.16272102296352386,
"learning_rate": 2.11683143139771e-05,
"loss": 1.1907,
"step": 279
},
{
"epoch": 0.04224560877869743,
"grad_norm": 0.15896858274936676,
"learning_rate": 2.069387778486703e-05,
"loss": 1.0492,
"step": 282
},
{
"epoch": 0.04269503014868357,
"grad_norm": 0.18164744973182678,
"learning_rate": 2.02210342824657e-05,
"loss": 1.064,
"step": 285
},
{
"epoch": 0.04314445151866971,
"grad_norm": 0.17921298742294312,
"learning_rate": 1.9749958732678767e-05,
"loss": 1.1456,
"step": 288
},
{
"epoch": 0.043593872888655856,
"grad_norm": 0.14521931111812592,
"learning_rate": 1.928082540736737e-05,
"loss": 1.1073,
"step": 291
},
{
"epoch": 0.044043294258642,
"grad_norm": 0.15362174808979034,
"learning_rate": 1.8813807859877147e-05,
"loss": 1.0171,
"step": 294
},
{
"epoch": 0.044043294258642,
"eval_loss": 1.0923734903335571,
"eval_runtime": 312.595,
"eval_samples_per_second": 35.967,
"eval_steps_per_second": 4.498,
"step": 294
},
{
"epoch": 0.044492715628628145,
"grad_norm": 0.18710507452487946,
"learning_rate": 1.8349078860833123e-05,
"loss": 1.0978,
"step": 297
},
{
"epoch": 0.04494213699861428,
"grad_norm": 0.1569843292236328,
"learning_rate": 1.7886810334224192e-05,
"loss": 1.0783,
"step": 300
},
{
"epoch": 0.045391558368600426,
"grad_norm": 0.14208896458148956,
"learning_rate": 1.74271732938007e-05,
"loss": 1.0512,
"step": 303
},
{
"epoch": 0.04584097973858657,
"grad_norm": 0.16557104885578156,
"learning_rate": 1.6970337779808862e-05,
"loss": 1.143,
"step": 306
},
{
"epoch": 0.046290401108572715,
"grad_norm": 0.17679089307785034,
"learning_rate": 1.6516472796085315e-05,
"loss": 1.0428,
"step": 309
},
{
"epoch": 0.04673982247855885,
"grad_norm": 0.21449750661849976,
"learning_rate": 1.6065746247534984e-05,
"loss": 1.0541,
"step": 312
},
{
"epoch": 0.047189243848545,
"grad_norm": 0.19140197336673737,
"learning_rate": 1.561832487801565e-05,
"loss": 1.0943,
"step": 315
},
{
"epoch": 0.04763866521853114,
"grad_norm": 0.21146325767040253,
"learning_rate": 1.5174374208651912e-05,
"loss": 1.1286,
"step": 318
},
{
"epoch": 0.048088086588517286,
"grad_norm": 0.16576792299747467,
"learning_rate": 1.4734058476601553e-05,
"loss": 1.1543,
"step": 321
},
{
"epoch": 0.04853750795850343,
"grad_norm": 0.17730730772018433,
"learning_rate": 1.4297540574296869e-05,
"loss": 1.0809,
"step": 324
},
{
"epoch": 0.04898692932848957,
"grad_norm": 0.16303078830242157,
"learning_rate": 1.386498198918352e-05,
"loss": 1.0781,
"step": 327
},
{
"epoch": 0.04943635069847571,
"grad_norm": 0.15803277492523193,
"learning_rate": 1.3436542743979125e-05,
"loss": 1.1138,
"step": 330
},
{
"epoch": 0.049885772068461856,
"grad_norm": 0.161447212100029,
"learning_rate": 1.3012381337473656e-05,
"loss": 1.0087,
"step": 333
},
{
"epoch": 0.050335193438448,
"grad_norm": 0.14916126430034637,
"learning_rate": 1.2592654685893757e-05,
"loss": 1.0916,
"step": 336
},
{
"epoch": 0.050335193438448,
"eval_loss": 1.0906805992126465,
"eval_runtime": 312.9768,
"eval_samples_per_second": 35.923,
"eval_steps_per_second": 4.492,
"step": 336
},
{
"epoch": 0.05078461480843414,
"grad_norm": 0.18624208867549896,
"learning_rate": 1.217751806485235e-05,
"loss": 1.1368,
"step": 339
},
{
"epoch": 0.05123403617842028,
"grad_norm": 0.18036052584648132,
"learning_rate": 1.1767125051905315e-05,
"loss": 1.0682,
"step": 342
},
{
"epoch": 0.051683457548406427,
"grad_norm": 0.17344728112220764,
"learning_rate": 1.1361627469736285e-05,
"loss": 1.0299,
"step": 345
},
{
"epoch": 0.05213287891839257,
"grad_norm": 0.17225950956344604,
"learning_rate": 1.096117532999063e-05,
"loss": 1.0217,
"step": 348
},
{
"epoch": 0.052582300288378715,
"grad_norm": 0.1770411878824234,
"learning_rate": 1.0565916777779519e-05,
"loss": 1.1027,
"step": 351
},
{
"epoch": 0.05303172165836485,
"grad_norm": 0.15190331637859344,
"learning_rate": 1.0175998036874356e-05,
"loss": 1.0684,
"step": 354
},
{
"epoch": 0.053481143028351,
"grad_norm": 0.20575201511383057,
"learning_rate": 9.791563355612172e-06,
"loss": 1.0359,
"step": 357
},
{
"epoch": 0.05393056439833714,
"grad_norm": 0.2163867950439453,
"learning_rate": 9.412754953531663e-06,
"loss": 1.0943,
"step": 360
},
{
"epoch": 0.054379985768323286,
"grad_norm": 0.1523335576057434,
"learning_rate": 9.039712968759864e-06,
"loss": 1.1357,
"step": 363
},
{
"epoch": 0.05482940713830943,
"grad_norm": 0.1748288869857788,
"learning_rate": 8.672575406168782e-06,
"loss": 1.0745,
"step": 366
},
{
"epoch": 0.05527882850829557,
"grad_norm": 0.16783830523490906,
"learning_rate": 8.3114780863213e-06,
"loss": 1.0432,
"step": 369
},
{
"epoch": 0.05572824987828171,
"grad_norm": 0.17801252007484436,
"learning_rate": 7.956554595225016e-06,
"loss": 1.0649,
"step": 372
},
{
"epoch": 0.056177671248267856,
"grad_norm": 0.17944374680519104,
"learning_rate": 7.607936234912841e-06,
"loss": 1.0862,
"step": 375
},
{
"epoch": 0.056627092618254,
"grad_norm": 0.19096186757087708,
"learning_rate": 7.265751974868554e-06,
"loss": 1.0477,
"step": 378
},
{
"epoch": 0.056627092618254,
"eval_loss": 1.0896837711334229,
"eval_runtime": 312.799,
"eval_samples_per_second": 35.943,
"eval_steps_per_second": 4.495,
"step": 378
},
{
"epoch": 0.05707651398824014,
"grad_norm": 0.17599642276763916,
"learning_rate": 6.930128404315214e-06,
"loss": 1.1339,
"step": 381
},
{
"epoch": 0.05752593535822628,
"grad_norm": 0.16360458731651306,
"learning_rate": 6.601189685384126e-06,
"loss": 1.1844,
"step": 384
},
{
"epoch": 0.05797535672821243,
"grad_norm": 0.1602054089307785,
"learning_rate": 6.279057507181796e-06,
"loss": 1.0835,
"step": 387
},
{
"epoch": 0.05842477809819857,
"grad_norm": 0.33288466930389404,
"learning_rate": 5.9638510407716394e-06,
"loss": 1.0366,
"step": 390
},
{
"epoch": 0.058874199468184715,
"grad_norm": 0.17551077902317047,
"learning_rate": 5.655686895087329e-06,
"loss": 1.0361,
"step": 393
},
{
"epoch": 0.05932362083817085,
"grad_norm": 0.19817480444908142,
"learning_rate": 5.354679073793942e-06,
"loss": 1.1113,
"step": 396
},
{
"epoch": 0.059773042208157,
"grad_norm": 0.18397966027259827,
"learning_rate": 5.060938933112891e-06,
"loss": 1.0809,
"step": 399
},
{
"epoch": 0.06022246357814314,
"grad_norm": 0.18848171830177307,
"learning_rate": 4.7745751406263165e-06,
"loss": 1.0232,
"step": 402
},
{
"epoch": 0.060671884948129286,
"grad_norm": 0.18873141705989838,
"learning_rate": 4.495693635076101e-06,
"loss": 1.0633,
"step": 405
},
{
"epoch": 0.06112130631811542,
"grad_norm": 0.17433975636959076,
"learning_rate": 4.224397587172402e-06,
"loss": 1.0064,
"step": 408
},
{
"epoch": 0.06157072768810157,
"grad_norm": 0.2025955468416214,
"learning_rate": 3.9607873614261715e-06,
"loss": 1.1142,
"step": 411
},
{
"epoch": 0.06202014905808771,
"grad_norm": 0.1579374223947525,
"learning_rate": 3.7049604790198976e-06,
"loss": 1.0722,
"step": 414
},
{
"epoch": 0.062469570428073856,
"grad_norm": 0.15210741758346558,
"learning_rate": 3.4570115817301243e-06,
"loss": 1.011,
"step": 417
},
{
"epoch": 0.06291899179806,
"grad_norm": 0.21344028413295746,
"learning_rate": 3.217032396915265e-06,
"loss": 1.1333,
"step": 420
},
{
"epoch": 0.06291899179806,
"eval_loss": 1.0891081094741821,
"eval_runtime": 312.8277,
"eval_samples_per_second": 35.94,
"eval_steps_per_second": 4.494,
"step": 420
}
],
"logging_steps": 3,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 42,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3537807518872371e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}