Training in progress, step 210, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.03145949589903,
"eval_steps": 42,
"global_step": 210,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00014980712332871428,
"eval_loss": 1.160882592201233,
"eval_runtime": 311.16,
"eval_samples_per_second": 36.133,
"eval_steps_per_second": 4.519,
"step": 1
},
{
"epoch": 0.00044942136998614283,
"grad_norm": 0.12129199504852295,
"learning_rate": 1.5e-05,
"loss": 1.1193,
"step": 3
},
{
"epoch": 0.0008988427399722857,
"grad_norm": 0.11971050500869751,
"learning_rate": 3e-05,
"loss": 1.103,
"step": 6
},
{
"epoch": 0.0013482641099584285,
"grad_norm": 0.10862895101308823,
"learning_rate": 4.5e-05,
"loss": 1.0659,
"step": 9
},
{
"epoch": 0.0017976854799445713,
"grad_norm": 0.11977853626012802,
"learning_rate": 4.9997944716957985e-05,
"loss": 1.0914,
"step": 12
},
{
"epoch": 0.0022471068499307144,
"grad_norm": 0.12554779648780823,
"learning_rate": 4.99871554050172e-05,
"loss": 1.1725,
"step": 15
},
{
"epoch": 0.002696528219916857,
"grad_norm": 0.13287784159183502,
"learning_rate": 4.996712222958461e-05,
"loss": 1.1145,
"step": 18
},
{
"epoch": 0.003145949589903,
"grad_norm": 0.1167389452457428,
"learning_rate": 4.993785260182552e-05,
"loss": 1.0956,
"step": 21
},
{
"epoch": 0.0035953709598891426,
"grad_norm": 0.11548212915658951,
"learning_rate": 4.989935734988098e-05,
"loss": 1.12,
"step": 24
},
{
"epoch": 0.004044792329875285,
"grad_norm": 0.12251826375722885,
"learning_rate": 4.9851650714862006e-05,
"loss": 1.0895,
"step": 27
},
{
"epoch": 0.004494213699861429,
"grad_norm": 0.12304849177598953,
"learning_rate": 4.979475034558115e-05,
"loss": 1.1302,
"step": 30
},
{
"epoch": 0.004943635069847571,
"grad_norm": 0.12594439089298248,
"learning_rate": 4.9728677292023405e-05,
"loss": 1.0825,
"step": 33
},
{
"epoch": 0.005393056439833714,
"grad_norm": 0.11802031844854355,
"learning_rate": 4.965345599755887e-05,
"loss": 1.0949,
"step": 36
},
{
"epoch": 0.005842477809819857,
"grad_norm": 0.12052260339260101,
"learning_rate": 4.95691142899001e-05,
"loss": 1.2106,
"step": 39
},
{
"epoch": 0.006291899179806,
"grad_norm": 0.14151117205619812,
"learning_rate": 4.9475683370807326e-05,
"loss": 1.0855,
"step": 42
},
{
"epoch": 0.006291899179806,
"eval_loss": 1.1297273635864258,
"eval_runtime": 313.2801,
"eval_samples_per_second": 35.888,
"eval_steps_per_second": 4.488,
"step": 42
},
{
"epoch": 0.006741320549792143,
"grad_norm": 0.13759616017341614,
"learning_rate": 4.937319780454559e-05,
"loss": 1.1133,
"step": 45
},
{
"epoch": 0.007190741919778285,
"grad_norm": 0.14887671172618866,
"learning_rate": 4.926169550509787e-05,
"loss": 1.1339,
"step": 48
},
{
"epoch": 0.007640163289764428,
"grad_norm": 0.13235358893871307,
"learning_rate": 4.914121772213898e-05,
"loss": 1.0057,
"step": 51
},
{
"epoch": 0.00808958465975057,
"grad_norm": 0.14951473474502563,
"learning_rate": 4.9011809025775486e-05,
"loss": 1.0919,
"step": 54
},
{
"epoch": 0.008539006029736713,
"grad_norm": 0.1409119963645935,
"learning_rate": 4.887351729005726e-05,
"loss": 1.143,
"step": 57
},
{
"epoch": 0.008988427399722857,
"grad_norm": 0.13198496401309967,
"learning_rate": 4.8726393675266716e-05,
"loss": 1.0853,
"step": 60
},
{
"epoch": 0.009437848769709,
"grad_norm": 0.15319645404815674,
"learning_rate": 4.8570492608992325e-05,
"loss": 1.0534,
"step": 63
},
{
"epoch": 0.009887270139695143,
"grad_norm": 0.1532219797372818,
"learning_rate": 4.8405871765993433e-05,
"loss": 1.1016,
"step": 66
},
{
"epoch": 0.010336691509681285,
"grad_norm": 0.14198264479637146,
"learning_rate": 4.82325920468638e-05,
"loss": 1.029,
"step": 69
},
{
"epoch": 0.010786112879667428,
"grad_norm": 0.12415461987257004,
"learning_rate": 4.805071755550177e-05,
"loss": 1.1565,
"step": 72
},
{
"epoch": 0.01123553424965357,
"grad_norm": 0.1350441873073578,
"learning_rate": 4.7860315575395316e-05,
"loss": 1.1276,
"step": 75
},
{
"epoch": 0.011684955619639713,
"grad_norm": 0.12075574696063995,
"learning_rate": 4.766145654473095e-05,
"loss": 1.0753,
"step": 78
},
{
"epoch": 0.012134376989625857,
"grad_norm": 0.11550460755825043,
"learning_rate": 4.745421403033548e-05,
"loss": 1.0078,
"step": 81
},
{
"epoch": 0.012583798359612,
"grad_norm": 0.12455905228853226,
"learning_rate": 4.72386647004603e-05,
"loss": 1.0373,
"step": 84
},
{
"epoch": 0.012583798359612,
"eval_loss": 1.1157194375991821,
"eval_runtime": 312.8234,
"eval_samples_per_second": 35.94,
"eval_steps_per_second": 4.495,
"step": 84
},
{
"epoch": 0.013033219729598143,
"grad_norm": 0.12993893027305603,
"learning_rate": 4.701488829641845e-05,
"loss": 1.1313,
"step": 87
},
{
"epoch": 0.013482641099584285,
"grad_norm": 0.18705669045448303,
"learning_rate": 4.678296760308474e-05,
"loss": 1.2752,
"step": 90
},
{
"epoch": 0.013932062469570428,
"grad_norm": 0.14135660231113434,
"learning_rate": 4.6542988418269876e-05,
"loss": 1.1628,
"step": 93
},
{
"epoch": 0.01438148383955657,
"grad_norm": 0.11707276850938797,
"learning_rate": 4.629503952098011e-05,
"loss": 1.0801,
"step": 96
},
{
"epoch": 0.014830905209542713,
"grad_norm": 0.14167702198028564,
"learning_rate": 4.6039212638573833e-05,
"loss": 1.0452,
"step": 99
},
{
"epoch": 0.015280326579528856,
"grad_norm": 0.15983764827251434,
"learning_rate": 4.5775602412827604e-05,
"loss": 1.1844,
"step": 102
},
{
"epoch": 0.015729747949515,
"grad_norm": 0.12750358879566193,
"learning_rate": 4.55043063649239e-05,
"loss": 1.0877,
"step": 105
},
{
"epoch": 0.01617916931950114,
"grad_norm": 0.1487520933151245,
"learning_rate": 4.522542485937369e-05,
"loss": 1.1373,
"step": 108
},
{
"epoch": 0.016628590689487285,
"grad_norm": 0.16089919209480286,
"learning_rate": 4.493906106688712e-05,
"loss": 1.1806,
"step": 111
},
{
"epoch": 0.017078012059473426,
"grad_norm": 0.158865287899971,
"learning_rate": 4.4645320926206064e-05,
"loss": 1.0689,
"step": 114
},
{
"epoch": 0.01752743342945957,
"grad_norm": 0.16464003920555115,
"learning_rate": 4.434431310491267e-05,
"loss": 1.1319,
"step": 117
},
{
"epoch": 0.017976854799445715,
"grad_norm": 0.12468679249286652,
"learning_rate": 4.4036148959228365e-05,
"loss": 1.0237,
"step": 120
},
{
"epoch": 0.018426276169431856,
"grad_norm": 0.14475342631340027,
"learning_rate": 4.372094249281821e-05,
"loss": 1.1352,
"step": 123
},
{
"epoch": 0.018875697539418,
"grad_norm": 0.1386341154575348,
"learning_rate": 4.3398810314615876e-05,
"loss": 1.177,
"step": 126
},
{
"epoch": 0.018875697539418,
"eval_loss": 1.1068843603134155,
"eval_runtime": 312.5635,
"eval_samples_per_second": 35.97,
"eval_steps_per_second": 4.498,
"step": 126
},
{
"epoch": 0.01932511890940414,
"grad_norm": 0.13838274776935577,
"learning_rate": 4.306987159568479e-05,
"loss": 0.9996,
"step": 129
},
{
"epoch": 0.019774540279390285,
"grad_norm": 0.14386090636253357,
"learning_rate": 4.273424802513145e-05,
"loss": 0.9947,
"step": 132
},
{
"epoch": 0.020223961649376426,
"grad_norm": 0.14473505318164825,
"learning_rate": 4.239206376508717e-05,
"loss": 1.1779,
"step": 135
},
{
"epoch": 0.02067338301936257,
"grad_norm": 0.15975573658943176,
"learning_rate": 4.204344540477499e-05,
"loss": 1.2125,
"step": 138
},
{
"epoch": 0.021122804389348715,
"grad_norm": 0.12576735019683838,
"learning_rate": 4.16885219136787e-05,
"loss": 1.1312,
"step": 141
},
{
"epoch": 0.021572225759334856,
"grad_norm": 0.16916967928409576,
"learning_rate": 4.132742459383122e-05,
"loss": 1.0823,
"step": 144
},
{
"epoch": 0.022021647129321,
"grad_norm": 0.17954471707344055,
"learning_rate": 4.096028703124014e-05,
"loss": 1.0728,
"step": 147
},
{
"epoch": 0.02247106849930714,
"grad_norm": 0.11972087621688843,
"learning_rate": 4.058724504646834e-05,
"loss": 1.0886,
"step": 150
},
{
"epoch": 0.022920489869293285,
"grad_norm": 0.14640676975250244,
"learning_rate": 4.0208436644387834e-05,
"loss": 1.0472,
"step": 153
},
{
"epoch": 0.023369911239279426,
"grad_norm": 0.13775284588336945,
"learning_rate": 3.982400196312564e-05,
"loss": 1.0984,
"step": 156
},
{
"epoch": 0.02381933260926557,
"grad_norm": 0.14184071123600006,
"learning_rate": 3.943408322222049e-05,
"loss": 1.1431,
"step": 159
},
{
"epoch": 0.024268753979251715,
"grad_norm": 0.1685701608657837,
"learning_rate": 3.903882467000937e-05,
"loss": 1.1531,
"step": 162
},
{
"epoch": 0.024718175349237856,
"grad_norm": 0.1468999981880188,
"learning_rate": 3.8638372530263715e-05,
"loss": 1.1069,
"step": 165
},
{
"epoch": 0.025167596719224,
"grad_norm": 0.13278649747371674,
"learning_rate": 3.823287494809469e-05,
"loss": 1.1124,
"step": 168
},
{
"epoch": 0.025167596719224,
"eval_loss": 1.1012423038482666,
"eval_runtime": 312.1008,
"eval_samples_per_second": 36.024,
"eval_steps_per_second": 4.505,
"step": 168
},
{
"epoch": 0.02561701808921014,
"grad_norm": 0.180728018283844,
"learning_rate": 3.782248193514766e-05,
"loss": 1.0867,
"step": 171
},
{
"epoch": 0.026066439459196285,
"grad_norm": 0.15069565176963806,
"learning_rate": 3.740734531410626e-05,
"loss": 1.0624,
"step": 174
},
{
"epoch": 0.026515860829182426,
"grad_norm": 0.13892242312431335,
"learning_rate": 3.698761866252635e-05,
"loss": 1.0351,
"step": 177
},
{
"epoch": 0.02696528219916857,
"grad_norm": 0.1399199515581131,
"learning_rate": 3.656345725602089e-05,
"loss": 1.1609,
"step": 180
},
{
"epoch": 0.027414703569154715,
"grad_norm": 0.14930486679077148,
"learning_rate": 3.6135018010816477e-05,
"loss": 1.1117,
"step": 183
},
{
"epoch": 0.027864124939140856,
"grad_norm": 0.15556196868419647,
"learning_rate": 3.570245942570315e-05,
"loss": 1.1169,
"step": 186
},
{
"epoch": 0.028313546309127,
"grad_norm": 0.17272590100765228,
"learning_rate": 3.526594152339845e-05,
"loss": 1.115,
"step": 189
},
{
"epoch": 0.02876296767911314,
"grad_norm": 0.17533355951309204,
"learning_rate": 3.4825625791348096e-05,
"loss": 1.1298,
"step": 192
},
{
"epoch": 0.029212389049099285,
"grad_norm": 0.14778710901737213,
"learning_rate": 3.438167512198436e-05,
"loss": 1.1183,
"step": 195
},
{
"epoch": 0.029661810419085426,
"grad_norm": 0.14693984389305115,
"learning_rate": 3.393425375246503e-05,
"loss": 1.0647,
"step": 198
},
{
"epoch": 0.03011123178907157,
"grad_norm": 0.14994005858898163,
"learning_rate": 3.348352720391469e-05,
"loss": 1.0008,
"step": 201
},
{
"epoch": 0.03056065315905771,
"grad_norm": 0.1611510068178177,
"learning_rate": 3.3029662220191144e-05,
"loss": 1.094,
"step": 204
},
{
"epoch": 0.031010074529043856,
"grad_norm": 0.19615799188613892,
"learning_rate": 3.2572826706199305e-05,
"loss": 1.051,
"step": 207
},
{
"epoch": 0.03145949589903,
"grad_norm": 0.15789468586444855,
"learning_rate": 3.211318966577581e-05,
"loss": 1.0302,
"step": 210
},
{
"epoch": 0.03145949589903,
"eval_loss": 1.0971506834030151,
"eval_runtime": 312.684,
"eval_samples_per_second": 35.956,
"eval_steps_per_second": 4.497,
"step": 210
}
],
"logging_steps": 3,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 42,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.724628513606861e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
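
The file above is the `trainer_state.json` that the Hugging Face Transformers `Trainer` writes alongside each checkpoint: `log_history` interleaves training logs (every `logging_steps = 3`) with evaluation logs (every `eval_steps = 42`), and eval_loss falls from 1.1609 at step 1 to 1.0972 at step 210. Below is a minimal sketch, not part of the checkpoint itself, showing one way to inspect that history; it assumes a local copy saved as `trainer_state.json` and uses only the standard library.

```python
import json

# Assumed local path to a copy of the checkpoint state shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry is either a training log (has "loss") or an
# evaluation log (has "eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Print the evaluation-loss trajectory (steps 1, 42, 84, ..., 210).
for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss = {e['eval_loss']:.4f}")

# Last logged training loss and learning rate at global_step 210.
last = train_logs[-1]
print(f"final train loss {last['loss']} at lr {last['learning_rate']:.3e}")
```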