{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5012531328320802,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005012531328320802,
"grad_norm": 20.089641571044922,
"learning_rate": 1e-05,
"loss": 20.1344,
"step": 1
},
{
"epoch": 0.005012531328320802,
"eval_loss": 10.39857006072998,
"eval_runtime": 28.5845,
"eval_samples_per_second": 5.877,
"eval_steps_per_second": 0.735,
"step": 1
},
{
"epoch": 0.010025062656641603,
"grad_norm": 19.52311134338379,
"learning_rate": 2e-05,
"loss": 22.4254,
"step": 2
},
{
"epoch": 0.015037593984962405,
"grad_norm": 14.5244140625,
"learning_rate": 3e-05,
"loss": 20.1972,
"step": 3
},
{
"epoch": 0.020050125313283207,
"grad_norm": 21.34798812866211,
"learning_rate": 4e-05,
"loss": 22.7661,
"step": 4
},
{
"epoch": 0.02506265664160401,
"grad_norm": 17.969993591308594,
"learning_rate": 5e-05,
"loss": 19.4781,
"step": 5
},
{
"epoch": 0.03007518796992481,
"grad_norm": 19.54123306274414,
"learning_rate": 6e-05,
"loss": 21.2443,
"step": 6
},
{
"epoch": 0.03508771929824561,
"grad_norm": 19.612939834594727,
"learning_rate": 7e-05,
"loss": 20.0462,
"step": 7
},
{
"epoch": 0.040100250626566414,
"grad_norm": 23.63931655883789,
"learning_rate": 8e-05,
"loss": 17.3324,
"step": 8
},
{
"epoch": 0.045112781954887216,
"grad_norm": 20.25324821472168,
"learning_rate": 9e-05,
"loss": 16.8131,
"step": 9
},
{
"epoch": 0.045112781954887216,
"eval_loss": 8.083789825439453,
"eval_runtime": 28.6671,
"eval_samples_per_second": 5.86,
"eval_steps_per_second": 0.733,
"step": 9
},
{
"epoch": 0.05012531328320802,
"grad_norm": 22.54298210144043,
"learning_rate": 0.0001,
"loss": 15.429,
"step": 10
},
{
"epoch": 0.05513784461152882,
"grad_norm": 27.352766036987305,
"learning_rate": 9.99695413509548e-05,
"loss": 17.369,
"step": 11
},
{
"epoch": 0.06015037593984962,
"grad_norm": 24.816608428955078,
"learning_rate": 9.987820251299122e-05,
"loss": 12.8618,
"step": 12
},
{
"epoch": 0.06516290726817042,
"grad_norm": 22.56251335144043,
"learning_rate": 9.972609476841367e-05,
"loss": 11.7916,
"step": 13
},
{
"epoch": 0.07017543859649122,
"grad_norm": 24.081544876098633,
"learning_rate": 9.951340343707852e-05,
"loss": 8.7375,
"step": 14
},
{
"epoch": 0.07518796992481203,
"grad_norm": 16.781705856323242,
"learning_rate": 9.924038765061042e-05,
"loss": 5.8874,
"step": 15
},
{
"epoch": 0.08020050125313283,
"grad_norm": 21.11363983154297,
"learning_rate": 9.890738003669029e-05,
"loss": 5.2726,
"step": 16
},
{
"epoch": 0.08521303258145363,
"grad_norm": 17.36714744567871,
"learning_rate": 9.851478631379982e-05,
"loss": 5.7834,
"step": 17
},
{
"epoch": 0.09022556390977443,
"grad_norm": 12.790335655212402,
"learning_rate": 9.806308479691595e-05,
"loss": 3.3452,
"step": 18
},
{
"epoch": 0.09022556390977443,
"eval_loss": 1.4436625242233276,
"eval_runtime": 28.6933,
"eval_samples_per_second": 5.855,
"eval_steps_per_second": 0.732,
"step": 18
},
{
"epoch": 0.09523809523809523,
"grad_norm": 12.467353820800781,
"learning_rate": 9.755282581475769e-05,
"loss": 2.57,
"step": 19
},
{
"epoch": 0.10025062656641603,
"grad_norm": 12.546868324279785,
"learning_rate": 9.698463103929542e-05,
"loss": 2.2274,
"step": 20
},
{
"epoch": 0.10526315789473684,
"grad_norm": 15.894756317138672,
"learning_rate": 9.635919272833938e-05,
"loss": 2.0864,
"step": 21
},
{
"epoch": 0.11027568922305764,
"grad_norm": 11.902620315551758,
"learning_rate": 9.567727288213005e-05,
"loss": 1.9417,
"step": 22
},
{
"epoch": 0.11528822055137844,
"grad_norm": 7.588188648223877,
"learning_rate": 9.493970231495835e-05,
"loss": 1.6944,
"step": 23
},
{
"epoch": 0.12030075187969924,
"grad_norm": 7.501560688018799,
"learning_rate": 9.414737964294636e-05,
"loss": 1.6366,
"step": 24
},
{
"epoch": 0.12531328320802004,
"grad_norm": 4.7584309577941895,
"learning_rate": 9.330127018922194e-05,
"loss": 1.5247,
"step": 25
},
{
"epoch": 0.13032581453634084,
"grad_norm": 2.9300928115844727,
"learning_rate": 9.24024048078213e-05,
"loss": 1.4553,
"step": 26
},
{
"epoch": 0.13533834586466165,
"grad_norm": 3.2299232482910156,
"learning_rate": 9.145187862775209e-05,
"loss": 1.3305,
"step": 27
},
{
"epoch": 0.13533834586466165,
"eval_loss": 0.7951204180717468,
"eval_runtime": 28.6441,
"eval_samples_per_second": 5.865,
"eval_steps_per_second": 0.733,
"step": 27
},
{
"epoch": 0.14035087719298245,
"grad_norm": 3.232689142227173,
"learning_rate": 9.045084971874738e-05,
"loss": 1.2491,
"step": 28
},
{
"epoch": 0.14536340852130325,
"grad_norm": 5.9495344161987305,
"learning_rate": 8.940053768033609e-05,
"loss": 1.6638,
"step": 29
},
{
"epoch": 0.15037593984962405,
"grad_norm": 9.425905227661133,
"learning_rate": 8.83022221559489e-05,
"loss": 1.6769,
"step": 30
},
{
"epoch": 0.15538847117794485,
"grad_norm": 4.587433815002441,
"learning_rate": 8.715724127386972e-05,
"loss": 1.627,
"step": 31
},
{
"epoch": 0.16040100250626566,
"grad_norm": 2.9762625694274902,
"learning_rate": 8.596699001693255e-05,
"loss": 1.2048,
"step": 32
},
{
"epoch": 0.16541353383458646,
"grad_norm": 2.8089957237243652,
"learning_rate": 8.473291852294987e-05,
"loss": 1.3888,
"step": 33
},
{
"epoch": 0.17042606516290726,
"grad_norm": 1.2890253067016602,
"learning_rate": 8.345653031794292e-05,
"loss": 1.34,
"step": 34
},
{
"epoch": 0.17543859649122806,
"grad_norm": 2.3775737285614014,
"learning_rate": 8.213938048432697e-05,
"loss": 1.4306,
"step": 35
},
{
"epoch": 0.18045112781954886,
"grad_norm": 2.7069523334503174,
"learning_rate": 8.07830737662829e-05,
"loss": 1.4448,
"step": 36
},
{
"epoch": 0.18045112781954886,
"eval_loss": 0.6974722743034363,
"eval_runtime": 28.6505,
"eval_samples_per_second": 5.864,
"eval_steps_per_second": 0.733,
"step": 36
},
{
"epoch": 0.18546365914786966,
"grad_norm": 2.9651219844818115,
"learning_rate": 7.938926261462366e-05,
"loss": 1.4529,
"step": 37
},
{
"epoch": 0.19047619047619047,
"grad_norm": 4.528919219970703,
"learning_rate": 7.795964517353735e-05,
"loss": 1.3969,
"step": 38
},
{
"epoch": 0.19548872180451127,
"grad_norm": 2.145470142364502,
"learning_rate": 7.649596321166024e-05,
"loss": 1.4055,
"step": 39
},
{
"epoch": 0.20050125313283207,
"grad_norm": 3.200515031814575,
"learning_rate": 7.500000000000001e-05,
"loss": 1.4332,
"step": 40
},
{
"epoch": 0.20551378446115287,
"grad_norm": 2.2597758769989014,
"learning_rate": 7.347357813929454e-05,
"loss": 1.4308,
"step": 41
},
{
"epoch": 0.21052631578947367,
"grad_norm": 1.4869681596755981,
"learning_rate": 7.191855733945387e-05,
"loss": 1.3283,
"step": 42
},
{
"epoch": 0.21553884711779447,
"grad_norm": 2.0091750621795654,
"learning_rate": 7.033683215379002e-05,
"loss": 1.3992,
"step": 43
},
{
"epoch": 0.22055137844611528,
"grad_norm": 5.310917377471924,
"learning_rate": 6.873032967079561e-05,
"loss": 1.599,
"step": 44
},
{
"epoch": 0.22556390977443608,
"grad_norm": 3.153137445449829,
"learning_rate": 6.710100716628344e-05,
"loss": 1.4812,
"step": 45
},
{
"epoch": 0.22556390977443608,
"eval_loss": 0.7087278366088867,
"eval_runtime": 28.9933,
"eval_samples_per_second": 5.794,
"eval_steps_per_second": 0.724,
"step": 45
},
{
"epoch": 0.23057644110275688,
"grad_norm": 2.8991687297821045,
"learning_rate": 6.545084971874738e-05,
"loss": 1.4612,
"step": 46
},
{
"epoch": 0.23558897243107768,
"grad_norm": 2.4431586265563965,
"learning_rate": 6.378186779084995e-05,
"loss": 1.4148,
"step": 47
},
{
"epoch": 0.24060150375939848,
"grad_norm": 2.443445920944214,
"learning_rate": 6.209609477998338e-05,
"loss": 1.4315,
"step": 48
},
{
"epoch": 0.24561403508771928,
"grad_norm": 1.969445824623108,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.3969,
"step": 49
},
{
"epoch": 0.2506265664160401,
"grad_norm": 2.1230924129486084,
"learning_rate": 5.868240888334653e-05,
"loss": 1.4047,
"step": 50
},
{
"epoch": 0.2556390977443609,
"grad_norm": 1.230220079421997,
"learning_rate": 5.695865504800327e-05,
"loss": 1.2893,
"step": 51
},
{
"epoch": 0.2606516290726817,
"grad_norm": 2.455333709716797,
"learning_rate": 5.522642316338268e-05,
"loss": 1.219,
"step": 52
},
{
"epoch": 0.2656641604010025,
"grad_norm": 1.4559978246688843,
"learning_rate": 5.348782368720626e-05,
"loss": 1.2945,
"step": 53
},
{
"epoch": 0.2706766917293233,
"grad_norm": 3.1076526641845703,
"learning_rate": 5.174497483512506e-05,
"loss": 1.3378,
"step": 54
},
{
"epoch": 0.2706766917293233,
"eval_loss": 0.7307549715042114,
"eval_runtime": 28.6544,
"eval_samples_per_second": 5.863,
"eval_steps_per_second": 0.733,
"step": 54
},
{
"epoch": 0.2756892230576441,
"grad_norm": 1.1804949045181274,
"learning_rate": 5e-05,
"loss": 1.1941,
"step": 55
},
{
"epoch": 0.2807017543859649,
"grad_norm": 3.7159390449523926,
"learning_rate": 4.825502516487497e-05,
"loss": 1.599,
"step": 56
},
{
"epoch": 0.2857142857142857,
"grad_norm": 1.7314178943634033,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.2022,
"step": 57
},
{
"epoch": 0.2907268170426065,
"grad_norm": 3.72270131111145,
"learning_rate": 4.477357683661734e-05,
"loss": 1.6478,
"step": 58
},
{
"epoch": 0.2957393483709273,
"grad_norm": 2.9939489364624023,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.5453,
"step": 59
},
{
"epoch": 0.3007518796992481,
"grad_norm": 2.26654314994812,
"learning_rate": 4.131759111665349e-05,
"loss": 1.3947,
"step": 60
},
{
"epoch": 0.3057644110275689,
"grad_norm": 2.3148698806762695,
"learning_rate": 3.960441545911204e-05,
"loss": 1.4923,
"step": 61
},
{
"epoch": 0.3107769423558897,
"grad_norm": 1.921850323677063,
"learning_rate": 3.790390522001662e-05,
"loss": 1.4355,
"step": 62
},
{
"epoch": 0.3157894736842105,
"grad_norm": 2.0384600162506104,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.3469,
"step": 63
},
{
"epoch": 0.3157894736842105,
"eval_loss": 0.6959288716316223,
"eval_runtime": 28.6169,
"eval_samples_per_second": 5.871,
"eval_steps_per_second": 0.734,
"step": 63
},
{
"epoch": 0.3208020050125313,
"grad_norm": 3.557666301727295,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.4865,
"step": 64
},
{
"epoch": 0.3258145363408521,
"grad_norm": 1.5931860208511353,
"learning_rate": 3.289899283371657e-05,
"loss": 1.4297,
"step": 65
},
{
"epoch": 0.3308270676691729,
"grad_norm": 2.942139148712158,
"learning_rate": 3.12696703292044e-05,
"loss": 1.4265,
"step": 66
},
{
"epoch": 0.3358395989974937,
"grad_norm": 1.7241272926330566,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.3485,
"step": 67
},
{
"epoch": 0.3408521303258145,
"grad_norm": 1.5445481538772583,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.4078,
"step": 68
},
{
"epoch": 0.3458646616541353,
"grad_norm": 1.5881041288375854,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.3533,
"step": 69
},
{
"epoch": 0.3508771929824561,
"grad_norm": 1.4929602146148682,
"learning_rate": 2.500000000000001e-05,
"loss": 1.32,
"step": 70
},
{
"epoch": 0.3558897243107769,
"grad_norm": 1.622667670249939,
"learning_rate": 2.350403678833976e-05,
"loss": 1.301,
"step": 71
},
{
"epoch": 0.3609022556390977,
"grad_norm": 1.4693689346313477,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.4372,
"step": 72
},
{
"epoch": 0.3609022556390977,
"eval_loss": 0.6896541118621826,
"eval_runtime": 28.7246,
"eval_samples_per_second": 5.849,
"eval_steps_per_second": 0.731,
"step": 72
},
{
"epoch": 0.3659147869674185,
"grad_norm": 9.016338348388672,
"learning_rate": 2.061073738537635e-05,
"loss": 1.7571,
"step": 73
},
{
"epoch": 0.37092731829573933,
"grad_norm": 2.0812020301818848,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.4578,
"step": 74
},
{
"epoch": 0.37593984962406013,
"grad_norm": 2.0818440914154053,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.4927,
"step": 75
},
{
"epoch": 0.38095238095238093,
"grad_norm": 1.5334950685501099,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.4068,
"step": 76
},
{
"epoch": 0.38596491228070173,
"grad_norm": 1.543641448020935,
"learning_rate": 1.526708147705013e-05,
"loss": 1.4417,
"step": 77
},
{
"epoch": 0.39097744360902253,
"grad_norm": 1.2889515161514282,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.3701,
"step": 78
},
{
"epoch": 0.39598997493734334,
"grad_norm": 1.4031617641448975,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.3879,
"step": 79
},
{
"epoch": 0.40100250626566414,
"grad_norm": 2.3280229568481445,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.4804,
"step": 80
},
{
"epoch": 0.40601503759398494,
"grad_norm": 1.840705394744873,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.4225,
"step": 81
},
{
"epoch": 0.40601503759398494,
"eval_loss": 0.6867606043815613,
"eval_runtime": 28.605,
"eval_samples_per_second": 5.873,
"eval_steps_per_second": 0.734,
"step": 81
},
{
"epoch": 0.41102756892230574,
"grad_norm": 1.6926554441452026,
"learning_rate": 9.549150281252633e-06,
"loss": 1.4528,
"step": 82
},
{
"epoch": 0.41604010025062654,
"grad_norm": 1.542953372001648,
"learning_rate": 8.548121372247918e-06,
"loss": 1.422,
"step": 83
},
{
"epoch": 0.42105263157894735,
"grad_norm": 1.3982118368148804,
"learning_rate": 7.597595192178702e-06,
"loss": 1.43,
"step": 84
},
{
"epoch": 0.42606516290726815,
"grad_norm": 1.7478983402252197,
"learning_rate": 6.698729810778065e-06,
"loss": 1.4112,
"step": 85
},
{
"epoch": 0.43107769423558895,
"grad_norm": 2.144606590270996,
"learning_rate": 5.852620357053651e-06,
"loss": 1.4026,
"step": 86
},
{
"epoch": 0.43609022556390975,
"grad_norm": 1.515303611755371,
"learning_rate": 5.060297685041659e-06,
"loss": 1.3563,
"step": 87
},
{
"epoch": 0.44110275689223055,
"grad_norm": 1.6609570980072021,
"learning_rate": 4.322727117869951e-06,
"loss": 1.4258,
"step": 88
},
{
"epoch": 0.44611528822055135,
"grad_norm": 1.8882701396942139,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.3771,
"step": 89
},
{
"epoch": 0.45112781954887216,
"grad_norm": 1.545371174812317,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.3798,
"step": 90
},
{
"epoch": 0.45112781954887216,
"eval_loss": 0.6865832805633545,
"eval_runtime": 28.6158,
"eval_samples_per_second": 5.871,
"eval_steps_per_second": 0.734,
"step": 90
},
{
"epoch": 0.45614035087719296,
"grad_norm": 2.012698173522949,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.448,
"step": 91
},
{
"epoch": 0.46115288220551376,
"grad_norm": 2.5337393283843994,
"learning_rate": 1.9369152030840556e-06,
"loss": 1.3807,
"step": 92
},
{
"epoch": 0.46616541353383456,
"grad_norm": 2.5152134895324707,
"learning_rate": 1.4852136862001764e-06,
"loss": 1.42,
"step": 93
},
{
"epoch": 0.47117794486215536,
"grad_norm": 2.0130863189697266,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.4192,
"step": 94
},
{
"epoch": 0.47619047619047616,
"grad_norm": 1.2953547239303589,
"learning_rate": 7.596123493895991e-07,
"loss": 1.3369,
"step": 95
},
{
"epoch": 0.48120300751879697,
"grad_norm": 7.441637992858887,
"learning_rate": 4.865965629214819e-07,
"loss": 1.4595,
"step": 96
},
{
"epoch": 0.48621553884711777,
"grad_norm": 2.2585561275482178,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.4148,
"step": 97
},
{
"epoch": 0.49122807017543857,
"grad_norm": 1.8977596759796143,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.4021,
"step": 98
},
{
"epoch": 0.49624060150375937,
"grad_norm": 1.658085823059082,
"learning_rate": 3.04586490452119e-08,
"loss": 1.3434,
"step": 99
},
{
"epoch": 0.49624060150375937,
"eval_loss": 0.686565101146698,
"eval_runtime": 28.6385,
"eval_samples_per_second": 5.866,
"eval_steps_per_second": 0.733,
"step": 99
},
{
"epoch": 0.5012531328320802,
"grad_norm": 1.2815591096878052,
"learning_rate": 0.0,
"loss": 1.3255,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1729574724160717e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
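
# Reader aid (not part of the checkpoint): the JSON above is the trainer_state.json
# that the Hugging Face Trainer writes into each checkpoint directory. A minimal,
# hedged sketch of how its log_history could be inspected with only the standard
# library follows; the file path used here is an assumption for illustration.
import json

with open("trainer_state.json") as f:  # hypothetical path to the file shown above
    state = json.load(f)

# log_history mixes per-step training entries ("loss") and evaluation entries ("eval_loss").
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.3f}")
print(f"final train loss: {train_logs[-1]['loss']}")
print(f"final eval loss:  {eval_logs[-1]['eval_loss']}")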