{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.19398642095053345,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0019398642095053346,
"grad_norm": 5.092199802398682,
"learning_rate": 1e-05,
"loss": 4.7383,
"step": 1
},
{
"epoch": 0.0019398642095053346,
"eval_loss": 4.821082592010498,
"eval_runtime": 9.658,
"eval_samples_per_second": 44.937,
"eval_steps_per_second": 5.695,
"step": 1
},
{
"epoch": 0.0038797284190106693,
"grad_norm": 4.449008464813232,
"learning_rate": 2e-05,
"loss": 4.6036,
"step": 2
},
{
"epoch": 0.005819592628516004,
"grad_norm": 4.542291164398193,
"learning_rate": 3e-05,
"loss": 4.7654,
"step": 3
},
{
"epoch": 0.007759456838021339,
"grad_norm": 4.831502437591553,
"learning_rate": 4e-05,
"loss": 4.6307,
"step": 4
},
{
"epoch": 0.009699321047526674,
"grad_norm": 4.798445224761963,
"learning_rate": 5e-05,
"loss": 4.689,
"step": 5
},
{
"epoch": 0.011639185257032008,
"grad_norm": 4.600784778594971,
"learning_rate": 6e-05,
"loss": 4.4962,
"step": 6
},
{
"epoch": 0.013579049466537343,
"grad_norm": 4.6975178718566895,
"learning_rate": 7e-05,
"loss": 4.6526,
"step": 7
},
{
"epoch": 0.015518913676042677,
"grad_norm": 4.297379970550537,
"learning_rate": 8e-05,
"loss": 4.4603,
"step": 8
},
{
"epoch": 0.01745877788554801,
"grad_norm": 4.602691650390625,
"learning_rate": 9e-05,
"loss": 4.0801,
"step": 9
},
{
"epoch": 0.01745877788554801,
"eval_loss": 3.935520887374878,
"eval_runtime": 7.7589,
"eval_samples_per_second": 55.935,
"eval_steps_per_second": 7.089,
"step": 9
},
{
"epoch": 0.019398642095053348,
"grad_norm": 3.76698899269104,
"learning_rate": 0.0001,
"loss": 3.9645,
"step": 10
},
{
"epoch": 0.02133850630455868,
"grad_norm": 3.087214708328247,
"learning_rate": 9.99695413509548e-05,
"loss": 3.9473,
"step": 11
},
{
"epoch": 0.023278370514064017,
"grad_norm": 3.322976589202881,
"learning_rate": 9.987820251299122e-05,
"loss": 3.652,
"step": 12
},
{
"epoch": 0.02521823472356935,
"grad_norm": 3.5539023876190186,
"learning_rate": 9.972609476841367e-05,
"loss": 3.3627,
"step": 13
},
{
"epoch": 0.027158098933074686,
"grad_norm": 3.190157413482666,
"learning_rate": 9.951340343707852e-05,
"loss": 3.1173,
"step": 14
},
{
"epoch": 0.029097963142580018,
"grad_norm": 3.779177188873291,
"learning_rate": 9.924038765061042e-05,
"loss": 2.8321,
"step": 15
},
{
"epoch": 0.031037827352085354,
"grad_norm": 3.7224929332733154,
"learning_rate": 9.890738003669029e-05,
"loss": 2.504,
"step": 16
},
{
"epoch": 0.03297769156159069,
"grad_norm": 3.4633424282073975,
"learning_rate": 9.851478631379982e-05,
"loss": 2.2944,
"step": 17
},
{
"epoch": 0.03491755577109602,
"grad_norm": 2.928765296936035,
"learning_rate": 9.806308479691595e-05,
"loss": 2.2625,
"step": 18
},
{
"epoch": 0.03491755577109602,
"eval_loss": 2.196943759918213,
"eval_runtime": 7.7522,
"eval_samples_per_second": 55.984,
"eval_steps_per_second": 7.095,
"step": 18
},
{
"epoch": 0.03685741998060136,
"grad_norm": 3.9546890258789062,
"learning_rate": 9.755282581475769e-05,
"loss": 2.3878,
"step": 19
},
{
"epoch": 0.038797284190106696,
"grad_norm": 4.105186462402344,
"learning_rate": 9.698463103929542e-05,
"loss": 2.1788,
"step": 20
},
{
"epoch": 0.040737148399612025,
"grad_norm": 3.8948333263397217,
"learning_rate": 9.635919272833938e-05,
"loss": 1.8275,
"step": 21
},
{
"epoch": 0.04267701260911736,
"grad_norm": 3.057760238647461,
"learning_rate": 9.567727288213005e-05,
"loss": 1.621,
"step": 22
},
{
"epoch": 0.0446168768186227,
"grad_norm": 2.3399696350097656,
"learning_rate": 9.493970231495835e-05,
"loss": 1.655,
"step": 23
},
{
"epoch": 0.04655674102812803,
"grad_norm": 2.517136335372925,
"learning_rate": 9.414737964294636e-05,
"loss": 1.7654,
"step": 24
},
{
"epoch": 0.04849660523763336,
"grad_norm": 2.6656148433685303,
"learning_rate": 9.330127018922194e-05,
"loss": 1.7639,
"step": 25
},
{
"epoch": 0.0504364694471387,
"grad_norm": 3.310025930404663,
"learning_rate": 9.24024048078213e-05,
"loss": 1.3531,
"step": 26
},
{
"epoch": 0.052376333656644035,
"grad_norm": 2.69954776763916,
"learning_rate": 9.145187862775209e-05,
"loss": 1.1658,
"step": 27
},
{
"epoch": 0.052376333656644035,
"eval_loss": 1.3316627740859985,
"eval_runtime": 7.7784,
"eval_samples_per_second": 55.796,
"eval_steps_per_second": 7.071,
"step": 27
},
{
"epoch": 0.05431619786614937,
"grad_norm": 2.0734622478485107,
"learning_rate": 9.045084971874738e-05,
"loss": 1.3399,
"step": 28
},
{
"epoch": 0.05625606207565471,
"grad_norm": 2.7428479194641113,
"learning_rate": 8.940053768033609e-05,
"loss": 1.4039,
"step": 29
},
{
"epoch": 0.058195926285160036,
"grad_norm": 1.9327542781829834,
"learning_rate": 8.83022221559489e-05,
"loss": 0.9577,
"step": 30
},
{
"epoch": 0.06013579049466537,
"grad_norm": 2.079310417175293,
"learning_rate": 8.715724127386972e-05,
"loss": 0.9913,
"step": 31
},
{
"epoch": 0.06207565470417071,
"grad_norm": 2.0377795696258545,
"learning_rate": 8.596699001693255e-05,
"loss": 0.9515,
"step": 32
},
{
"epoch": 0.06401551891367604,
"grad_norm": 1.990349292755127,
"learning_rate": 8.473291852294987e-05,
"loss": 1.1051,
"step": 33
},
{
"epoch": 0.06595538312318137,
"grad_norm": 2.111757755279541,
"learning_rate": 8.345653031794292e-05,
"loss": 1.0754,
"step": 34
},
{
"epoch": 0.06789524733268672,
"grad_norm": 2.61749005317688,
"learning_rate": 8.213938048432697e-05,
"loss": 1.0685,
"step": 35
},
{
"epoch": 0.06983511154219205,
"grad_norm": 1.9351005554199219,
"learning_rate": 8.07830737662829e-05,
"loss": 0.8439,
"step": 36
},
{
"epoch": 0.06983511154219205,
"eval_loss": 0.8821842670440674,
"eval_runtime": 7.7414,
"eval_samples_per_second": 56.063,
"eval_steps_per_second": 7.105,
"step": 36
},
{
"epoch": 0.07177497575169738,
"grad_norm": 1.9722400903701782,
"learning_rate": 7.938926261462366e-05,
"loss": 0.9561,
"step": 37
},
{
"epoch": 0.07371483996120272,
"grad_norm": 2.1616642475128174,
"learning_rate": 7.795964517353735e-05,
"loss": 1.0384,
"step": 38
},
{
"epoch": 0.07565470417070805,
"grad_norm": 1.5446350574493408,
"learning_rate": 7.649596321166024e-05,
"loss": 0.7242,
"step": 39
},
{
"epoch": 0.07759456838021339,
"grad_norm": 1.5929559469223022,
"learning_rate": 7.500000000000001e-05,
"loss": 0.843,
"step": 40
},
{
"epoch": 0.07953443258971872,
"grad_norm": 1.5796291828155518,
"learning_rate": 7.347357813929454e-05,
"loss": 0.8905,
"step": 41
},
{
"epoch": 0.08147429679922405,
"grad_norm": 1.9490644931793213,
"learning_rate": 7.191855733945387e-05,
"loss": 0.7645,
"step": 42
},
{
"epoch": 0.08341416100872939,
"grad_norm": 1.8810855150222778,
"learning_rate": 7.033683215379002e-05,
"loss": 0.7698,
"step": 43
},
{
"epoch": 0.08535402521823472,
"grad_norm": 2.4278464317321777,
"learning_rate": 6.873032967079561e-05,
"loss": 0.795,
"step": 44
},
{
"epoch": 0.08729388942774007,
"grad_norm": 1.4299455881118774,
"learning_rate": 6.710100716628344e-05,
"loss": 0.5331,
"step": 45
},
{
"epoch": 0.08729388942774007,
"eval_loss": 0.6151111721992493,
"eval_runtime": 7.7408,
"eval_samples_per_second": 56.067,
"eval_steps_per_second": 7.105,
"step": 45
},
{
"epoch": 0.0892337536372454,
"grad_norm": 1.8526909351348877,
"learning_rate": 6.545084971874738e-05,
"loss": 0.7683,
"step": 46
},
{
"epoch": 0.09117361784675072,
"grad_norm": 1.4803686141967773,
"learning_rate": 6.378186779084995e-05,
"loss": 0.5607,
"step": 47
},
{
"epoch": 0.09311348205625607,
"grad_norm": 1.5188422203063965,
"learning_rate": 6.209609477998338e-05,
"loss": 0.4849,
"step": 48
},
{
"epoch": 0.0950533462657614,
"grad_norm": 1.1162447929382324,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.4439,
"step": 49
},
{
"epoch": 0.09699321047526673,
"grad_norm": 1.8776347637176514,
"learning_rate": 5.868240888334653e-05,
"loss": 0.575,
"step": 50
},
{
"epoch": 0.09893307468477207,
"grad_norm": 1.4063315391540527,
"learning_rate": 5.695865504800327e-05,
"loss": 0.5396,
"step": 51
},
{
"epoch": 0.1008729388942774,
"grad_norm": 1.2367199659347534,
"learning_rate": 5.522642316338268e-05,
"loss": 0.4351,
"step": 52
},
{
"epoch": 0.10281280310378274,
"grad_norm": 1.320259928703308,
"learning_rate": 5.348782368720626e-05,
"loss": 0.4758,
"step": 53
},
{
"epoch": 0.10475266731328807,
"grad_norm": 1.6481106281280518,
"learning_rate": 5.174497483512506e-05,
"loss": 0.5796,
"step": 54
},
{
"epoch": 0.10475266731328807,
"eval_loss": 0.4402259886264801,
"eval_runtime": 7.7586,
"eval_samples_per_second": 55.938,
"eval_steps_per_second": 7.089,
"step": 54
},
{
"epoch": 0.1066925315227934,
"grad_norm": 1.4706881046295166,
"learning_rate": 5e-05,
"loss": 0.5325,
"step": 55
},
{
"epoch": 0.10863239573229874,
"grad_norm": 1.681864619255066,
"learning_rate": 4.825502516487497e-05,
"loss": 0.5644,
"step": 56
},
{
"epoch": 0.11057225994180407,
"grad_norm": 1.4026848077774048,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.3279,
"step": 57
},
{
"epoch": 0.11251212415130941,
"grad_norm": 1.2170274257659912,
"learning_rate": 4.477357683661734e-05,
"loss": 0.3805,
"step": 58
},
{
"epoch": 0.11445198836081474,
"grad_norm": 1.9468257427215576,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.5186,
"step": 59
},
{
"epoch": 0.11639185257032007,
"grad_norm": 1.2916088104248047,
"learning_rate": 4.131759111665349e-05,
"loss": 0.2757,
"step": 60
},
{
"epoch": 0.11833171677982542,
"grad_norm": 1.548339605331421,
"learning_rate": 3.960441545911204e-05,
"loss": 0.3923,
"step": 61
},
{
"epoch": 0.12027158098933075,
"grad_norm": 2.265233039855957,
"learning_rate": 3.790390522001662e-05,
"loss": 0.6105,
"step": 62
},
{
"epoch": 0.12221144519883609,
"grad_norm": 1.4970910549163818,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.4189,
"step": 63
},
{
"epoch": 0.12221144519883609,
"eval_loss": 0.3442797064781189,
"eval_runtime": 7.745,
"eval_samples_per_second": 56.036,
"eval_steps_per_second": 7.101,
"step": 63
},
{
"epoch": 0.12415130940834142,
"grad_norm": 1.1539958715438843,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.2965,
"step": 64
},
{
"epoch": 0.12609117361784675,
"grad_norm": 1.2717357873916626,
"learning_rate": 3.289899283371657e-05,
"loss": 0.3565,
"step": 65
},
{
"epoch": 0.1280310378273521,
"grad_norm": 1.1463826894760132,
"learning_rate": 3.12696703292044e-05,
"loss": 0.3243,
"step": 66
},
{
"epoch": 0.12997090203685743,
"grad_norm": 2.083845615386963,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.4723,
"step": 67
},
{
"epoch": 0.13191076624636275,
"grad_norm": 1.220322608947754,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.3566,
"step": 68
},
{
"epoch": 0.1338506304558681,
"grad_norm": 1.4377970695495605,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.384,
"step": 69
},
{
"epoch": 0.13579049466537343,
"grad_norm": 1.6574350595474243,
"learning_rate": 2.500000000000001e-05,
"loss": 0.3038,
"step": 70
},
{
"epoch": 0.13773035887487875,
"grad_norm": 1.2186497449874878,
"learning_rate": 2.350403678833976e-05,
"loss": 0.2762,
"step": 71
},
{
"epoch": 0.1396702230843841,
"grad_norm": 1.6001431941986084,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.25,
"step": 72
},
{
"epoch": 0.1396702230843841,
"eval_loss": 0.3006916642189026,
"eval_runtime": 7.7498,
"eval_samples_per_second": 56.001,
"eval_steps_per_second": 7.097,
"step": 72
},
{
"epoch": 0.14161008729388944,
"grad_norm": 1.6216119527816772,
"learning_rate": 2.061073738537635e-05,
"loss": 0.391,
"step": 73
},
{
"epoch": 0.14354995150339475,
"grad_norm": 2.5727713108062744,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.5055,
"step": 74
},
{
"epoch": 0.1454898157129001,
"grad_norm": 1.4488884210586548,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.3118,
"step": 75
},
{
"epoch": 0.14742967992240544,
"grad_norm": 1.5707823038101196,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.329,
"step": 76
},
{
"epoch": 0.14936954413191075,
"grad_norm": 0.8776403069496155,
"learning_rate": 1.526708147705013e-05,
"loss": 0.2757,
"step": 77
},
{
"epoch": 0.1513094083414161,
"grad_norm": 1.9407877922058105,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.348,
"step": 78
},
{
"epoch": 0.15324927255092144,
"grad_norm": 1.6693557500839233,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.3072,
"step": 79
},
{
"epoch": 0.15518913676042678,
"grad_norm": 0.9786368608474731,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.2356,
"step": 80
},
{
"epoch": 0.1571290009699321,
"grad_norm": 1.0574125051498413,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.1978,
"step": 81
},
{
"epoch": 0.1571290009699321,
"eval_loss": 0.2722632586956024,
"eval_runtime": 7.7369,
"eval_samples_per_second": 56.095,
"eval_steps_per_second": 7.109,
"step": 81
},
{
"epoch": 0.15906886517943744,
"grad_norm": 1.628749966621399,
"learning_rate": 9.549150281252633e-06,
"loss": 0.2687,
"step": 82
},
{
"epoch": 0.16100872938894278,
"grad_norm": 1.5231091976165771,
"learning_rate": 8.548121372247918e-06,
"loss": 0.307,
"step": 83
},
{
"epoch": 0.1629485935984481,
"grad_norm": 0.9854992032051086,
"learning_rate": 7.597595192178702e-06,
"loss": 0.183,
"step": 84
},
{
"epoch": 0.16488845780795344,
"grad_norm": 1.5809539556503296,
"learning_rate": 6.698729810778065e-06,
"loss": 0.2977,
"step": 85
},
{
"epoch": 0.16682832201745879,
"grad_norm": 1.4216303825378418,
"learning_rate": 5.852620357053651e-06,
"loss": 0.275,
"step": 86
},
{
"epoch": 0.1687681862269641,
"grad_norm": 1.5944830179214478,
"learning_rate": 5.060297685041659e-06,
"loss": 0.296,
"step": 87
},
{
"epoch": 0.17070805043646944,
"grad_norm": 1.6960701942443848,
"learning_rate": 4.322727117869951e-06,
"loss": 0.3684,
"step": 88
},
{
"epoch": 0.1726479146459748,
"grad_norm": 1.3024052381515503,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.304,
"step": 89
},
{
"epoch": 0.17458777885548013,
"grad_norm": 1.6905990839004517,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.3042,
"step": 90
},
{
"epoch": 0.17458777885548013,
"eval_loss": 0.26292115449905396,
"eval_runtime": 7.7439,
"eval_samples_per_second": 56.044,
"eval_steps_per_second": 7.102,
"step": 90
},
{
"epoch": 0.17652764306498545,
"grad_norm": 1.0282114744186401,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.227,
"step": 91
},
{
"epoch": 0.1784675072744908,
"grad_norm": 1.2541886568069458,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.2273,
"step": 92
},
{
"epoch": 0.18040737148399613,
"grad_norm": 1.7235442399978638,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.3701,
"step": 93
},
{
"epoch": 0.18234723569350145,
"grad_norm": 1.2627227306365967,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.3245,
"step": 94
},
{
"epoch": 0.1842870999030068,
"grad_norm": 1.996457815170288,
"learning_rate": 7.596123493895991e-07,
"loss": 0.3793,
"step": 95
},
{
"epoch": 0.18622696411251213,
"grad_norm": 1.1682064533233643,
"learning_rate": 4.865965629214819e-07,
"loss": 0.2895,
"step": 96
},
{
"epoch": 0.18816682832201745,
"grad_norm": 0.9628655910491943,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.2685,
"step": 97
},
{
"epoch": 0.1901066925315228,
"grad_norm": 1.4077612161636353,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.2579,
"step": 98
},
{
"epoch": 0.19204655674102813,
"grad_norm": 1.1800073385238647,
"learning_rate": 3.04586490452119e-08,
"loss": 0.2709,
"step": 99
},
{
"epoch": 0.19204655674102813,
"eval_loss": 0.26203885674476624,
"eval_runtime": 7.7497,
"eval_samples_per_second": 56.002,
"eval_steps_per_second": 7.097,
"step": 99
},
{
"epoch": 0.19398642095053345,
"grad_norm": 1.5470740795135498,
"learning_rate": 0.0,
"loss": 0.2969,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2596262859767808.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}