{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.322234156820623,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004296455424274973,
"grad_norm": 17.51933479309082,
"learning_rate": 3.3333333333333335e-05,
"loss": 12.3387,
"step": 1
},
{
"epoch": 0.004296455424274973,
"eval_loss": 14.726823806762695,
"eval_runtime": 31.9634,
"eval_samples_per_second": 6.132,
"eval_steps_per_second": 3.066,
"step": 1
},
{
"epoch": 0.008592910848549946,
"grad_norm": 18.739076614379883,
"learning_rate": 6.666666666666667e-05,
"loss": 13.1398,
"step": 2
},
{
"epoch": 0.01288936627282492,
"grad_norm": 13.577031135559082,
"learning_rate": 0.0001,
"loss": 12.1759,
"step": 3
},
{
"epoch": 0.017185821697099892,
"grad_norm": 18.537343978881836,
"learning_rate": 9.99524110790929e-05,
"loss": 10.922,
"step": 4
},
{
"epoch": 0.021482277121374866,
"grad_norm": 17.2657470703125,
"learning_rate": 9.980973490458728e-05,
"loss": 8.0517,
"step": 5
},
{
"epoch": 0.02577873254564984,
"grad_norm": 23.952505111694336,
"learning_rate": 9.957224306869053e-05,
"loss": 6.6968,
"step": 6
},
{
"epoch": 0.03007518796992481,
"grad_norm": 15.488723754882812,
"learning_rate": 9.924038765061042e-05,
"loss": 5.8581,
"step": 7
},
{
"epoch": 0.034371643394199784,
"grad_norm": 14.229116439819336,
"learning_rate": 9.881480035599667e-05,
"loss": 5.3963,
"step": 8
},
{
"epoch": 0.03866809881847476,
"grad_norm": 13.335371017456055,
"learning_rate": 9.829629131445342e-05,
"loss": 4.8322,
"step": 9
},
{
"epoch": 0.04296455424274973,
"grad_norm": 10.152738571166992,
"learning_rate": 9.768584753741134e-05,
"loss": 3.6478,
"step": 10
},
{
"epoch": 0.047261009667024706,
"grad_norm": 7.60871696472168,
"learning_rate": 9.698463103929542e-05,
"loss": 3.1707,
"step": 11
},
{
"epoch": 0.05155746509129968,
"grad_norm": 12.572739601135254,
"learning_rate": 9.619397662556435e-05,
"loss": 2.8969,
"step": 12
},
{
"epoch": 0.055853920515574654,
"grad_norm": 6.163885116577148,
"learning_rate": 9.53153893518325e-05,
"loss": 2.7814,
"step": 13
},
{
"epoch": 0.06015037593984962,
"grad_norm": 9.737069129943848,
"learning_rate": 9.435054165891109e-05,
"loss": 2.9291,
"step": 14
},
{
"epoch": 0.0644468313641246,
"grad_norm": 15.192973136901855,
"learning_rate": 9.330127018922194e-05,
"loss": 2.7453,
"step": 15
},
{
"epoch": 0.06874328678839957,
"grad_norm": 6.198273658752441,
"learning_rate": 9.21695722906443e-05,
"loss": 2.5762,
"step": 16
},
{
"epoch": 0.07303974221267455,
"grad_norm": 9.232662200927734,
"learning_rate": 9.09576022144496e-05,
"loss": 2.4606,
"step": 17
},
{
"epoch": 0.07733619763694952,
"grad_norm": 10.766953468322754,
"learning_rate": 8.966766701456177e-05,
"loss": 2.9624,
"step": 18
},
{
"epoch": 0.08163265306122448,
"grad_norm": 20.73750877380371,
"learning_rate": 8.83022221559489e-05,
"loss": 3.0332,
"step": 19
},
{
"epoch": 0.08592910848549946,
"grad_norm": 8.655370712280273,
"learning_rate": 8.68638668405062e-05,
"loss": 2.4121,
"step": 20
},
{
"epoch": 0.09022556390977443,
"grad_norm": 1.5770183801651,
"learning_rate": 8.535533905932738e-05,
"loss": 2.5083,
"step": 21
},
{
"epoch": 0.09452201933404941,
"grad_norm": 9.22127914428711,
"learning_rate": 8.377951038078302e-05,
"loss": 2.5667,
"step": 22
},
{
"epoch": 0.09881847475832438,
"grad_norm": 1.7212750911712646,
"learning_rate": 8.213938048432697e-05,
"loss": 2.3424,
"step": 23
},
{
"epoch": 0.10311493018259936,
"grad_norm": 3.1742970943450928,
"learning_rate": 8.043807145043604e-05,
"loss": 2.6105,
"step": 24
},
{
"epoch": 0.10741138560687433,
"grad_norm": 15.835758209228516,
"learning_rate": 7.86788218175523e-05,
"loss": 2.5565,
"step": 25
},
{
"epoch": 0.10741138560687433,
"eval_loss": 2.6903557777404785,
"eval_runtime": 32.5076,
"eval_samples_per_second": 6.029,
"eval_steps_per_second": 3.015,
"step": 25
},
{
"epoch": 0.11170784103114931,
"grad_norm": 20.040693283081055,
"learning_rate": 7.68649804173412e-05,
"loss": 2.8217,
"step": 26
},
{
"epoch": 0.11600429645542427,
"grad_norm": 8.479001998901367,
"learning_rate": 7.500000000000001e-05,
"loss": 2.4105,
"step": 27
},
{
"epoch": 0.12030075187969924,
"grad_norm": 4.354423522949219,
"learning_rate": 7.308743066175172e-05,
"loss": 2.4466,
"step": 28
},
{
"epoch": 0.12459720730397422,
"grad_norm": 3.0479633808135986,
"learning_rate": 7.113091308703498e-05,
"loss": 2.496,
"step": 29
},
{
"epoch": 0.1288936627282492,
"grad_norm": 5.260918617248535,
"learning_rate": 6.91341716182545e-05,
"loss": 2.7696,
"step": 30
},
{
"epoch": 0.13319011815252416,
"grad_norm": 2.6381261348724365,
"learning_rate": 6.710100716628344e-05,
"loss": 2.4536,
"step": 31
},
{
"epoch": 0.13748657357679914,
"grad_norm": 2.3083853721618652,
"learning_rate": 6.503528997521366e-05,
"loss": 2.4325,
"step": 32
},
{
"epoch": 0.14178302900107412,
"grad_norm": 4.7310791015625,
"learning_rate": 6.294095225512603e-05,
"loss": 2.4458,
"step": 33
},
{
"epoch": 0.1460794844253491,
"grad_norm": 3.6756527423858643,
"learning_rate": 6.0821980696905146e-05,
"loss": 2.5719,
"step": 34
},
{
"epoch": 0.15037593984962405,
"grad_norm": 2.2964797019958496,
"learning_rate": 5.868240888334653e-05,
"loss": 2.5365,
"step": 35
},
{
"epoch": 0.15467239527389903,
"grad_norm": 4.085907459259033,
"learning_rate": 5.6526309611002594e-05,
"loss": 2.684,
"step": 36
},
{
"epoch": 0.158968850698174,
"grad_norm": 5.398529052734375,
"learning_rate": 5.435778713738292e-05,
"loss": 2.5946,
"step": 37
},
{
"epoch": 0.16326530612244897,
"grad_norm": 4.9892754554748535,
"learning_rate": 5.218096936826681e-05,
"loss": 2.7752,
"step": 38
},
{
"epoch": 0.16756176154672395,
"grad_norm": 4.568504810333252,
"learning_rate": 5e-05,
"loss": 2.5394,
"step": 39
},
{
"epoch": 0.17185821697099893,
"grad_norm": 5.603278636932373,
"learning_rate": 4.781903063173321e-05,
"loss": 2.3277,
"step": 40
},
{
"epoch": 0.1761546723952739,
"grad_norm": 5.817256927490234,
"learning_rate": 4.564221286261709e-05,
"loss": 2.7044,
"step": 41
},
{
"epoch": 0.18045112781954886,
"grad_norm": 5.201894283294678,
"learning_rate": 4.347369038899744e-05,
"loss": 2.8322,
"step": 42
},
{
"epoch": 0.18474758324382384,
"grad_norm": 2.171565055847168,
"learning_rate": 4.131759111665349e-05,
"loss": 2.495,
"step": 43
},
{
"epoch": 0.18904403866809882,
"grad_norm": 4.212491035461426,
"learning_rate": 3.917801930309486e-05,
"loss": 2.7337,
"step": 44
},
{
"epoch": 0.1933404940923738,
"grad_norm": 4.5573954582214355,
"learning_rate": 3.705904774487396e-05,
"loss": 2.6864,
"step": 45
},
{
"epoch": 0.19763694951664876,
"grad_norm": 7.083897113800049,
"learning_rate": 3.4964710024786354e-05,
"loss": 2.9271,
"step": 46
},
{
"epoch": 0.20193340494092374,
"grad_norm": 5.476693153381348,
"learning_rate": 3.289899283371657e-05,
"loss": 2.7912,
"step": 47
},
{
"epoch": 0.20622986036519872,
"grad_norm": 2.044529676437378,
"learning_rate": 3.086582838174551e-05,
"loss": 2.4948,
"step": 48
},
{
"epoch": 0.21052631578947367,
"grad_norm": 4.62539529800415,
"learning_rate": 2.886908691296504e-05,
"loss": 2.4246,
"step": 49
},
{
"epoch": 0.21482277121374865,
"grad_norm": 8.88616943359375,
"learning_rate": 2.6912569338248315e-05,
"loss": 3.1202,
"step": 50
},
{
"epoch": 0.21482277121374865,
"eval_loss": 2.50577712059021,
"eval_runtime": 32.5083,
"eval_samples_per_second": 6.029,
"eval_steps_per_second": 3.015,
"step": 50
},
{
"epoch": 0.21911922663802363,
"grad_norm": 18.871131896972656,
"learning_rate": 2.500000000000001e-05,
"loss": 2.5053,
"step": 51
},
{
"epoch": 0.22341568206229862,
"grad_norm": 14.663223266601562,
"learning_rate": 2.3135019582658802e-05,
"loss": 2.3519,
"step": 52
},
{
"epoch": 0.22771213748657357,
"grad_norm": 14.215468406677246,
"learning_rate": 2.132117818244771e-05,
"loss": 2.1756,
"step": 53
},
{
"epoch": 0.23200859291084855,
"grad_norm": 13.670672416687012,
"learning_rate": 1.9561928549563968e-05,
"loss": 1.8397,
"step": 54
},
{
"epoch": 0.23630504833512353,
"grad_norm": 8.020715713500977,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.8855,
"step": 55
},
{
"epoch": 0.24060150375939848,
"grad_norm": 10.989826202392578,
"learning_rate": 1.622048961921699e-05,
"loss": 1.764,
"step": 56
},
{
"epoch": 0.24489795918367346,
"grad_norm": 5.959054946899414,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.7138,
"step": 57
},
{
"epoch": 0.24919441460794844,
"grad_norm": 7.55777645111084,
"learning_rate": 1.3136133159493802e-05,
"loss": 2.2298,
"step": 58
},
{
"epoch": 0.2534908700322234,
"grad_norm": 2.828930377960205,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.6894,
"step": 59
},
{
"epoch": 0.2577873254564984,
"grad_norm": 5.5222883224487305,
"learning_rate": 1.0332332985438248e-05,
"loss": 1.7706,
"step": 60
},
{
"epoch": 0.2620837808807734,
"grad_norm": 3.887505054473877,
"learning_rate": 9.042397785550405e-06,
"loss": 1.8653,
"step": 61
},
{
"epoch": 0.2663802363050483,
"grad_norm": 3.8996193408966064,
"learning_rate": 7.830427709355725e-06,
"loss": 1.5454,
"step": 62
},
{
"epoch": 0.2706766917293233,
"grad_norm": 2.5269007682800293,
"learning_rate": 6.698729810778065e-06,
"loss": 2.0161,
"step": 63
},
{
"epoch": 0.2749731471535983,
"grad_norm": 6.614835739135742,
"learning_rate": 5.649458341088915e-06,
"loss": 2.288,
"step": 64
},
{
"epoch": 0.27926960257787325,
"grad_norm": 3.5039703845977783,
"learning_rate": 4.684610648167503e-06,
"loss": 1.7184,
"step": 65
},
{
"epoch": 0.28356605800214824,
"grad_norm": 3.9075820446014404,
"learning_rate": 3.8060233744356633e-06,
"loss": 2.2969,
"step": 66
},
{
"epoch": 0.2878625134264232,
"grad_norm": 3.1495635509490967,
"learning_rate": 3.0153689607045845e-06,
"loss": 2.1496,
"step": 67
},
{
"epoch": 0.2921589688506982,
"grad_norm": 2.422870397567749,
"learning_rate": 2.314152462588659e-06,
"loss": 2.0091,
"step": 68
},
{
"epoch": 0.2964554242749731,
"grad_norm": 1.5962601900100708,
"learning_rate": 1.70370868554659e-06,
"loss": 1.7433,
"step": 69
},
{
"epoch": 0.3007518796992481,
"grad_norm": 3.402613878250122,
"learning_rate": 1.1851996440033319e-06,
"loss": 2.3376,
"step": 70
},
{
"epoch": 0.3050483351235231,
"grad_norm": 5.880324363708496,
"learning_rate": 7.596123493895991e-07,
"loss": 2.4027,
"step": 71
},
{
"epoch": 0.30934479054779807,
"grad_norm": 4.647853374481201,
"learning_rate": 4.277569313094809e-07,
"loss": 2.0494,
"step": 72
},
{
"epoch": 0.31364124597207305,
"grad_norm": 3.6768651008605957,
"learning_rate": 1.9026509541272275e-07,
"loss": 2.2827,
"step": 73
},
{
"epoch": 0.317937701396348,
"grad_norm": 2.7547430992126465,
"learning_rate": 4.7588920907110094e-08,
"loss": 2.1691,
"step": 74
},
{
"epoch": 0.322234156820623,
"grad_norm": 2.9051625728607178,
"learning_rate": 0.0,
"loss": 2.2072,
"step": 75
},
{
"epoch": 0.322234156820623,
"eval_loss": 2.352761745452881,
"eval_runtime": 32.5272,
"eval_samples_per_second": 6.026,
"eval_steps_per_second": 3.013,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.119015678246912e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}