{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2949852507374631,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0029498525073746312,
"grad_norm": 2.6730434894561768,
"learning_rate": 1e-05,
"loss": 4.059,
"step": 1
},
{
"epoch": 0.0029498525073746312,
"eval_loss": 2.0677247047424316,
"eval_runtime": 44.3198,
"eval_samples_per_second": 6.453,
"eval_steps_per_second": 0.812,
"step": 1
},
{
"epoch": 0.0058997050147492625,
"grad_norm": 1.780819296836853,
"learning_rate": 2e-05,
"loss": 3.1111,
"step": 2
},
{
"epoch": 0.008849557522123894,
"grad_norm": 3.14420485496521,
"learning_rate": 3e-05,
"loss": 3.5452,
"step": 3
},
{
"epoch": 0.011799410029498525,
"grad_norm": 4.393539905548096,
"learning_rate": 4e-05,
"loss": 4.6015,
"step": 4
},
{
"epoch": 0.014749262536873156,
"grad_norm": 3.3870508670806885,
"learning_rate": 5e-05,
"loss": 4.1767,
"step": 5
},
{
"epoch": 0.017699115044247787,
"grad_norm": 2.174534559249878,
"learning_rate": 6e-05,
"loss": 3.0682,
"step": 6
},
{
"epoch": 0.02064896755162242,
"grad_norm": 2.650453567504883,
"learning_rate": 7e-05,
"loss": 3.5393,
"step": 7
},
{
"epoch": 0.02359882005899705,
"grad_norm": 2.851867437362671,
"learning_rate": 8e-05,
"loss": 3.6709,
"step": 8
},
{
"epoch": 0.02654867256637168,
"grad_norm": 5.362432956695557,
"learning_rate": 9e-05,
"loss": 3.7535,
"step": 9
},
{
"epoch": 0.02654867256637168,
"eval_loss": 1.4621776342391968,
"eval_runtime": 44.3203,
"eval_samples_per_second": 6.453,
"eval_steps_per_second": 0.812,
"step": 9
},
{
"epoch": 0.029498525073746312,
"grad_norm": 3.1995365619659424,
"learning_rate": 0.0001,
"loss": 3.1976,
"step": 10
},
{
"epoch": 0.032448377581120944,
"grad_norm": 2.1539673805236816,
"learning_rate": 9.99695413509548e-05,
"loss": 2.5042,
"step": 11
},
{
"epoch": 0.035398230088495575,
"grad_norm": 2.2371580600738525,
"learning_rate": 9.987820251299122e-05,
"loss": 2.484,
"step": 12
},
{
"epoch": 0.038348082595870206,
"grad_norm": 2.3283255100250244,
"learning_rate": 9.972609476841367e-05,
"loss": 1.8651,
"step": 13
},
{
"epoch": 0.04129793510324484,
"grad_norm": 2.758920192718506,
"learning_rate": 9.951340343707852e-05,
"loss": 2.2593,
"step": 14
},
{
"epoch": 0.04424778761061947,
"grad_norm": 2.5637218952178955,
"learning_rate": 9.924038765061042e-05,
"loss": 2.2398,
"step": 15
},
{
"epoch": 0.0471976401179941,
"grad_norm": 3.0846474170684814,
"learning_rate": 9.890738003669029e-05,
"loss": 1.9655,
"step": 16
},
{
"epoch": 0.05014749262536873,
"grad_norm": 3.055612087249756,
"learning_rate": 9.851478631379982e-05,
"loss": 2.5861,
"step": 17
},
{
"epoch": 0.05309734513274336,
"grad_norm": 3.042341470718384,
"learning_rate": 9.806308479691595e-05,
"loss": 2.08,
"step": 18
},
{
"epoch": 0.05309734513274336,
"eval_loss": 1.0477064847946167,
"eval_runtime": 44.3486,
"eval_samples_per_second": 6.449,
"eval_steps_per_second": 0.812,
"step": 18
},
{
"epoch": 0.05604719764011799,
"grad_norm": 2.4983811378479004,
"learning_rate": 9.755282581475769e-05,
"loss": 1.7399,
"step": 19
},
{
"epoch": 0.058997050147492625,
"grad_norm": 2.7760469913482666,
"learning_rate": 9.698463103929542e-05,
"loss": 1.3545,
"step": 20
},
{
"epoch": 0.061946902654867256,
"grad_norm": 2.5731499195098877,
"learning_rate": 9.635919272833938e-05,
"loss": 2.3881,
"step": 21
},
{
"epoch": 0.06489675516224189,
"grad_norm": 1.8936318159103394,
"learning_rate": 9.567727288213005e-05,
"loss": 2.1502,
"step": 22
},
{
"epoch": 0.06784660766961652,
"grad_norm": 1.976555585861206,
"learning_rate": 9.493970231495835e-05,
"loss": 2.1209,
"step": 23
},
{
"epoch": 0.07079646017699115,
"grad_norm": 2.4951541423797607,
"learning_rate": 9.414737964294636e-05,
"loss": 1.9717,
"step": 24
},
{
"epoch": 0.07374631268436578,
"grad_norm": 1.7388063669204712,
"learning_rate": 9.330127018922194e-05,
"loss": 2.8964,
"step": 25
},
{
"epoch": 0.07669616519174041,
"grad_norm": 1.7615406513214111,
"learning_rate": 9.24024048078213e-05,
"loss": 1.5812,
"step": 26
},
{
"epoch": 0.07964601769911504,
"grad_norm": 2.4763855934143066,
"learning_rate": 9.145187862775209e-05,
"loss": 2.2404,
"step": 27
},
{
"epoch": 0.07964601769911504,
"eval_loss": 1.0080937147140503,
"eval_runtime": 44.3909,
"eval_samples_per_second": 6.443,
"eval_steps_per_second": 0.811,
"step": 27
},
{
"epoch": 0.08259587020648967,
"grad_norm": 2.52359676361084,
"learning_rate": 9.045084971874738e-05,
"loss": 1.7129,
"step": 28
},
{
"epoch": 0.0855457227138643,
"grad_norm": 2.9214510917663574,
"learning_rate": 8.940053768033609e-05,
"loss": 2.0964,
"step": 29
},
{
"epoch": 0.08849557522123894,
"grad_norm": 2.418994188308716,
"learning_rate": 8.83022221559489e-05,
"loss": 1.9937,
"step": 30
},
{
"epoch": 0.09144542772861357,
"grad_norm": 2.003028154373169,
"learning_rate": 8.715724127386972e-05,
"loss": 2.5031,
"step": 31
},
{
"epoch": 0.0943952802359882,
"grad_norm": 3.0602455139160156,
"learning_rate": 8.596699001693255e-05,
"loss": 1.8419,
"step": 32
},
{
"epoch": 0.09734513274336283,
"grad_norm": 3.8223624229431152,
"learning_rate": 8.473291852294987e-05,
"loss": 1.7186,
"step": 33
},
{
"epoch": 0.10029498525073746,
"grad_norm": 2.1360504627227783,
"learning_rate": 8.345653031794292e-05,
"loss": 2.8072,
"step": 34
},
{
"epoch": 0.10324483775811209,
"grad_norm": 2.5613925457000732,
"learning_rate": 8.213938048432697e-05,
"loss": 1.4713,
"step": 35
},
{
"epoch": 0.10619469026548672,
"grad_norm": 4.114505767822266,
"learning_rate": 8.07830737662829e-05,
"loss": 1.8798,
"step": 36
},
{
"epoch": 0.10619469026548672,
"eval_loss": 0.9803729057312012,
"eval_runtime": 44.3399,
"eval_samples_per_second": 6.45,
"eval_steps_per_second": 0.812,
"step": 36
},
{
"epoch": 0.10914454277286136,
"grad_norm": 2.2606899738311768,
"learning_rate": 7.938926261462366e-05,
"loss": 2.1288,
"step": 37
},
{
"epoch": 0.11209439528023599,
"grad_norm": 2.3474953174591064,
"learning_rate": 7.795964517353735e-05,
"loss": 1.8458,
"step": 38
},
{
"epoch": 0.11504424778761062,
"grad_norm": 2.764202833175659,
"learning_rate": 7.649596321166024e-05,
"loss": 3.0195,
"step": 39
},
{
"epoch": 0.11799410029498525,
"grad_norm": 2.4660158157348633,
"learning_rate": 7.500000000000001e-05,
"loss": 1.827,
"step": 40
},
{
"epoch": 0.12094395280235988,
"grad_norm": 2.5078041553497314,
"learning_rate": 7.347357813929454e-05,
"loss": 1.3851,
"step": 41
},
{
"epoch": 0.12389380530973451,
"grad_norm": 2.1289970874786377,
"learning_rate": 7.191855733945387e-05,
"loss": 2.4684,
"step": 42
},
{
"epoch": 0.12684365781710916,
"grad_norm": 3.008479595184326,
"learning_rate": 7.033683215379002e-05,
"loss": 1.7663,
"step": 43
},
{
"epoch": 0.12979351032448377,
"grad_norm": 2.681143045425415,
"learning_rate": 6.873032967079561e-05,
"loss": 1.6546,
"step": 44
},
{
"epoch": 0.13274336283185842,
"grad_norm": 3.201345443725586,
"learning_rate": 6.710100716628344e-05,
"loss": 2.0512,
"step": 45
},
{
"epoch": 0.13274336283185842,
"eval_loss": 0.9570127129554749,
"eval_runtime": 44.365,
"eval_samples_per_second": 6.447,
"eval_steps_per_second": 0.811,
"step": 45
},
{
"epoch": 0.13569321533923304,
"grad_norm": 1.8258627653121948,
"learning_rate": 6.545084971874738e-05,
"loss": 1.5324,
"step": 46
},
{
"epoch": 0.13864306784660768,
"grad_norm": 1.8189584016799927,
"learning_rate": 6.378186779084995e-05,
"loss": 1.7706,
"step": 47
},
{
"epoch": 0.1415929203539823,
"grad_norm": 2.40757155418396,
"learning_rate": 6.209609477998338e-05,
"loss": 2.6126,
"step": 48
},
{
"epoch": 0.14454277286135694,
"grad_norm": 2.335020065307617,
"learning_rate": 6.0395584540887963e-05,
"loss": 2.326,
"step": 49
},
{
"epoch": 0.14749262536873156,
"grad_norm": 1.997075080871582,
"learning_rate": 5.868240888334653e-05,
"loss": 2.5148,
"step": 50
},
{
"epoch": 0.1504424778761062,
"grad_norm": 1.5208971500396729,
"learning_rate": 5.695865504800327e-05,
"loss": 2.0899,
"step": 51
},
{
"epoch": 0.15339233038348082,
"grad_norm": 3.4418222904205322,
"learning_rate": 5.522642316338268e-05,
"loss": 2.4397,
"step": 52
},
{
"epoch": 0.15634218289085547,
"grad_norm": 2.2392663955688477,
"learning_rate": 5.348782368720626e-05,
"loss": 1.8609,
"step": 53
},
{
"epoch": 0.1592920353982301,
"grad_norm": 2.139505386352539,
"learning_rate": 5.174497483512506e-05,
"loss": 2.0997,
"step": 54
},
{
"epoch": 0.1592920353982301,
"eval_loss": 0.9444321990013123,
"eval_runtime": 44.3343,
"eval_samples_per_second": 6.451,
"eval_steps_per_second": 0.812,
"step": 54
},
{
"epoch": 0.16224188790560473,
"grad_norm": 2.0930261611938477,
"learning_rate": 5e-05,
"loss": 1.7881,
"step": 55
},
{
"epoch": 0.16519174041297935,
"grad_norm": 2.2204813957214355,
"learning_rate": 4.825502516487497e-05,
"loss": 2.7174,
"step": 56
},
{
"epoch": 0.168141592920354,
"grad_norm": 2.269932746887207,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.685,
"step": 57
},
{
"epoch": 0.1710914454277286,
"grad_norm": 2.4379570484161377,
"learning_rate": 4.477357683661734e-05,
"loss": 1.3988,
"step": 58
},
{
"epoch": 0.17404129793510326,
"grad_norm": 2.7614104747772217,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.7063,
"step": 59
},
{
"epoch": 0.17699115044247787,
"grad_norm": 3.342813491821289,
"learning_rate": 4.131759111665349e-05,
"loss": 2.0876,
"step": 60
},
{
"epoch": 0.17994100294985252,
"grad_norm": 1.874851107597351,
"learning_rate": 3.960441545911204e-05,
"loss": 2.41,
"step": 61
},
{
"epoch": 0.18289085545722714,
"grad_norm": 2.886047124862671,
"learning_rate": 3.790390522001662e-05,
"loss": 1.7038,
"step": 62
},
{
"epoch": 0.18584070796460178,
"grad_norm": 2.56038498878479,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.9275,
"step": 63
},
{
"epoch": 0.18584070796460178,
"eval_loss": 0.9344019293785095,
"eval_runtime": 44.3805,
"eval_samples_per_second": 6.444,
"eval_steps_per_second": 0.811,
"step": 63
},
{
"epoch": 0.1887905604719764,
"grad_norm": 2.8928627967834473,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.076,
"step": 64
},
{
"epoch": 0.19174041297935104,
"grad_norm": 2.5595240592956543,
"learning_rate": 3.289899283371657e-05,
"loss": 1.8227,
"step": 65
},
{
"epoch": 0.19469026548672566,
"grad_norm": 2.3376636505126953,
"learning_rate": 3.12696703292044e-05,
"loss": 1.8931,
"step": 66
},
{
"epoch": 0.1976401179941003,
"grad_norm": 1.9027594327926636,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.4452,
"step": 67
},
{
"epoch": 0.20058997050147492,
"grad_norm": 2.5550875663757324,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.9689,
"step": 68
},
{
"epoch": 0.20353982300884957,
"grad_norm": 2.062281608581543,
"learning_rate": 2.6526421860705473e-05,
"loss": 2.372,
"step": 69
},
{
"epoch": 0.20648967551622419,
"grad_norm": 2.665297508239746,
"learning_rate": 2.500000000000001e-05,
"loss": 1.7928,
"step": 70
},
{
"epoch": 0.20943952802359883,
"grad_norm": 2.0745885372161865,
"learning_rate": 2.350403678833976e-05,
"loss": 1.4635,
"step": 71
},
{
"epoch": 0.21238938053097345,
"grad_norm": 2.8449113368988037,
"learning_rate": 2.2040354826462668e-05,
"loss": 2.0457,
"step": 72
},
{
"epoch": 0.21238938053097345,
"eval_loss": 0.9305321574211121,
"eval_runtime": 44.3666,
"eval_samples_per_second": 6.446,
"eval_steps_per_second": 0.811,
"step": 72
},
{
"epoch": 0.2153392330383481,
"grad_norm": 2.6109533309936523,
"learning_rate": 2.061073738537635e-05,
"loss": 1.7443,
"step": 73
},
{
"epoch": 0.2182890855457227,
"grad_norm": 2.955402135848999,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.8344,
"step": 74
},
{
"epoch": 0.22123893805309736,
"grad_norm": 3.2939834594726562,
"learning_rate": 1.7860619515673033e-05,
"loss": 2.3507,
"step": 75
},
{
"epoch": 0.22418879056047197,
"grad_norm": 4.007279872894287,
"learning_rate": 1.6543469682057106e-05,
"loss": 2.1104,
"step": 76
},
{
"epoch": 0.22713864306784662,
"grad_norm": 1.9368032217025757,
"learning_rate": 1.526708147705013e-05,
"loss": 1.6227,
"step": 77
},
{
"epoch": 0.23008849557522124,
"grad_norm": 2.9845495223999023,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.7561,
"step": 78
},
{
"epoch": 0.23303834808259588,
"grad_norm": 2.2668330669403076,
"learning_rate": 1.2842758726130283e-05,
"loss": 2.015,
"step": 79
},
{
"epoch": 0.2359882005899705,
"grad_norm": 1.9236528873443604,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.1305,
"step": 80
},
{
"epoch": 0.23893805309734514,
"grad_norm": 2.2773380279541016,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.778,
"step": 81
},
{
"epoch": 0.23893805309734514,
"eval_loss": 0.92861008644104,
"eval_runtime": 44.4064,
"eval_samples_per_second": 6.441,
"eval_steps_per_second": 0.811,
"step": 81
},
{
"epoch": 0.24188790560471976,
"grad_norm": 1.7460155487060547,
"learning_rate": 9.549150281252633e-06,
"loss": 1.777,
"step": 82
},
{
"epoch": 0.2448377581120944,
"grad_norm": 2.7454729080200195,
"learning_rate": 8.548121372247918e-06,
"loss": 1.1967,
"step": 83
},
{
"epoch": 0.24778761061946902,
"grad_norm": 2.0686800479888916,
"learning_rate": 7.597595192178702e-06,
"loss": 1.6214,
"step": 84
},
{
"epoch": 0.25073746312684364,
"grad_norm": 2.3937289714813232,
"learning_rate": 6.698729810778065e-06,
"loss": 1.7947,
"step": 85
},
{
"epoch": 0.2536873156342183,
"grad_norm": 2.584099054336548,
"learning_rate": 5.852620357053651e-06,
"loss": 2.4677,
"step": 86
},
{
"epoch": 0.25663716814159293,
"grad_norm": 3.2739224433898926,
"learning_rate": 5.060297685041659e-06,
"loss": 2.9313,
"step": 87
},
{
"epoch": 0.25958702064896755,
"grad_norm": 2.9258463382720947,
"learning_rate": 4.322727117869951e-06,
"loss": 1.6279,
"step": 88
},
{
"epoch": 0.26253687315634217,
"grad_norm": 2.1165103912353516,
"learning_rate": 3.6408072716606346e-06,
"loss": 2.1038,
"step": 89
},
{
"epoch": 0.26548672566371684,
"grad_norm": 1.7193412780761719,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.9531,
"step": 90
},
{
"epoch": 0.26548672566371684,
"eval_loss": 0.928329586982727,
"eval_runtime": 44.365,
"eval_samples_per_second": 6.447,
"eval_steps_per_second": 0.811,
"step": 90
},
{
"epoch": 0.26843657817109146,
"grad_norm": 3.0313870906829834,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.409,
"step": 91
},
{
"epoch": 0.2713864306784661,
"grad_norm": 2.6027047634124756,
"learning_rate": 1.9369152030840556e-06,
"loss": 1.8437,
"step": 92
},
{
"epoch": 0.2743362831858407,
"grad_norm": 1.7454715967178345,
"learning_rate": 1.4852136862001764e-06,
"loss": 2.4698,
"step": 93
},
{
"epoch": 0.27728613569321536,
"grad_norm": 4.085173606872559,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.5345,
"step": 94
},
{
"epoch": 0.28023598820059,
"grad_norm": 1.4964895248413086,
"learning_rate": 7.596123493895991e-07,
"loss": 2.0551,
"step": 95
},
{
"epoch": 0.2831858407079646,
"grad_norm": 2.6940832138061523,
"learning_rate": 4.865965629214819e-07,
"loss": 1.3975,
"step": 96
},
{
"epoch": 0.2861356932153392,
"grad_norm": 2.0973100662231445,
"learning_rate": 2.7390523158633554e-07,
"loss": 2.1289,
"step": 97
},
{
"epoch": 0.2890855457227139,
"grad_norm": 2.0837435722351074,
"learning_rate": 1.2179748700879012e-07,
"loss": 2.0609,
"step": 98
},
{
"epoch": 0.2920353982300885,
"grad_norm": 2.236985921859741,
"learning_rate": 3.04586490452119e-08,
"loss": 1.6664,
"step": 99
},
{
"epoch": 0.2920353982300885,
"eval_loss": 0.9275940656661987,
"eval_runtime": 44.3545,
"eval_samples_per_second": 6.448,
"eval_steps_per_second": 0.812,
"step": 99
},
{
"epoch": 0.2949852507374631,
"grad_norm": 2.534641981124878,
"learning_rate": 0.0,
"loss": 1.5521,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2695579394048e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}