{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.000846935469869212,
"eval_steps": 5,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.693870939738424e-05,
"grad_norm": 22.163108825683594,
"learning_rate": 2e-05,
"loss": 21.3169,
"step": 1
},
{
"epoch": 1.693870939738424e-05,
"eval_loss": 2.290062189102173,
"eval_runtime": 5153.9246,
"eval_samples_per_second": 9.646,
"eval_steps_per_second": 4.823,
"step": 1
},
{
"epoch": 3.387741879476848e-05,
"grad_norm": 31.874303817749023,
"learning_rate": 4e-05,
"loss": 19.141,
"step": 2
},
{
"epoch": 5.081612819215272e-05,
"grad_norm": 16.09918212890625,
"learning_rate": 6e-05,
"loss": 16.7916,
"step": 3
},
{
"epoch": 6.775483758953696e-05,
"grad_norm": 25.90465545654297,
"learning_rate": 8e-05,
"loss": 19.5082,
"step": 4
},
{
"epoch": 8.46935469869212e-05,
"grad_norm": 19.378210067749023,
"learning_rate": 0.0001,
"loss": 17.8913,
"step": 5
},
{
"epoch": 8.46935469869212e-05,
"eval_loss": 2.2351300716400146,
"eval_runtime": 5147.8027,
"eval_samples_per_second": 9.658,
"eval_steps_per_second": 4.829,
"step": 5
},
{
"epoch": 0.00010163225638430543,
"grad_norm": 19.173131942749023,
"learning_rate": 0.00012,
"loss": 17.3923,
"step": 6
},
{
"epoch": 0.00011857096578168967,
"grad_norm": 32.09618377685547,
"learning_rate": 0.00014,
"loss": 20.1479,
"step": 7
},
{
"epoch": 0.0001355096751790739,
"grad_norm": 21.93677520751953,
"learning_rate": 0.00016,
"loss": 17.782,
"step": 8
},
{
"epoch": 0.00015244838457645815,
"grad_norm": 21.510074615478516,
"learning_rate": 0.00018,
"loss": 17.7573,
"step": 9
},
{
"epoch": 0.0001693870939738424,
"grad_norm": 40.90770721435547,
"learning_rate": 0.0002,
"loss": 20.1925,
"step": 10
},
{
"epoch": 0.0001693870939738424,
"eval_loss": 2.188427686691284,
"eval_runtime": 5141.4887,
"eval_samples_per_second": 9.669,
"eval_steps_per_second": 4.835,
"step": 10
},
{
"epoch": 0.00018632580337122663,
"grad_norm": 23.243389129638672,
"learning_rate": 0.0001996917333733128,
"loss": 17.2822,
"step": 11
},
{
"epoch": 0.00020326451276861087,
"grad_norm": 23.551620483398438,
"learning_rate": 0.00019876883405951377,
"loss": 16.8668,
"step": 12
},
{
"epoch": 0.0002202032221659951,
"grad_norm": 17.5562686920166,
"learning_rate": 0.00019723699203976766,
"loss": 15.4958,
"step": 13
},
{
"epoch": 0.00023714193156337935,
"grad_norm": 25.39556312561035,
"learning_rate": 0.00019510565162951537,
"loss": 17.6328,
"step": 14
},
{
"epoch": 0.0002540806409607636,
"grad_norm": 17.877641677856445,
"learning_rate": 0.0001923879532511287,
"loss": 15.1113,
"step": 15
},
{
"epoch": 0.0002540806409607636,
"eval_loss": 2.129117488861084,
"eval_runtime": 5125.7738,
"eval_samples_per_second": 9.699,
"eval_steps_per_second": 4.85,
"step": 15
},
{
"epoch": 0.0002710193503581478,
"grad_norm": 30.318897247314453,
"learning_rate": 0.0001891006524188368,
"loss": 17.8594,
"step": 16
},
{
"epoch": 0.00028795805975553206,
"grad_norm": 15.739501953125,
"learning_rate": 0.00018526401643540922,
"loss": 17.0346,
"step": 17
},
{
"epoch": 0.0003048967691529163,
"grad_norm": 24.859498977661133,
"learning_rate": 0.00018090169943749476,
"loss": 18.724,
"step": 18
},
{
"epoch": 0.00032183547855030054,
"grad_norm": 16.235403060913086,
"learning_rate": 0.0001760405965600031,
"loss": 15.4015,
"step": 19
},
{
"epoch": 0.0003387741879476848,
"grad_norm": 15.718273162841797,
"learning_rate": 0.00017071067811865476,
"loss": 17.4672,
"step": 20
},
{
"epoch": 0.0003387741879476848,
"eval_loss": 2.085993766784668,
"eval_runtime": 5126.7679,
"eval_samples_per_second": 9.697,
"eval_steps_per_second": 4.849,
"step": 20
},
{
"epoch": 0.000355712897345069,
"grad_norm": 17.683870315551758,
"learning_rate": 0.00016494480483301836,
"loss": 17.1021,
"step": 21
},
{
"epoch": 0.00037265160674245326,
"grad_norm": 18.684045791625977,
"learning_rate": 0.00015877852522924732,
"loss": 15.9178,
"step": 22
},
{
"epoch": 0.0003895903161398375,
"grad_norm": 20.173887252807617,
"learning_rate": 0.0001522498564715949,
"loss": 17.3176,
"step": 23
},
{
"epoch": 0.00040652902553722173,
"grad_norm": 19.331239700317383,
"learning_rate": 0.00014539904997395468,
"loss": 16.0941,
"step": 24
},
{
"epoch": 0.000423467734934606,
"grad_norm": 19.496095657348633,
"learning_rate": 0.000138268343236509,
"loss": 15.8816,
"step": 25
},
{
"epoch": 0.000423467734934606,
"eval_loss": 2.0629289150238037,
"eval_runtime": 5134.9215,
"eval_samples_per_second": 9.682,
"eval_steps_per_second": 4.841,
"step": 25
},
{
"epoch": 0.0004404064443319902,
"grad_norm": 33.5986213684082,
"learning_rate": 0.00013090169943749476,
"loss": 15.7073,
"step": 26
},
{
"epoch": 0.00045734515372937445,
"grad_norm": 16.573434829711914,
"learning_rate": 0.00012334453638559057,
"loss": 14.8196,
"step": 27
},
{
"epoch": 0.0004742838631267587,
"grad_norm": 20.38245391845703,
"learning_rate": 0.0001156434465040231,
"loss": 16.6963,
"step": 28
},
{
"epoch": 0.0004912225725241429,
"grad_norm": 16.84417724609375,
"learning_rate": 0.0001078459095727845,
"loss": 16.4005,
"step": 29
},
{
"epoch": 0.0005081612819215272,
"grad_norm": 20.52333641052246,
"learning_rate": 0.0001,
"loss": 15.0153,
"step": 30
},
{
"epoch": 0.0005081612819215272,
"eval_loss": 2.041257858276367,
"eval_runtime": 5138.2338,
"eval_samples_per_second": 9.676,
"eval_steps_per_second": 4.838,
"step": 30
},
{
"epoch": 0.0005250999913189114,
"grad_norm": 20.041860580444336,
"learning_rate": 9.215409042721552e-05,
"loss": 15.1095,
"step": 31
},
{
"epoch": 0.0005420387007162956,
"grad_norm": 23.613113403320312,
"learning_rate": 8.435655349597689e-05,
"loss": 15.7084,
"step": 32
},
{
"epoch": 0.0005589774101136799,
"grad_norm": 15.596351623535156,
"learning_rate": 7.66554636144095e-05,
"loss": 15.3402,
"step": 33
},
{
"epoch": 0.0005759161195110641,
"grad_norm": 20.050952911376953,
"learning_rate": 6.909830056250527e-05,
"loss": 18.9168,
"step": 34
},
{
"epoch": 0.0005928548289084484,
"grad_norm": 14.879345893859863,
"learning_rate": 6.173165676349103e-05,
"loss": 14.3502,
"step": 35
},
{
"epoch": 0.0005928548289084484,
"eval_loss": 2.025693655014038,
"eval_runtime": 5133.43,
"eval_samples_per_second": 9.685,
"eval_steps_per_second": 4.842,
"step": 35
},
{
"epoch": 0.0006097935383058326,
"grad_norm": 17.409738540649414,
"learning_rate": 5.4600950026045326e-05,
"loss": 15.9409,
"step": 36
},
{
"epoch": 0.0006267322477032168,
"grad_norm": 19.219921112060547,
"learning_rate": 4.7750143528405126e-05,
"loss": 18.1686,
"step": 37
},
{
"epoch": 0.0006436709571006011,
"grad_norm": 15.948378562927246,
"learning_rate": 4.12214747707527e-05,
"loss": 15.5429,
"step": 38
},
{
"epoch": 0.0006606096664979853,
"grad_norm": 17.51921844482422,
"learning_rate": 3.5055195166981645e-05,
"loss": 14.8223,
"step": 39
},
{
"epoch": 0.0006775483758953696,
"grad_norm": 19.49251937866211,
"learning_rate": 2.9289321881345254e-05,
"loss": 16.4808,
"step": 40
},
{
"epoch": 0.0006775483758953696,
"eval_loss": 2.012857675552368,
"eval_runtime": 5129.6105,
"eval_samples_per_second": 9.692,
"eval_steps_per_second": 4.846,
"step": 40
},
{
"epoch": 0.0006944870852927538,
"grad_norm": 17.339757919311523,
"learning_rate": 2.3959403439996907e-05,
"loss": 14.7313,
"step": 41
},
{
"epoch": 0.000711425794690138,
"grad_norm": 15.278409957885742,
"learning_rate": 1.9098300562505266e-05,
"loss": 14.637,
"step": 42
},
{
"epoch": 0.0007283645040875223,
"grad_norm": 15.347859382629395,
"learning_rate": 1.4735983564590783e-05,
"loss": 16.3984,
"step": 43
},
{
"epoch": 0.0007453032134849065,
"grad_norm": 15.566729545593262,
"learning_rate": 1.0899347581163221e-05,
"loss": 15.9272,
"step": 44
},
{
"epoch": 0.0007622419228822908,
"grad_norm": 15.312088012695312,
"learning_rate": 7.612046748871327e-06,
"loss": 14.5249,
"step": 45
},
{
"epoch": 0.0007622419228822908,
"eval_loss": 2.008061170578003,
"eval_runtime": 5125.393,
"eval_samples_per_second": 9.7,
"eval_steps_per_second": 4.85,
"step": 45
},
{
"epoch": 0.000779180632279675,
"grad_norm": 18.034000396728516,
"learning_rate": 4.8943483704846475e-06,
"loss": 16.604,
"step": 46
},
{
"epoch": 0.0007961193416770592,
"grad_norm": 25.57406997680664,
"learning_rate": 2.7630079602323442e-06,
"loss": 19.8734,
"step": 47
},
{
"epoch": 0.0008130580510744435,
"grad_norm": 15.575130462646484,
"learning_rate": 1.231165940486234e-06,
"loss": 15.871,
"step": 48
},
{
"epoch": 0.0008299967604718277,
"grad_norm": 21.707000732421875,
"learning_rate": 3.0826662668720364e-07,
"loss": 16.6486,
"step": 49
},
{
"epoch": 0.000846935469869212,
"grad_norm": 19.565614700317383,
"learning_rate": 0.0,
"loss": 14.055,
"step": 50
},
{
"epoch": 0.000846935469869212,
"eval_loss": 2.0067672729492188,
"eval_runtime": 5122.5703,
"eval_samples_per_second": 9.705,
"eval_steps_per_second": 4.853,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.01067914379264e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}