Training in progress, step 30, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0005081612819215272,
"eval_steps": 5,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.693870939738424e-05,
"grad_norm": 22.163108825683594,
"learning_rate": 2e-05,
"loss": 21.3169,
"step": 1
},
{
"epoch": 1.693870939738424e-05,
"eval_loss": 2.290062189102173,
"eval_runtime": 5153.9246,
"eval_samples_per_second": 9.646,
"eval_steps_per_second": 4.823,
"step": 1
},
{
"epoch": 3.387741879476848e-05,
"grad_norm": 31.874303817749023,
"learning_rate": 4e-05,
"loss": 19.141,
"step": 2
},
{
"epoch": 5.081612819215272e-05,
"grad_norm": 16.09918212890625,
"learning_rate": 6e-05,
"loss": 16.7916,
"step": 3
},
{
"epoch": 6.775483758953696e-05,
"grad_norm": 25.90465545654297,
"learning_rate": 8e-05,
"loss": 19.5082,
"step": 4
},
{
"epoch": 8.46935469869212e-05,
"grad_norm": 19.378210067749023,
"learning_rate": 0.0001,
"loss": 17.8913,
"step": 5
},
{
"epoch": 8.46935469869212e-05,
"eval_loss": 2.2351300716400146,
"eval_runtime": 5147.8027,
"eval_samples_per_second": 9.658,
"eval_steps_per_second": 4.829,
"step": 5
},
{
"epoch": 0.00010163225638430543,
"grad_norm": 19.173131942749023,
"learning_rate": 0.00012,
"loss": 17.3923,
"step": 6
},
{
"epoch": 0.00011857096578168967,
"grad_norm": 32.09618377685547,
"learning_rate": 0.00014,
"loss": 20.1479,
"step": 7
},
{
"epoch": 0.0001355096751790739,
"grad_norm": 21.93677520751953,
"learning_rate": 0.00016,
"loss": 17.782,
"step": 8
},
{
"epoch": 0.00015244838457645815,
"grad_norm": 21.510074615478516,
"learning_rate": 0.00018,
"loss": 17.7573,
"step": 9
},
{
"epoch": 0.0001693870939738424,
"grad_norm": 40.90770721435547,
"learning_rate": 0.0002,
"loss": 20.1925,
"step": 10
},
{
"epoch": 0.0001693870939738424,
"eval_loss": 2.188427686691284,
"eval_runtime": 5141.4887,
"eval_samples_per_second": 9.669,
"eval_steps_per_second": 4.835,
"step": 10
},
{
"epoch": 0.00018632580337122663,
"grad_norm": 23.243389129638672,
"learning_rate": 0.0001996917333733128,
"loss": 17.2822,
"step": 11
},
{
"epoch": 0.00020326451276861087,
"grad_norm": 23.551620483398438,
"learning_rate": 0.00019876883405951377,
"loss": 16.8668,
"step": 12
},
{
"epoch": 0.0002202032221659951,
"grad_norm": 17.5562686920166,
"learning_rate": 0.00019723699203976766,
"loss": 15.4958,
"step": 13
},
{
"epoch": 0.00023714193156337935,
"grad_norm": 25.39556312561035,
"learning_rate": 0.00019510565162951537,
"loss": 17.6328,
"step": 14
},
{
"epoch": 0.0002540806409607636,
"grad_norm": 17.877641677856445,
"learning_rate": 0.0001923879532511287,
"loss": 15.1113,
"step": 15
},
{
"epoch": 0.0002540806409607636,
"eval_loss": 2.129117488861084,
"eval_runtime": 5125.7738,
"eval_samples_per_second": 9.699,
"eval_steps_per_second": 4.85,
"step": 15
},
{
"epoch": 0.0002710193503581478,
"grad_norm": 30.318897247314453,
"learning_rate": 0.0001891006524188368,
"loss": 17.8594,
"step": 16
},
{
"epoch": 0.00028795805975553206,
"grad_norm": 15.739501953125,
"learning_rate": 0.00018526401643540922,
"loss": 17.0346,
"step": 17
},
{
"epoch": 0.0003048967691529163,
"grad_norm": 24.859498977661133,
"learning_rate": 0.00018090169943749476,
"loss": 18.724,
"step": 18
},
{
"epoch": 0.00032183547855030054,
"grad_norm": 16.235403060913086,
"learning_rate": 0.0001760405965600031,
"loss": 15.4015,
"step": 19
},
{
"epoch": 0.0003387741879476848,
"grad_norm": 15.718273162841797,
"learning_rate": 0.00017071067811865476,
"loss": 17.4672,
"step": 20
},
{
"epoch": 0.0003387741879476848,
"eval_loss": 2.085993766784668,
"eval_runtime": 5126.7679,
"eval_samples_per_second": 9.697,
"eval_steps_per_second": 4.849,
"step": 20
},
{
"epoch": 0.000355712897345069,
"grad_norm": 17.683870315551758,
"learning_rate": 0.00016494480483301836,
"loss": 17.1021,
"step": 21
},
{
"epoch": 0.00037265160674245326,
"grad_norm": 18.684045791625977,
"learning_rate": 0.00015877852522924732,
"loss": 15.9178,
"step": 22
},
{
"epoch": 0.0003895903161398375,
"grad_norm": 20.173887252807617,
"learning_rate": 0.0001522498564715949,
"loss": 17.3176,
"step": 23
},
{
"epoch": 0.00040652902553722173,
"grad_norm": 19.331239700317383,
"learning_rate": 0.00014539904997395468,
"loss": 16.0941,
"step": 24
},
{
"epoch": 0.000423467734934606,
"grad_norm": 19.496095657348633,
"learning_rate": 0.000138268343236509,
"loss": 15.8816,
"step": 25
},
{
"epoch": 0.000423467734934606,
"eval_loss": 2.0629289150238037,
"eval_runtime": 5134.9215,
"eval_samples_per_second": 9.682,
"eval_steps_per_second": 4.841,
"step": 25
},
{
"epoch": 0.0004404064443319902,
"grad_norm": 33.5986213684082,
"learning_rate": 0.00013090169943749476,
"loss": 15.7073,
"step": 26
},
{
"epoch": 0.00045734515372937445,
"grad_norm": 16.573434829711914,
"learning_rate": 0.00012334453638559057,
"loss": 14.8196,
"step": 27
},
{
"epoch": 0.0004742838631267587,
"grad_norm": 20.38245391845703,
"learning_rate": 0.0001156434465040231,
"loss": 16.6963,
"step": 28
},
{
"epoch": 0.0004912225725241429,
"grad_norm": 16.84417724609375,
"learning_rate": 0.0001078459095727845,
"loss": 16.4005,
"step": 29
},
{
"epoch": 0.0005081612819215272,
"grad_norm": 20.52333641052246,
"learning_rate": 0.0001,
"loss": 15.0153,
"step": 30
},
{
"epoch": 0.0005081612819215272,
"eval_loss": 2.041257858276367,
"eval_runtime": 5138.2338,
"eval_samples_per_second": 9.676,
"eval_steps_per_second": 4.838,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.206407486275584e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
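
The JSON above is the trainer_state.json that the Hugging Face transformers Trainer writes into each checkpoint directory. As a minimal sketch of how such a file can be inspected, the Python below loads it with the standard json module and summarizes the log_history entries; the checkpoint path ("checkpoint-30/trainer_state.json") is an assumption based on the usual checkpoint-<global_step> layout, not something stated in the file itself.

import json

# Path is an assumption: the Trainer normally writes trainer_state.json
# inside each checkpoint-<global_step> directory.
with open("checkpoint-30/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (which carry "loss") and evaluation
# entries (which carry "eval_loss"); split them apart.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.7f}")
print(f"{len(train_logs)} training entries, {len(eval_logs)} eval entries")
for entry in eval_logs:
    print(f"step {entry['step']:>3}: eval_loss={entry['eval_loss']:.4f}")

Run against the state shown here, this would print the eval_loss values at steps 1, 5, 10, 15, 20, 25, and 30, which fall steadily from about 2.29 to about 2.04.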