{
"best_metric": 6.796112060546875,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.32,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0064,
"grad_norm": 0.9495358467102051,
"learning_rate": 2.9999999999999997e-05,
"loss": 6.9341,
"step": 1
},
{
"epoch": 0.0064,
"eval_loss": 6.940670013427734,
"eval_runtime": 0.4624,
"eval_samples_per_second": 108.123,
"eval_steps_per_second": 15.137,
"step": 1
},
{
"epoch": 0.0128,
"grad_norm": 0.8018835186958313,
"learning_rate": 5.9999999999999995e-05,
"loss": 6.9556,
"step": 2
},
{
"epoch": 0.0192,
"grad_norm": 0.9049068093299866,
"learning_rate": 8.999999999999999e-05,
"loss": 6.9653,
"step": 3
},
{
"epoch": 0.0256,
"grad_norm": 0.7154098749160767,
"learning_rate": 0.00011999999999999999,
"loss": 6.9551,
"step": 4
},
{
"epoch": 0.032,
"grad_norm": 0.7742681503295898,
"learning_rate": 0.00015,
"loss": 6.9498,
"step": 5
},
{
"epoch": 0.0384,
"grad_norm": 0.8399883508682251,
"learning_rate": 0.00017999999999999998,
"loss": 6.94,
"step": 6
},
{
"epoch": 0.0448,
"grad_norm": 0.5200990438461304,
"learning_rate": 0.00020999999999999998,
"loss": 6.9413,
"step": 7
},
{
"epoch": 0.0512,
"grad_norm": 0.6034143567085266,
"learning_rate": 0.00023999999999999998,
"loss": 6.9438,
"step": 8
},
{
"epoch": 0.0576,
"grad_norm": 0.5844438672065735,
"learning_rate": 0.00027,
"loss": 6.9437,
"step": 9
},
{
"epoch": 0.064,
"grad_norm": 0.6551675796508789,
"learning_rate": 0.0003,
"loss": 6.943,
"step": 10
},
{
"epoch": 0.0704,
"grad_norm": 0.6681671142578125,
"learning_rate": 0.0002999794957488703,
"loss": 6.9383,
"step": 11
},
{
"epoch": 0.0768,
"grad_norm": 0.6732079386711121,
"learning_rate": 0.0002999179886011389,
"loss": 6.9406,
"step": 12
},
{
"epoch": 0.0832,
"grad_norm": 0.6102283596992493,
"learning_rate": 0.0002998154953722457,
"loss": 6.9383,
"step": 13
},
{
"epoch": 0.0896,
"grad_norm": 0.6549004912376404,
"learning_rate": 0.00029967204408281613,
"loss": 6.9284,
"step": 14
},
{
"epoch": 0.096,
"grad_norm": 0.5962654948234558,
"learning_rate": 0.00029948767395100045,
"loss": 6.9047,
"step": 15
},
{
"epoch": 0.1024,
"grad_norm": 0.649928867816925,
"learning_rate": 0.0002992624353817517,
"loss": 6.9212,
"step": 16
},
{
"epoch": 0.1088,
"grad_norm": 0.504554808139801,
"learning_rate": 0.0002989963899530457,
"loss": 6.9179,
"step": 17
},
{
"epoch": 0.1152,
"grad_norm": 0.597122848033905,
"learning_rate": 0.00029868961039904624,
"loss": 6.9112,
"step": 18
},
{
"epoch": 0.1216,
"grad_norm": 0.5620957612991333,
"learning_rate": 0.00029834218059022024,
"loss": 6.9046,
"step": 19
},
{
"epoch": 0.128,
"grad_norm": 0.6724961400032043,
"learning_rate": 0.00029795419551040833,
"loss": 6.8993,
"step": 20
},
{
"epoch": 0.1344,
"grad_norm": 0.5969198346138,
"learning_rate": 0.00029752576123085736,
"loss": 6.8979,
"step": 21
},
{
"epoch": 0.1408,
"grad_norm": 0.5506598949432373,
"learning_rate": 0.0002970569948812214,
"loss": 6.8887,
"step": 22
},
{
"epoch": 0.1472,
"grad_norm": 0.7546066641807556,
"learning_rate": 0.0002965480246175399,
"loss": 6.8619,
"step": 23
},
{
"epoch": 0.1536,
"grad_norm": 0.6300695538520813,
"learning_rate": 0.0002959989895872009,
"loss": 6.8506,
"step": 24
},
{
"epoch": 0.16,
"grad_norm": 0.5741549134254456,
"learning_rate": 0.0002954100398908995,
"loss": 6.8638,
"step": 25
},
{
"epoch": 0.16,
"eval_loss": 6.874875545501709,
"eval_runtime": 0.047,
"eval_samples_per_second": 1063.777,
"eval_steps_per_second": 148.929,
"step": 25
},
{
"epoch": 0.1664,
"grad_norm": 0.5633116364479065,
"learning_rate": 0.0002947813365416023,
"loss": 6.8588,
"step": 26
},
{
"epoch": 0.1728,
"grad_norm": 0.6096696853637695,
"learning_rate": 0.0002941130514205272,
"loss": 6.8797,
"step": 27
},
{
"epoch": 0.1792,
"grad_norm": 0.5343248844146729,
"learning_rate": 0.0002934053672301536,
"loss": 6.8702,
"step": 28
},
{
"epoch": 0.1856,
"grad_norm": 0.6776606440544128,
"learning_rate": 0.00029265847744427303,
"loss": 6.84,
"step": 29
},
{
"epoch": 0.192,
"grad_norm": 0.5798454880714417,
"learning_rate": 0.00029187258625509513,
"loss": 6.8618,
"step": 30
},
{
"epoch": 0.1984,
"grad_norm": 0.6195558905601501,
"learning_rate": 0.00029104790851742417,
"loss": 6.8286,
"step": 31
},
{
"epoch": 0.2048,
"grad_norm": 0.556268036365509,
"learning_rate": 0.0002901846696899191,
"loss": 6.8496,
"step": 32
},
{
"epoch": 0.2112,
"grad_norm": 0.5736655592918396,
"learning_rate": 0.00028928310577345606,
"loss": 6.8319,
"step": 33
},
{
"epoch": 0.2176,
"grad_norm": 0.5479353666305542,
"learning_rate": 0.0002883434632466077,
"loss": 6.8609,
"step": 34
},
{
"epoch": 0.224,
"grad_norm": 0.8071677684783936,
"learning_rate": 0.00028736599899825856,
"loss": 6.9076,
"step": 35
},
{
"epoch": 0.2304,
"grad_norm": 0.775070071220398,
"learning_rate": 0.00028635098025737434,
"loss": 6.9059,
"step": 36
},
{
"epoch": 0.2368,
"grad_norm": 0.7527448534965515,
"learning_rate": 0.00028529868451994384,
"loss": 6.8908,
"step": 37
},
{
"epoch": 0.2432,
"grad_norm": 0.911148726940155,
"learning_rate": 0.0002842093994731145,
"loss": 6.8912,
"step": 38
},
{
"epoch": 0.2496,
"grad_norm": 1.4041032791137695,
"learning_rate": 0.00028308342291654174,
"loss": 6.8862,
"step": 39
},
{
"epoch": 0.256,
"grad_norm": 0.9864165186882019,
"learning_rate": 0.00028192106268097334,
"loss": 6.9131,
"step": 40
},
{
"epoch": 0.2624,
"grad_norm": 0.8453418612480164,
"learning_rate": 0.00028072263654409154,
"loss": 6.9059,
"step": 41
},
{
"epoch": 0.2688,
"grad_norm": 0.77274489402771,
"learning_rate": 0.0002794884721436361,
"loss": 6.8597,
"step": 42
},
{
"epoch": 0.2752,
"grad_norm": 0.7312727570533752,
"learning_rate": 0.00027821890688783083,
"loss": 6.8523,
"step": 43
},
{
"epoch": 0.2816,
"grad_norm": 0.7589954137802124,
"learning_rate": 0.0002769142878631403,
"loss": 6.8514,
"step": 44
},
{
"epoch": 0.288,
"grad_norm": 0.588603138923645,
"learning_rate": 0.00027557497173937923,
"loss": 6.7881,
"step": 45
},
{
"epoch": 0.2944,
"grad_norm": 0.632996678352356,
"learning_rate": 0.000274201324672203,
"loss": 6.7755,
"step": 46
},
{
"epoch": 0.3008,
"grad_norm": 0.575196385383606,
"learning_rate": 0.00027279372220300385,
"loss": 6.79,
"step": 47
},
{
"epoch": 0.3072,
"grad_norm": 0.5218028426170349,
"learning_rate": 0.0002713525491562421,
"loss": 6.7852,
"step": 48
},
{
"epoch": 0.3136,
"grad_norm": 0.5754166841506958,
"learning_rate": 0.00026987819953423867,
"loss": 6.7727,
"step": 49
},
{
"epoch": 0.32,
"grad_norm": 0.5350818634033203,
"learning_rate": 0.00026837107640945905,
"loss": 6.7583,
"step": 50
},
{
"epoch": 0.32,
"eval_loss": 6.796112060546875,
"eval_runtime": 0.0455,
"eval_samples_per_second": 1098.877,
"eval_steps_per_second": 153.843,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 626134425600.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}