{
  "best_metric": 1.0611395835876465,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.025039907352342797,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005007981470468559,
      "grad_norm": 7.2735490798950195,
      "learning_rate": 5e-05,
      "loss": 27.6765,
      "step": 1
    },
    {
      "epoch": 0.0005007981470468559,
      "eval_loss": 1.6915992498397827,
      "eval_runtime": 3.9192,
      "eval_samples_per_second": 12.758,
      "eval_steps_per_second": 3.317,
      "step": 1
    },
    {
      "epoch": 0.0010015962940937118,
      "grad_norm": 9.645286560058594,
      "learning_rate": 0.0001,
      "loss": 28.8861,
      "step": 2
    },
    {
      "epoch": 0.0015023944411405679,
      "grad_norm": 6.561025619506836,
      "learning_rate": 9.990365154573717e-05,
      "loss": 23.5428,
      "step": 3
    },
    {
      "epoch": 0.0020031925881874237,
      "grad_norm": 6.4700422286987305,
      "learning_rate": 9.961501876182148e-05,
      "loss": 23.4867,
      "step": 4
    },
    {
      "epoch": 0.0025039907352342795,
      "grad_norm": 6.575279712677002,
      "learning_rate": 9.913533761814537e-05,
      "loss": 22.8772,
      "step": 5
    },
    {
      "epoch": 0.0030047888822811357,
      "grad_norm": 5.742631912231445,
      "learning_rate": 9.846666218300807e-05,
      "loss": 22.2415,
      "step": 6
    },
    {
      "epoch": 0.0035055870293279915,
      "grad_norm": 5.252403259277344,
      "learning_rate": 9.761185582727977e-05,
      "loss": 21.6715,
      "step": 7
    },
    {
      "epoch": 0.004006385176374847,
      "grad_norm": 4.608903884887695,
      "learning_rate": 9.657457896300791e-05,
      "loss": 20.1559,
      "step": 8
    },
    {
      "epoch": 0.004507183323421703,
      "grad_norm": 4.294509410858154,
      "learning_rate": 9.535927336897098e-05,
      "loss": 22.0346,
      "step": 9
    },
    {
      "epoch": 0.005007981470468559,
      "grad_norm": 5.621428966522217,
      "learning_rate": 9.397114317029975e-05,
      "loss": 21.4893,
      "step": 10
    },
    {
      "epoch": 0.005508779617515415,
      "grad_norm": 5.655107498168945,
      "learning_rate": 9.241613255361455e-05,
      "loss": 18.073,
      "step": 11
    },
    {
      "epoch": 0.0060095777645622715,
      "grad_norm": 4.809823036193848,
      "learning_rate": 9.070090031310558e-05,
      "loss": 16.1442,
      "step": 12
    },
    {
      "epoch": 0.006510375911609127,
      "grad_norm": 8.046764373779297,
      "learning_rate": 8.883279133655399e-05,
      "loss": 21.9141,
      "step": 13
    },
    {
      "epoch": 0.007011174058655983,
      "grad_norm": 7.144588470458984,
      "learning_rate": 8.681980515339464e-05,
      "loss": 22.6744,
      "step": 14
    },
    {
      "epoch": 0.007511972205702839,
      "grad_norm": 5.577089786529541,
      "learning_rate": 8.467056167950311e-05,
      "loss": 19.4013,
      "step": 15
    },
    {
      "epoch": 0.008012770352749695,
      "grad_norm": 4.5405731201171875,
      "learning_rate": 8.239426430539243e-05,
      "loss": 17.4941,
      "step": 16
    },
    {
      "epoch": 0.00851356849979655,
      "grad_norm": 5.8107147216796875,
      "learning_rate": 8.000066048588211e-05,
      "loss": 18.2896,
      "step": 17
    },
    {
      "epoch": 0.009014366646843406,
      "grad_norm": 4.332961082458496,
      "learning_rate": 7.75e-05,
      "loss": 18.1127,
      "step": 18
    },
    {
      "epoch": 0.009515164793890262,
      "grad_norm": 4.734344959259033,
      "learning_rate": 7.490299105985507e-05,
      "loss": 18.5457,
      "step": 19
    },
    {
      "epoch": 0.010015962940937118,
      "grad_norm": 4.264060020446777,
      "learning_rate": 7.222075445642904e-05,
      "loss": 18.2222,
      "step": 20
    },
    {
      "epoch": 0.010516761087983974,
      "grad_norm": 3.800395965576172,
      "learning_rate": 6.946477593864228e-05,
      "loss": 18.5526,
      "step": 21
    },
    {
      "epoch": 0.01101755923503083,
      "grad_norm": 3.4941272735595703,
      "learning_rate": 6.664685702961344e-05,
      "loss": 17.8441,
      "step": 22
    },
    {
      "epoch": 0.011518357382077687,
      "grad_norm": 3.7239904403686523,
      "learning_rate": 6.377906449072578e-05,
      "loss": 18.2648,
      "step": 23
    },
    {
      "epoch": 0.012019155529124543,
      "grad_norm": 3.749605655670166,
      "learning_rate": 6.087367864990233e-05,
      "loss": 17.3797,
      "step": 24
    },
    {
      "epoch": 0.012519953676171399,
      "grad_norm": 6.037412643432617,
      "learning_rate": 5.794314081535644e-05,
      "loss": 18.479,
      "step": 25
    },
    {
      "epoch": 0.012519953676171399,
      "eval_loss": 1.1073193550109863,
      "eval_runtime": 3.9929,
      "eval_samples_per_second": 12.522,
      "eval_steps_per_second": 3.256,
      "step": 25
    },
    {
      "epoch": 0.013020751823218255,
      "grad_norm": 6.128429889678955,
      "learning_rate": 5.500000000000001e-05,
      "loss": 20.2164,
      "step": 26
    },
    {
      "epoch": 0.01352154997026511,
      "grad_norm": 7.410825729370117,
      "learning_rate": 5.205685918464356e-05,
      "loss": 20.2266,
      "step": 27
    },
    {
      "epoch": 0.014022348117311966,
      "grad_norm": 4.50378942489624,
      "learning_rate": 4.912632135009769e-05,
      "loss": 17.0934,
      "step": 28
    },
    {
      "epoch": 0.014523146264358822,
      "grad_norm": 4.2456464767456055,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 17.7946,
      "step": 29
    },
    {
      "epoch": 0.015023944411405678,
      "grad_norm": 3.301119327545166,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 19.8457,
      "step": 30
    },
    {
      "epoch": 0.015524742558452534,
      "grad_norm": 3.3931143283843994,
      "learning_rate": 4.053522406135775e-05,
      "loss": 17.4589,
      "step": 31
    },
    {
      "epoch": 0.01602554070549939,
      "grad_norm": 3.908799409866333,
      "learning_rate": 3.777924554357096e-05,
      "loss": 17.9799,
      "step": 32
    },
    {
      "epoch": 0.016526338852546247,
      "grad_norm": 4.308777809143066,
      "learning_rate": 3.509700894014496e-05,
      "loss": 18.2389,
      "step": 33
    },
    {
      "epoch": 0.0170271369995931,
      "grad_norm": 5.028868198394775,
      "learning_rate": 3.250000000000001e-05,
      "loss": 18.8644,
      "step": 34
    },
    {
      "epoch": 0.01752793514663996,
      "grad_norm": 5.518270492553711,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 17.9908,
      "step": 35
    },
    {
      "epoch": 0.018028733293686813,
      "grad_norm": 4.996335506439209,
      "learning_rate": 2.760573569460757e-05,
      "loss": 15.7478,
      "step": 36
    },
    {
      "epoch": 0.01852953144073367,
      "grad_norm": 6.510356426239014,
      "learning_rate": 2.53294383204969e-05,
      "loss": 16.2257,
      "step": 37
    },
    {
      "epoch": 0.019030329587780524,
      "grad_norm": 5.005591869354248,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 21.489,
      "step": 38
    },
    {
      "epoch": 0.019531127734827382,
      "grad_norm": 9.856449127197266,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 21.2883,
      "step": 39
    },
    {
      "epoch": 0.020031925881874236,
      "grad_norm": 3.623931646347046,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 17.6629,
      "step": 40
    },
    {
      "epoch": 0.020532724028921093,
      "grad_norm": 3.4106252193450928,
      "learning_rate": 1.758386744638546e-05,
      "loss": 15.4309,
      "step": 41
    },
    {
      "epoch": 0.021033522175967947,
      "grad_norm": 4.296844005584717,
      "learning_rate": 1.602885682970026e-05,
      "loss": 17.6033,
      "step": 42
    },
    {
      "epoch": 0.021534320323014805,
      "grad_norm": 4.559662342071533,
      "learning_rate": 1.464072663102903e-05,
      "loss": 16.8699,
      "step": 43
    },
    {
      "epoch": 0.02203511847006166,
      "grad_norm": 4.427708625793457,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 17.4621,
      "step": 44
    },
    {
      "epoch": 0.022535916617108517,
      "grad_norm": 3.9432215690612793,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 17.8905,
      "step": 45
    },
    {
      "epoch": 0.023036714764155374,
      "grad_norm": 3.188026189804077,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 17.3133,
      "step": 46
    },
    {
      "epoch": 0.02353751291120223,
      "grad_norm": 3.24480938911438,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 16.5617,
      "step": 47
    },
    {
      "epoch": 0.024038311058249086,
      "grad_norm": 3.795017719268799,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 16.9529,
      "step": 48
    },
    {
      "epoch": 0.02453910920529594,
      "grad_norm": 3.4241626262664795,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 14.9861,
      "step": 49
    },
    {
      "epoch": 0.025039907352342797,
      "grad_norm": 5.961770057678223,
      "learning_rate": 1e-05,
      "loss": 17.1319,
      "step": 50
    },
    {
      "epoch": 0.025039907352342797,
      "eval_loss": 1.0611395835876465,
      "eval_runtime": 3.8308,
      "eval_samples_per_second": 13.052,
      "eval_steps_per_second": 3.394,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.9050464574754e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}