{
"best_metric": NaN,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 0.16070711128967458,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0032141422257934912,
"grad_norm": NaN,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 1
},
{
"epoch": 0.0032141422257934912,
"eval_loss": NaN,
"eval_runtime": 48.7668,
"eval_samples_per_second": 10.766,
"eval_steps_per_second": 5.393,
"step": 1
},
{
"epoch": 0.0064282844515869825,
"grad_norm": NaN,
"learning_rate": 4e-05,
"loss": 0.0,
"step": 2
},
{
"epoch": 0.009642426677380474,
"grad_norm": NaN,
"learning_rate": 6e-05,
"loss": 0.0,
"step": 3
},
{
"epoch": 0.012856568903173965,
"grad_norm": NaN,
"learning_rate": 8e-05,
"loss": 0.0,
"step": 4
},
{
"epoch": 0.016070711128967456,
"grad_norm": NaN,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.019284853354760947,
"grad_norm": NaN,
"learning_rate": 0.00012,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.02249899558055444,
"grad_norm": NaN,
"learning_rate": 0.00014,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.02571313780634793,
"grad_norm": NaN,
"learning_rate": 0.00016,
"loss": 0.0,
"step": 8
},
{
"epoch": 0.02892728003214142,
"grad_norm": NaN,
"learning_rate": 0.00018,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.03214142225793491,
"grad_norm": NaN,
"learning_rate": 0.0002,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.035355564483728404,
"grad_norm": NaN,
"learning_rate": 0.00019988322268323268,
"loss": 0.0,
"step": 11
},
{
"epoch": 0.038569706709521895,
"grad_norm": NaN,
"learning_rate": 0.00019953316347176488,
"loss": 0.0,
"step": 12
},
{
"epoch": 0.041783848935315386,
"grad_norm": NaN,
"learning_rate": 0.0001989506399451051,
"loss": 0.0,
"step": 13
},
{
"epoch": 0.04499799116110888,
"grad_norm": NaN,
"learning_rate": 0.00019813701261394136,
"loss": 0.0,
"step": 14
},
{
"epoch": 0.04821213338690237,
"grad_norm": NaN,
"learning_rate": 0.0001970941817426052,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.05142627561269586,
"grad_norm": NaN,
"learning_rate": 0.00019582458291091663,
"loss": 0.0,
"step": 16
},
{
"epoch": 0.05464041783848935,
"grad_norm": NaN,
"learning_rate": 0.0001943311813257743,
"loss": 0.0,
"step": 17
},
{
"epoch": 0.05785456006428284,
"grad_norm": NaN,
"learning_rate": 0.00019261746489577765,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.061068702290076333,
"grad_norm": NaN,
"learning_rate": 0.00019068743608505455,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.06428284451586982,
"grad_norm": NaN,
"learning_rate": 0.000188545602565321,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.06749698674166332,
"grad_norm": NaN,
"learning_rate": 0.00018619696668800492,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.07071112896745681,
"grad_norm": NaN,
"learning_rate": 0.00018364701380102266,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.0739252711932503,
"grad_norm": NaN,
"learning_rate": 0.00018090169943749476,
"loss": 0.0,
"step": 23
},
{
"epoch": 0.07713941341904379,
"grad_norm": NaN,
"learning_rate": 0.00017796743540632223,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.08035355564483729,
"grad_norm": NaN,
"learning_rate": 0.00017485107481711012,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.08035355564483729,
"eval_loss": NaN,
"eval_runtime": 48.3741,
"eval_samples_per_second": 10.853,
"eval_steps_per_second": 5.437,
"step": 25
},
{
"epoch": 0.08356769787063077,
"grad_norm": NaN,
"learning_rate": 0.00017155989607441213,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.08678184009642427,
"grad_norm": NaN,
"learning_rate": 0.00016810158587867973,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.08999598232221775,
"grad_norm": NaN,
"learning_rate": 0.00016448422127361706,
"loss": 0.0,
"step": 28
},
{
"epoch": 0.09321012454801125,
"grad_norm": NaN,
"learning_rate": 0.00016071625078187114,
"loss": 0.0,
"step": 29
},
{
"epoch": 0.09642426677380474,
"grad_norm": NaN,
"learning_rate": 0.00015680647467311557,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.09963840899959824,
"grad_norm": NaN,
"learning_rate": 0.0001527640244106133,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.10285255122539172,
"grad_norm": NaN,
"learning_rate": 0.0001485983413242606,
"loss": 0.0,
"step": 32
},
{
"epoch": 0.10606669345118522,
"grad_norm": NaN,
"learning_rate": 0.00014431915455992414,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.1092808356769787,
"grad_norm": NaN,
"learning_rate": 0.00013993645835656953,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.1124949779027722,
"grad_norm": NaN,
"learning_rate": 0.00013546048870425356,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.11570912012856568,
"grad_norm": NaN,
"learning_rate": 0.00013090169943749476,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.11892326235435918,
"grad_norm": NaN,
"learning_rate": 0.0001262707378198587,
"loss": 0.0,
"step": 37
},
{
"epoch": 0.12213740458015267,
"grad_norm": NaN,
"learning_rate": 0.00012157841967678063,
"loss": 0.0,
"step": 38
},
{
"epoch": 0.12535154680594615,
"grad_norm": NaN,
"learning_rate": 0.00011683570413470383,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.12856568903173965,
"grad_norm": NaN,
"learning_rate": 0.0001120536680255323,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.13177983125753315,
"grad_norm": NaN,
"learning_rate": 0.00010724348001617625,
"loss": 0.0,
"step": 41
},
{
"epoch": 0.13499397348332665,
"grad_norm": NaN,
"learning_rate": 0.00010241637452361323,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.13820811570912012,
"grad_norm": NaN,
"learning_rate": 9.75836254763868e-05,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.14142225793491361,
"grad_norm": NaN,
"learning_rate": 9.275651998382377e-05,
"loss": 0.0,
"step": 44
},
{
"epoch": 0.1446364001607071,
"grad_norm": NaN,
"learning_rate": 8.79463319744677e-05,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.1478505423865006,
"grad_norm": NaN,
"learning_rate": 8.316429586529615e-05,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.15106468461229408,
"grad_norm": NaN,
"learning_rate": 7.84215803232194e-05,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.15427882683808758,
"grad_norm": NaN,
"learning_rate": 7.372926218014131e-05,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.15749296906388108,
"grad_norm": NaN,
"learning_rate": 6.909830056250527e-05,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.16070711128967458,
"grad_norm": NaN,
"learning_rate": 6.453951129574644e-05,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.16070711128967458,
"eval_loss": NaN,
"eval_runtime": 48.3643,
"eval_samples_per_second": 10.855,
"eval_steps_per_second": 5.438,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.042792895709184e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
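
The state above records 50 optimizer steps (logged every step, evaluated and saved every 25 steps, max_steps 75) in which every loss is 0.0 and every grad_norm and eval_loss is NaN, with an EarlyStoppingCallback whose patience counter already sits at 1 of 3. Below is a minimal sketch, not part of the checkpoint itself, of how such a state could be inspected; the file path "miner_id_24/checkpoint-50/trainer_state.json" is an assumption based on the standard Transformers layout, and the variable names are illustrative.

# Minimal inspection sketch for the trainer state shown above (path is assumed).
import json
import math

with open("miner_id_24/checkpoint-50/trainer_state.json") as f:
    state = json.load(f)  # Python's json parser accepts the NaN literals used here

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

nan_grads = sum(1 for e in train_logs if math.isnan(e.get("grad_norm", 0.0)))
nan_evals = sum(1 for e in eval_logs if math.isnan(e["eval_loss"]))

print(f"train steps logged: {len(train_logs)} (NaN grad_norm: {nan_grads})")
print(f"evaluations:        {len(eval_logs)} (NaN eval_loss: {nan_evals})")
print(f"best checkpoint:    {state['best_model_checkpoint']}")

# The patience counter is already at 1; NaN metrics never register as an
# improvement, so the counter should keep rising until patience (3) is hit
# or max_steps (75) is reached, whichever comes first.
es = state["stateful_callbacks"]["EarlyStoppingCallback"]
print(f"early-stopping counter: {es['attributes']['early_stopping_patience_counter']}"
      f" / {es['args']['early_stopping_patience']}")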