Training in progress, step 50, checkpoint (commit a3ef5ba)
{
"best_metric": 0.5986165404319763,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.034809851187886175,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006961970237577234,
"grad_norm": 4.59558629989624,
"learning_rate": 5e-05,
"loss": 2.8264,
"step": 1
},
{
"epoch": 0.0006961970237577234,
"eval_loss": 5.053422451019287,
"eval_runtime": 3.2961,
"eval_samples_per_second": 15.169,
"eval_steps_per_second": 3.944,
"step": 1
},
{
"epoch": 0.0013923940475154468,
"grad_norm": 6.345708847045898,
"learning_rate": 0.0001,
"loss": 3.6521,
"step": 2
},
{
"epoch": 0.0020885910712731704,
"grad_norm": 5.618392467498779,
"learning_rate": 9.990365154573717e-05,
"loss": 3.2998,
"step": 3
},
{
"epoch": 0.0027847880950308936,
"grad_norm": 5.514031887054443,
"learning_rate": 9.961501876182148e-05,
"loss": 2.304,
"step": 4
},
{
"epoch": 0.003480985118788617,
"grad_norm": 3.9541125297546387,
"learning_rate": 9.913533761814537e-05,
"loss": 1.5882,
"step": 5
},
{
"epoch": 0.004177182142546341,
"grad_norm": 3.096938133239746,
"learning_rate": 9.846666218300807e-05,
"loss": 1.2982,
"step": 6
},
{
"epoch": 0.004873379166304064,
"grad_norm": 2.73142409324646,
"learning_rate": 9.761185582727977e-05,
"loss": 1.0909,
"step": 7
},
{
"epoch": 0.005569576190061787,
"grad_norm": 2.2758121490478516,
"learning_rate": 9.657457896300791e-05,
"loss": 0.9526,
"step": 8
},
{
"epoch": 0.006265773213819511,
"grad_norm": 2.1884617805480957,
"learning_rate": 9.535927336897098e-05,
"loss": 0.9004,
"step": 9
},
{
"epoch": 0.006961970237577234,
"grad_norm": 1.905522108078003,
"learning_rate": 9.397114317029975e-05,
"loss": 0.8348,
"step": 10
},
{
"epoch": 0.007658167261334958,
"grad_norm": 1.953636646270752,
"learning_rate": 9.241613255361455e-05,
"loss": 0.8431,
"step": 11
},
{
"epoch": 0.008354364285092682,
"grad_norm": 2.07503342628479,
"learning_rate": 9.070090031310558e-05,
"loss": 0.8399,
"step": 12
},
{
"epoch": 0.009050561308850404,
"grad_norm": 2.3713271617889404,
"learning_rate": 8.883279133655399e-05,
"loss": 1.0231,
"step": 13
},
{
"epoch": 0.009746758332608128,
"grad_norm": 1.4214296340942383,
"learning_rate": 8.681980515339464e-05,
"loss": 0.8436,
"step": 14
},
{
"epoch": 0.010442955356365852,
"grad_norm": 1.3992135524749756,
"learning_rate": 8.467056167950311e-05,
"loss": 0.7956,
"step": 15
},
{
"epoch": 0.011139152380123574,
"grad_norm": 1.2084200382232666,
"learning_rate": 8.239426430539243e-05,
"loss": 0.6528,
"step": 16
},
{
"epoch": 0.011835349403881298,
"grad_norm": 1.291199803352356,
"learning_rate": 8.000066048588211e-05,
"loss": 0.7458,
"step": 17
},
{
"epoch": 0.012531546427639022,
"grad_norm": 1.2058777809143066,
"learning_rate": 7.75e-05,
"loss": 0.6487,
"step": 18
},
{
"epoch": 0.013227743451396745,
"grad_norm": 1.1101080179214478,
"learning_rate": 7.490299105985507e-05,
"loss": 0.6245,
"step": 19
},
{
"epoch": 0.013923940475154469,
"grad_norm": 1.1463162899017334,
"learning_rate": 7.222075445642904e-05,
"loss": 0.6225,
"step": 20
},
{
"epoch": 0.014620137498912193,
"grad_norm": 1.246386170387268,
"learning_rate": 6.946477593864228e-05,
"loss": 0.7079,
"step": 21
},
{
"epoch": 0.015316334522669915,
"grad_norm": 1.2304069995880127,
"learning_rate": 6.664685702961344e-05,
"loss": 0.6294,
"step": 22
},
{
"epoch": 0.01601253154642764,
"grad_norm": 1.3009647130966187,
"learning_rate": 6.377906449072578e-05,
"loss": 0.6165,
"step": 23
},
{
"epoch": 0.016708728570185363,
"grad_norm": 1.384681224822998,
"learning_rate": 6.087367864990233e-05,
"loss": 0.6018,
"step": 24
},
{
"epoch": 0.017404925593943087,
"grad_norm": 1.7515813112258911,
"learning_rate": 5.794314081535644e-05,
"loss": 0.6501,
"step": 25
},
{
"epoch": 0.017404925593943087,
"eval_loss": 0.7062661051750183,
"eval_runtime": 3.3907,
"eval_samples_per_second": 14.746,
"eval_steps_per_second": 3.834,
"step": 25
},
{
"epoch": 0.018101122617700808,
"grad_norm": 1.3576762676239014,
"learning_rate": 5.500000000000001e-05,
"loss": 0.7465,
"step": 26
},
{
"epoch": 0.018797319641458532,
"grad_norm": 1.313129186630249,
"learning_rate": 5.205685918464356e-05,
"loss": 0.6395,
"step": 27
},
{
"epoch": 0.019493516665216256,
"grad_norm": 1.188519835472107,
"learning_rate": 4.912632135009769e-05,
"loss": 0.7079,
"step": 28
},
{
"epoch": 0.02018971368897398,
"grad_norm": 1.1454977989196777,
"learning_rate": 4.6220935509274235e-05,
"loss": 0.6928,
"step": 29
},
{
"epoch": 0.020885910712731704,
"grad_norm": 1.0681382417678833,
"learning_rate": 4.3353142970386564e-05,
"loss": 0.6142,
"step": 30
},
{
"epoch": 0.021582107736489428,
"grad_norm": 1.1505845785140991,
"learning_rate": 4.053522406135775e-05,
"loss": 0.5769,
"step": 31
},
{
"epoch": 0.02227830476024715,
"grad_norm": 1.2340224981307983,
"learning_rate": 3.777924554357096e-05,
"loss": 0.5512,
"step": 32
},
{
"epoch": 0.022974501784004873,
"grad_norm": 1.0733771324157715,
"learning_rate": 3.509700894014496e-05,
"loss": 0.5667,
"step": 33
},
{
"epoch": 0.023670698807762597,
"grad_norm": 1.0927112102508545,
"learning_rate": 3.250000000000001e-05,
"loss": 0.5102,
"step": 34
},
{
"epoch": 0.02436689583152032,
"grad_norm": 1.3149555921554565,
"learning_rate": 2.9999339514117912e-05,
"loss": 0.5898,
"step": 35
},
{
"epoch": 0.025063092855278045,
"grad_norm": 1.3991199731826782,
"learning_rate": 2.760573569460757e-05,
"loss": 0.6337,
"step": 36
},
{
"epoch": 0.025759289879035766,
"grad_norm": 1.4893077611923218,
"learning_rate": 2.53294383204969e-05,
"loss": 0.6278,
"step": 37
},
{
"epoch": 0.02645548690279349,
"grad_norm": 1.150443434715271,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.8092,
"step": 38
},
{
"epoch": 0.027151683926551214,
"grad_norm": 0.8582993745803833,
"learning_rate": 2.1167208663446025e-05,
"loss": 0.6176,
"step": 39
},
{
"epoch": 0.027847880950308938,
"grad_norm": 1.009572148323059,
"learning_rate": 1.9299099686894423e-05,
"loss": 0.5971,
"step": 40
},
{
"epoch": 0.02854407797406666,
"grad_norm": 1.0766137838363647,
"learning_rate": 1.758386744638546e-05,
"loss": 0.5786,
"step": 41
},
{
"epoch": 0.029240274997824386,
"grad_norm": 0.9323824644088745,
"learning_rate": 1.602885682970026e-05,
"loss": 0.555,
"step": 42
},
{
"epoch": 0.029936472021582106,
"grad_norm": 1.1088814735412598,
"learning_rate": 1.464072663102903e-05,
"loss": 0.5825,
"step": 43
},
{
"epoch": 0.03063266904533983,
"grad_norm": 0.9386204481124878,
"learning_rate": 1.3425421036992098e-05,
"loss": 0.5434,
"step": 44
},
{
"epoch": 0.03132886606909756,
"grad_norm": 0.8790339827537537,
"learning_rate": 1.2388144172720251e-05,
"loss": 0.4153,
"step": 45
},
{
"epoch": 0.03202506309285528,
"grad_norm": 1.0394346714019775,
"learning_rate": 1.1533337816991932e-05,
"loss": 0.4865,
"step": 46
},
{
"epoch": 0.032721260116613,
"grad_norm": 1.093427062034607,
"learning_rate": 1.0864662381854632e-05,
"loss": 0.5469,
"step": 47
},
{
"epoch": 0.03341745714037073,
"grad_norm": 1.174867033958435,
"learning_rate": 1.0384981238178534e-05,
"loss": 0.5534,
"step": 48
},
{
"epoch": 0.03411365416412845,
"grad_norm": 1.370957851409912,
"learning_rate": 1.0096348454262845e-05,
"loss": 0.5735,
"step": 49
},
{
"epoch": 0.034809851187886175,
"grad_norm": 1.5540739297866821,
"learning_rate": 1e-05,
"loss": 0.6185,
"step": 50
},
{
"epoch": 0.034809851187886175,
"eval_loss": 0.5986165404319763,
"eval_runtime": 3.3935,
"eval_samples_per_second": 14.734,
"eval_steps_per_second": 3.831,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.631381723512832e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
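
For reference, a minimal sketch of how a trainer_state.json like the one above could be inspected with the Python standard library. The local filename "trainer_state.json" is an assumption; the field names (best_metric, best_model_checkpoint, log_history) match the JSON shown here.

```python
import json

# Assumption: the checkpoint state above has been saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss entries (with "loss") and evaluation entries (with "eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Summarize the run: best eval loss, final training loss, and the eval trajectory.
print(f"best eval_loss: {state['best_metric']:.4f} "
      f"(checkpoint: {state['best_model_checkpoint']})")
print(f"final train loss at step {train_logs[-1]['step']}: {train_logs[-1]['loss']}")
for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}")
```

With the values above, this would report the best eval_loss of roughly 0.5986 at checkpoint-50 and eval losses at steps 1, 25, and 50 (the eval_steps=25 schedule).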