{
"best_metric": 1.1867320537567139,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.02226737551145378,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00044534751022907564,
"grad_norm": 47.00421142578125,
"learning_rate": 5e-05,
"loss": 5.2543,
"step": 1
},
{
"epoch": 0.00044534751022907564,
"eval_loss": 6.243781089782715,
"eval_runtime": 737.4692,
"eval_samples_per_second": 20.513,
"eval_steps_per_second": 2.564,
"step": 1
},
{
"epoch": 0.0008906950204581513,
"grad_norm": 45.821319580078125,
"learning_rate": 0.0001,
"loss": 5.2971,
"step": 2
},
{
"epoch": 0.001336042530687227,
"grad_norm": 47.45862579345703,
"learning_rate": 9.989294616193017e-05,
"loss": 5.4538,
"step": 3
},
{
"epoch": 0.0017813900409163026,
"grad_norm": 70.39472961425781,
"learning_rate": 9.957224306869053e-05,
"loss": 5.7192,
"step": 4
},
{
"epoch": 0.002226737551145378,
"grad_norm": 76.26249694824219,
"learning_rate": 9.903926402016153e-05,
"loss": 4.7385,
"step": 5
},
{
"epoch": 0.002672085061374454,
"grad_norm": 83.42272186279297,
"learning_rate": 9.829629131445342e-05,
"loss": 4.399,
"step": 6
},
{
"epoch": 0.0031174325716035295,
"grad_norm": 77.07828521728516,
"learning_rate": 9.73465064747553e-05,
"loss": 3.9627,
"step": 7
},
{
"epoch": 0.003562780081832605,
"grad_norm": 70.13909149169922,
"learning_rate": 9.619397662556435e-05,
"loss": 3.5779,
"step": 8
},
{
"epoch": 0.00400812759206168,
"grad_norm": 46.44620895385742,
"learning_rate": 9.484363707663442e-05,
"loss": 3.2747,
"step": 9
},
{
"epoch": 0.004453475102290756,
"grad_norm": 49.42829513549805,
"learning_rate": 9.330127018922194e-05,
"loss": 2.744,
"step": 10
},
{
"epoch": 0.0048988226125198315,
"grad_norm": 48.85025405883789,
"learning_rate": 9.157348061512727e-05,
"loss": 2.5355,
"step": 11
},
{
"epoch": 0.005344170122748908,
"grad_norm": 45.37143325805664,
"learning_rate": 8.966766701456177e-05,
"loss": 2.6555,
"step": 12
},
{
"epoch": 0.0057895176329779835,
"grad_norm": 44.338478088378906,
"learning_rate": 8.759199037394887e-05,
"loss": 2.4589,
"step": 13
},
{
"epoch": 0.006234865143207059,
"grad_norm": 37.574886322021484,
"learning_rate": 8.535533905932738e-05,
"loss": 2.1478,
"step": 14
},
{
"epoch": 0.006680212653436135,
"grad_norm": 40.13383483886719,
"learning_rate": 8.296729075500344e-05,
"loss": 1.8073,
"step": 15
},
{
"epoch": 0.00712556016366521,
"grad_norm": 44.1107292175293,
"learning_rate": 8.043807145043604e-05,
"loss": 1.7844,
"step": 16
},
{
"epoch": 0.007570907673894286,
"grad_norm": 41.8834114074707,
"learning_rate": 7.777851165098012e-05,
"loss": 1.8501,
"step": 17
},
{
"epoch": 0.00801625518412336,
"grad_norm": 41.976158142089844,
"learning_rate": 7.500000000000001e-05,
"loss": 1.6489,
"step": 18
},
{
"epoch": 0.008461602694352437,
"grad_norm": 43.49707794189453,
"learning_rate": 7.211443451095007e-05,
"loss": 1.5935,
"step": 19
},
{
"epoch": 0.008906950204581512,
"grad_norm": 29.12820816040039,
"learning_rate": 6.91341716182545e-05,
"loss": 1.2958,
"step": 20
},
{
"epoch": 0.009352297714810588,
"grad_norm": 35.42374038696289,
"learning_rate": 6.607197326515808e-05,
"loss": 1.3354,
"step": 21
},
{
"epoch": 0.009797645225039663,
"grad_norm": 37.46640396118164,
"learning_rate": 6.294095225512603e-05,
"loss": 1.5067,
"step": 22
},
{
"epoch": 0.01024299273526874,
"grad_norm": 36.21051025390625,
"learning_rate": 5.9754516100806423e-05,
"loss": 1.4837,
"step": 23
},
{
"epoch": 0.010688340245497816,
"grad_norm": 36.641109466552734,
"learning_rate": 5.6526309611002594e-05,
"loss": 1.2959,
"step": 24
},
{
"epoch": 0.01113368775572689,
"grad_norm": 34.847164154052734,
"learning_rate": 5.327015646150716e-05,
"loss": 1.4527,
"step": 25
},
{
"epoch": 0.01113368775572689,
"eval_loss": 1.3544864654541016,
"eval_runtime": 736.2259,
"eval_samples_per_second": 20.548,
"eval_steps_per_second": 2.569,
"step": 25
},
{
"epoch": 0.011579035265955967,
"grad_norm": 30.952394485473633,
"learning_rate": 5e-05,
"loss": 1.5383,
"step": 26
},
{
"epoch": 0.012024382776185042,
"grad_norm": 32.05979919433594,
"learning_rate": 4.6729843538492847e-05,
"loss": 1.4933,
"step": 27
},
{
"epoch": 0.012469730286414118,
"grad_norm": 27.44908332824707,
"learning_rate": 4.347369038899744e-05,
"loss": 1.1802,
"step": 28
},
{
"epoch": 0.012915077796643193,
"grad_norm": 29.982824325561523,
"learning_rate": 4.0245483899193595e-05,
"loss": 1.2624,
"step": 29
},
{
"epoch": 0.01336042530687227,
"grad_norm": 26.732736587524414,
"learning_rate": 3.705904774487396e-05,
"loss": 1.1506,
"step": 30
},
{
"epoch": 0.013805772817101344,
"grad_norm": 29.822994232177734,
"learning_rate": 3.392802673484193e-05,
"loss": 1.1674,
"step": 31
},
{
"epoch": 0.01425112032733042,
"grad_norm": 28.42780876159668,
"learning_rate": 3.086582838174551e-05,
"loss": 1.0655,
"step": 32
},
{
"epoch": 0.014696467837559495,
"grad_norm": 27.28021812438965,
"learning_rate": 2.7885565489049946e-05,
"loss": 1.3545,
"step": 33
},
{
"epoch": 0.015141815347788572,
"grad_norm": 26.741413116455078,
"learning_rate": 2.500000000000001e-05,
"loss": 1.0021,
"step": 34
},
{
"epoch": 0.015587162858017646,
"grad_norm": 38.1240119934082,
"learning_rate": 2.2221488349019903e-05,
"loss": 1.1049,
"step": 35
},
{
"epoch": 0.01603251036824672,
"grad_norm": 31.203073501586914,
"learning_rate": 1.9561928549563968e-05,
"loss": 1.1722,
"step": 36
},
{
"epoch": 0.016477857878475798,
"grad_norm": 28.79928207397461,
"learning_rate": 1.703270924499656e-05,
"loss": 1.0829,
"step": 37
},
{
"epoch": 0.016923205388704874,
"grad_norm": 29.13315773010254,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.2533,
"step": 38
},
{
"epoch": 0.01736855289893395,
"grad_norm": 31.34278678894043,
"learning_rate": 1.2408009626051137e-05,
"loss": 1.338,
"step": 39
},
{
"epoch": 0.017813900409163023,
"grad_norm": 25.4918155670166,
"learning_rate": 1.0332332985438248e-05,
"loss": 1.1436,
"step": 40
},
{
"epoch": 0.0182592479193921,
"grad_norm": 27.85494041442871,
"learning_rate": 8.426519384872733e-06,
"loss": 1.1686,
"step": 41
},
{
"epoch": 0.018704595429621176,
"grad_norm": 29.38884925842285,
"learning_rate": 6.698729810778065e-06,
"loss": 1.0788,
"step": 42
},
{
"epoch": 0.019149942939850253,
"grad_norm": 29.656871795654297,
"learning_rate": 5.156362923365588e-06,
"loss": 1.2324,
"step": 43
},
{
"epoch": 0.019595290450079326,
"grad_norm": 38.212669372558594,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.9916,
"step": 44
},
{
"epoch": 0.020040637960308402,
"grad_norm": 32.79332733154297,
"learning_rate": 2.653493525244721e-06,
"loss": 1.0483,
"step": 45
},
{
"epoch": 0.02048598547053748,
"grad_norm": 32.203372955322266,
"learning_rate": 1.70370868554659e-06,
"loss": 1.3274,
"step": 46
},
{
"epoch": 0.020931332980766555,
"grad_norm": 26.626222610473633,
"learning_rate": 9.607359798384785e-07,
"loss": 1.0576,
"step": 47
},
{
"epoch": 0.02137668049099563,
"grad_norm": 31.617427825927734,
"learning_rate": 4.277569313094809e-07,
"loss": 1.1872,
"step": 48
},
{
"epoch": 0.021822028001224705,
"grad_norm": 30.30613136291504,
"learning_rate": 1.0705383806982606e-07,
"loss": 1.1083,
"step": 49
},
{
"epoch": 0.02226737551145378,
"grad_norm": 34.23044204711914,
"learning_rate": 0.0,
"loss": 1.2354,
"step": 50
},
{
"epoch": 0.02226737551145378,
"eval_loss": 1.1867320537567139,
"eval_runtime": 736.9926,
"eval_samples_per_second": 20.527,
"eval_steps_per_second": 2.566,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0952750720352256e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}