{
"best_metric": 11.071593284606934,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.008,
"eval_steps": 25,
"global_step": 47,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.064,
"grad_norm": 2.7551679611206055,
"learning_rate": 5e-05,
"loss": 11.0895,
"step": 1
},
{
"epoch": 0.064,
"eval_loss": 11.088528633117676,
"eval_runtime": 0.1013,
"eval_samples_per_second": 1046.436,
"eval_steps_per_second": 138.209,
"step": 1
},
{
"epoch": 0.128,
"grad_norm": 3.356942653656006,
"learning_rate": 0.0001,
"loss": 11.0903,
"step": 2
},
{
"epoch": 0.192,
"grad_norm": 3.378460168838501,
"learning_rate": 9.987820251299122e-05,
"loss": 11.0944,
"step": 3
},
{
"epoch": 0.256,
"grad_norm": 3.729534149169922,
"learning_rate": 9.951340343707852e-05,
"loss": 11.0861,
"step": 4
},
{
"epoch": 0.32,
"grad_norm": 3.094676971435547,
"learning_rate": 9.890738003669029e-05,
"loss": 11.0922,
"step": 5
},
{
"epoch": 0.384,
"grad_norm": 3.3166348934173584,
"learning_rate": 9.806308479691595e-05,
"loss": 11.0921,
"step": 6
},
{
"epoch": 0.448,
"grad_norm": 3.6665329933166504,
"learning_rate": 9.698463103929542e-05,
"loss": 11.0857,
"step": 7
},
{
"epoch": 0.512,
"grad_norm": 4.133778095245361,
"learning_rate": 9.567727288213005e-05,
"loss": 11.0874,
"step": 8
},
{
"epoch": 0.576,
"grad_norm": 3.0725619792938232,
"learning_rate": 9.414737964294636e-05,
"loss": 11.0896,
"step": 9
},
{
"epoch": 0.64,
"grad_norm": 3.256758451461792,
"learning_rate": 9.24024048078213e-05,
"loss": 11.0865,
"step": 10
},
{
"epoch": 0.704,
"grad_norm": 3.9415805339813232,
"learning_rate": 9.045084971874738e-05,
"loss": 11.0848,
"step": 11
},
{
"epoch": 0.768,
"grad_norm": 3.417839288711548,
"learning_rate": 8.83022221559489e-05,
"loss": 11.0847,
"step": 12
},
{
"epoch": 0.832,
"grad_norm": 3.2900781631469727,
"learning_rate": 8.596699001693255e-05,
"loss": 11.0839,
"step": 13
},
{
"epoch": 0.896,
"grad_norm": 3.490720748901367,
"learning_rate": 8.345653031794292e-05,
"loss": 11.0831,
"step": 14
},
{
"epoch": 0.96,
"grad_norm": 3.946263074874878,
"learning_rate": 8.07830737662829e-05,
"loss": 11.0783,
"step": 15
},
{
"epoch": 1.024,
"grad_norm": 3.4524974822998047,
"learning_rate": 7.795964517353735e-05,
"loss": 11.0873,
"step": 16
},
{
"epoch": 1.088,
"grad_norm": 3.8025898933410645,
"learning_rate": 7.500000000000001e-05,
"loss": 11.0816,
"step": 17
},
{
"epoch": 1.152,
"grad_norm": 3.968613386154175,
"learning_rate": 7.191855733945387e-05,
"loss": 11.0806,
"step": 18
},
{
"epoch": 1.216,
"grad_norm": 4.949509143829346,
"learning_rate": 6.873032967079561e-05,
"loss": 11.0765,
"step": 19
},
{
"epoch": 1.28,
"grad_norm": 4.188719272613525,
"learning_rate": 6.545084971874738e-05,
"loss": 11.0805,
"step": 20
},
{
"epoch": 1.3439999999999999,
"grad_norm": 3.8235926628112793,
"learning_rate": 6.209609477998338e-05,
"loss": 11.0809,
"step": 21
},
{
"epoch": 1.408,
"grad_norm": 4.225762844085693,
"learning_rate": 5.868240888334653e-05,
"loss": 11.077,
"step": 22
},
{
"epoch": 1.472,
"grad_norm": 5.48159122467041,
"learning_rate": 5.522642316338268e-05,
"loss": 11.076,
"step": 23
},
{
"epoch": 1.536,
"grad_norm": 3.7110719680786133,
"learning_rate": 5.174497483512506e-05,
"loss": 11.0746,
"step": 24
},
{
"epoch": 1.6,
"grad_norm": 4.030111312866211,
"learning_rate": 4.825502516487497e-05,
"loss": 11.0775,
"step": 25
},
{
"epoch": 1.6,
"eval_loss": 11.071593284606934,
"eval_runtime": 0.1012,
"eval_samples_per_second": 1047.442,
"eval_steps_per_second": 138.341,
"step": 25
},
{
"epoch": 1.6640000000000001,
"grad_norm": 4.809144020080566,
"learning_rate": 4.477357683661734e-05,
"loss": 11.0729,
"step": 26
},
{
"epoch": 1.728,
"grad_norm": 5.203744888305664,
"learning_rate": 4.131759111665349e-05,
"loss": 11.078,
"step": 27
},
{
"epoch": 1.792,
"grad_norm": 3.483856678009033,
"learning_rate": 3.790390522001662e-05,
"loss": 11.0775,
"step": 28
},
{
"epoch": 1.8559999999999999,
"grad_norm": 4.308492660522461,
"learning_rate": 3.4549150281252636e-05,
"loss": 11.0724,
"step": 29
},
{
"epoch": 1.92,
"grad_norm": 4.462175369262695,
"learning_rate": 3.12696703292044e-05,
"loss": 11.0692,
"step": 30
},
{
"epoch": 1.984,
"grad_norm": 4.674779891967773,
"learning_rate": 2.8081442660546125e-05,
"loss": 11.0758,
"step": 31
},
{
"epoch": 2.048,
"grad_norm": 4.3319244384765625,
"learning_rate": 2.500000000000001e-05,
"loss": 11.0778,
"step": 32
},
{
"epoch": 2.112,
"grad_norm": 4.53694486618042,
"learning_rate": 2.2040354826462668e-05,
"loss": 11.078,
"step": 33
},
{
"epoch": 2.176,
"grad_norm": 4.895163536071777,
"learning_rate": 1.9216926233717085e-05,
"loss": 11.0718,
"step": 34
},
{
"epoch": 2.24,
"grad_norm": 6.260696887969971,
"learning_rate": 1.6543469682057106e-05,
"loss": 11.0733,
"step": 35
},
{
"epoch": 2.304,
"grad_norm": 3.851774215698242,
"learning_rate": 1.4033009983067452e-05,
"loss": 11.0782,
"step": 36
},
{
"epoch": 2.368,
"grad_norm": 4.45391845703125,
"learning_rate": 1.1697777844051105e-05,
"loss": 11.0703,
"step": 37
},
{
"epoch": 2.432,
"grad_norm": 5.240525722503662,
"learning_rate": 9.549150281252633e-06,
"loss": 11.0769,
"step": 38
},
{
"epoch": 2.496,
"grad_norm": 5.4221577644348145,
"learning_rate": 7.597595192178702e-06,
"loss": 11.0718,
"step": 39
},
{
"epoch": 2.56,
"grad_norm": 4.008364200592041,
"learning_rate": 5.852620357053651e-06,
"loss": 11.0728,
"step": 40
},
{
"epoch": 2.624,
"grad_norm": 4.672859191894531,
"learning_rate": 4.322727117869951e-06,
"loss": 11.0769,
"step": 41
},
{
"epoch": 2.6879999999999997,
"grad_norm": 5.85573148727417,
"learning_rate": 3.0153689607045845e-06,
"loss": 11.0651,
"step": 42
},
{
"epoch": 2.752,
"grad_norm": 5.035050868988037,
"learning_rate": 1.9369152030840556e-06,
"loss": 11.0731,
"step": 43
},
{
"epoch": 2.816,
"grad_norm": 4.608683109283447,
"learning_rate": 1.0926199633097157e-06,
"loss": 11.0689,
"step": 44
},
{
"epoch": 2.88,
"grad_norm": 5.071901321411133,
"learning_rate": 4.865965629214819e-07,
"loss": 11.0771,
"step": 45
},
{
"epoch": 2.944,
"grad_norm": 6.193559169769287,
"learning_rate": 1.2179748700879012e-07,
"loss": 11.0724,
"step": 46
},
{
"epoch": 3.008,
"grad_norm": 3.6927578449249268,
"learning_rate": 0.0,
"loss": 11.0724,
"step": 47
}
],
"logging_steps": 1,
"max_steps": 47,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3950531051520.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}