{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.26991565135895035,
"eval_steps": 9,
"global_step": 72,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0037488284910965324,
"eval_loss": 2.055473566055298,
"eval_runtime": 12.7703,
"eval_samples_per_second": 35.238,
"eval_steps_per_second": 4.463,
"step": 1
},
{
"epoch": 0.011246485473289597,
"grad_norm": 0.1467806100845337,
"learning_rate": 1.5e-05,
"loss": 1.9063,
"step": 3
},
{
"epoch": 0.022492970946579195,
"grad_norm": 0.18374566733837128,
"learning_rate": 3e-05,
"loss": 1.9823,
"step": 6
},
{
"epoch": 0.033739456419868794,
"grad_norm": 0.2081272304058075,
"learning_rate": 4.5e-05,
"loss": 2.1133,
"step": 9
},
{
"epoch": 0.033739456419868794,
"eval_loss": 2.050758123397827,
"eval_runtime": 12.8585,
"eval_samples_per_second": 34.996,
"eval_steps_per_second": 4.433,
"step": 9
},
{
"epoch": 0.04498594189315839,
"grad_norm": 0.2559334337711334,
"learning_rate": 4.993910125649561e-05,
"loss": 2.0606,
"step": 12
},
{
"epoch": 0.056232427366447985,
"grad_norm": 0.15697269141674042,
"learning_rate": 4.962019382530521e-05,
"loss": 2.0357,
"step": 15
},
{
"epoch": 0.06747891283973759,
"grad_norm": 0.16153453290462494,
"learning_rate": 4.9031542398457974e-05,
"loss": 2.005,
"step": 18
},
{
"epoch": 0.06747891283973759,
"eval_loss": 2.0186078548431396,
"eval_runtime": 12.902,
"eval_samples_per_second": 34.878,
"eval_steps_per_second": 4.418,
"step": 18
},
{
"epoch": 0.07872539831302718,
"grad_norm": 0.15399610996246338,
"learning_rate": 4.817959636416969e-05,
"loss": 1.9246,
"step": 21
},
{
"epoch": 0.08997188378631678,
"grad_norm": 0.18686576187610626,
"learning_rate": 4.707368982147318e-05,
"loss": 1.9773,
"step": 24
},
{
"epoch": 0.10121836925960637,
"grad_norm": 0.1838475912809372,
"learning_rate": 4.572593931387604e-05,
"loss": 2.0225,
"step": 27
},
{
"epoch": 0.10121836925960637,
"eval_loss": 1.988869071006775,
"eval_runtime": 12.9477,
"eval_samples_per_second": 34.755,
"eval_steps_per_second": 4.402,
"step": 27
},
{
"epoch": 0.11246485473289597,
"grad_norm": 0.19368326663970947,
"learning_rate": 4.415111107797445e-05,
"loss": 1.9612,
"step": 30
},
{
"epoch": 0.12371134020618557,
"grad_norm": 0.17149889469146729,
"learning_rate": 4.2366459261474933e-05,
"loss": 2.022,
"step": 33
},
{
"epoch": 0.13495782567947517,
"grad_norm": 0.18569713830947876,
"learning_rate": 4.039153688314145e-05,
"loss": 1.9794,
"step": 36
},
{
"epoch": 0.13495782567947517,
"eval_loss": 1.9658715724945068,
"eval_runtime": 12.9648,
"eval_samples_per_second": 34.709,
"eval_steps_per_second": 4.397,
"step": 36
},
{
"epoch": 0.14620431115276475,
"grad_norm": 0.1827002316713333,
"learning_rate": 3.824798160583012e-05,
"loss": 1.889,
"step": 39
},
{
"epoch": 0.15745079662605435,
"grad_norm": 0.23876498639583588,
"learning_rate": 3.5959278669726935e-05,
"loss": 2.0144,
"step": 42
},
{
"epoch": 0.16869728209934395,
"grad_norm": 0.22354301810264587,
"learning_rate": 3.355050358314172e-05,
"loss": 1.9035,
"step": 45
},
{
"epoch": 0.16869728209934395,
"eval_loss": 1.9489043951034546,
"eval_runtime": 12.9724,
"eval_samples_per_second": 34.689,
"eval_steps_per_second": 4.394,
"step": 45
},
{
"epoch": 0.17994376757263356,
"grad_norm": 0.22030919790267944,
"learning_rate": 3.104804738999169e-05,
"loss": 2.0074,
"step": 48
},
{
"epoch": 0.19119025304592316,
"grad_norm": 0.20402924716472626,
"learning_rate": 2.8479327524001636e-05,
"loss": 1.9598,
"step": 51
},
{
"epoch": 0.20243673851921273,
"grad_norm": 0.21052472293376923,
"learning_rate": 2.587248741756253e-05,
"loss": 1.9229,
"step": 54
},
{
"epoch": 0.20243673851921273,
"eval_loss": 1.9371302127838135,
"eval_runtime": 12.9738,
"eval_samples_per_second": 34.685,
"eval_steps_per_second": 4.393,
"step": 54
},
{
"epoch": 0.21368322399250234,
"grad_norm": 0.20837906002998352,
"learning_rate": 2.3256088156396868e-05,
"loss": 1.818,
"step": 57
},
{
"epoch": 0.22492970946579194,
"grad_norm": 0.22922676801681519,
"learning_rate": 2.0658795558326743e-05,
"loss": 2.0037,
"step": 60
},
{
"epoch": 0.23617619493908154,
"grad_norm": 0.1948896199464798,
"learning_rate": 1.8109066104575023e-05,
"loss": 1.8037,
"step": 63
},
{
"epoch": 0.23617619493908154,
"eval_loss": 1.9288066625595093,
"eval_runtime": 13.0065,
"eval_samples_per_second": 34.598,
"eval_steps_per_second": 4.382,
"step": 63
},
{
"epoch": 0.24742268041237114,
"grad_norm": 0.22800111770629883,
"learning_rate": 1.56348351646022e-05,
"loss": 1.9253,
"step": 66
},
{
"epoch": 0.25866916588566075,
"grad_norm": 0.21982352435588837,
"learning_rate": 1.3263210930352737e-05,
"loss": 1.8411,
"step": 69
},
{
"epoch": 0.26991565135895035,
"grad_norm": 0.20461618900299072,
"learning_rate": 1.1020177413231334e-05,
"loss": 1.955,
"step": 72
},
{
"epoch": 0.26991565135895035,
"eval_loss": 1.9237501621246338,
"eval_runtime": 13.0394,
"eval_samples_per_second": 34.511,
"eval_steps_per_second": 4.371,
"step": 72
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.361735423983616e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
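
This appears to be the `trainer_state.json` that the Hugging Face `transformers` `Trainer` writes into each checkpoint directory. Its `log_history` interleaves training records (emitted every `logging_steps` = 3 steps, carrying `loss`, `grad_norm`, and `learning_rate`) with evaluation records (emitted every `eval_steps` = 9 steps, carrying `eval_loss`). A minimal sketch for inspecting it is below; the local filename `trainer_state.json` is an assumption, since in a real run the file usually sits inside a `checkpoint-<step>` directory.

```python
import json

# Load the checkpoint's trainer state. The path is an assumption; in a real
# checkpoint it typically lives at <output_dir>/checkpoint-72/trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split log_history into its two kinds of records:
#   - training records: have a "loss" key (logged every `logging_steps` steps)
#   - evaluation records: have an "eval_loss" key (logged every `eval_steps` steps)
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"progress: step {state['global_step']}/{state['max_steps']} "
      f"(epoch {state['epoch']:.3f})")

for e in eval_log:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}")

# Crude check of whether evaluation loss is still improving between the last two evals.
if len(eval_log) >= 2:
    delta = eval_log[-1]["eval_loss"] - eval_log[-2]["eval_loss"]
    print(f"last eval delta: {delta:+.4f}")
```

Run against the state shown above, this would report evaluation loss falling from about 2.055 at step 1 to about 1.924 at step 72, i.e. still decreasing at the saved checkpoint.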