{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.050100200400801605,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000250501002004008,
"eval_loss": 3.704983949661255,
"eval_runtime": 18.3825,
"eval_samples_per_second": 91.446,
"eval_steps_per_second": 45.75,
"step": 1
},
{
"epoch": 0.00125250501002004,
"grad_norm": 0.9701552391052246,
"learning_rate": 5e-05,
"loss": 3.6869,
"step": 5
},
{
"epoch": 0.00250501002004008,
"grad_norm": 1.407057523727417,
"learning_rate": 0.0001,
"loss": 3.577,
"step": 10
},
{
"epoch": 0.00375751503006012,
"grad_norm": 1.114660620689392,
"learning_rate": 9.98292246503335e-05,
"loss": 3.6649,
"step": 15
},
{
"epoch": 0.00501002004008016,
"grad_norm": 1.180700659751892,
"learning_rate": 9.931806517013612e-05,
"loss": 3.4819,
"step": 20
},
{
"epoch": 0.006262525050100201,
"grad_norm": 1.1361351013183594,
"learning_rate": 9.847001329696653e-05,
"loss": 3.1851,
"step": 25
},
{
"epoch": 0.00751503006012024,
"grad_norm": 1.2357608079910278,
"learning_rate": 9.729086208503174e-05,
"loss": 3.1741,
"step": 30
},
{
"epoch": 0.00876753507014028,
"grad_norm": 1.252459168434143,
"learning_rate": 9.578866633275288e-05,
"loss": 3.1116,
"step": 35
},
{
"epoch": 0.01002004008016032,
"grad_norm": 1.5677341222763062,
"learning_rate": 9.397368756032445e-05,
"loss": 2.9296,
"step": 40
},
{
"epoch": 0.011272545090180362,
"grad_norm": 1.991075873374939,
"learning_rate": 9.185832391312644e-05,
"loss": 2.8175,
"step": 45
},
{
"epoch": 0.012525050100200401,
"grad_norm": 1.5289764404296875,
"learning_rate": 8.945702546981969e-05,
"loss": 2.7057,
"step": 50
},
{
"epoch": 0.012525050100200401,
"eval_loss": 2.62386417388916,
"eval_runtime": 18.7414,
"eval_samples_per_second": 89.694,
"eval_steps_per_second": 44.874,
"step": 50
},
{
"epoch": 0.01377755511022044,
"grad_norm": 1.3579661846160889,
"learning_rate": 8.678619553365659e-05,
"loss": 2.561,
"step": 55
},
{
"epoch": 0.01503006012024048,
"grad_norm": 1.8097760677337646,
"learning_rate": 8.386407858128706e-05,
"loss": 2.5397,
"step": 60
},
{
"epoch": 0.01628256513026052,
"grad_norm": 1.3172852993011475,
"learning_rate": 8.07106356344834e-05,
"loss": 2.4455,
"step": 65
},
{
"epoch": 0.01753507014028056,
"grad_norm": 2.0352325439453125,
"learning_rate": 7.734740790612136e-05,
"loss": 2.4834,
"step": 70
},
{
"epoch": 0.018787575150300603,
"grad_norm": 1.3835731744766235,
"learning_rate": 7.379736965185368e-05,
"loss": 2.4561,
"step": 75
},
{
"epoch": 0.02004008016032064,
"grad_norm": 1.4761914014816284,
"learning_rate": 7.008477123264848e-05,
"loss": 2.3603,
"step": 80
},
{
"epoch": 0.021292585170340682,
"grad_norm": 1.3032515048980713,
"learning_rate": 6.623497346023418e-05,
"loss": 2.2767,
"step": 85
},
{
"epoch": 0.022545090180360723,
"grad_norm": 1.3516151905059814,
"learning_rate": 6.227427435703997e-05,
"loss": 2.3037,
"step": 90
},
{
"epoch": 0.02379759519038076,
"grad_norm": 1.2633843421936035,
"learning_rate": 5.8229729514036705e-05,
"loss": 2.3047,
"step": 95
},
{
"epoch": 0.025050100200400802,
"grad_norm": 2.345054864883423,
"learning_rate": 5.4128967273616625e-05,
"loss": 2.3067,
"step": 100
},
{
"epoch": 0.025050100200400802,
"eval_loss": 2.3011841773986816,
"eval_runtime": 18.2416,
"eval_samples_per_second": 92.152,
"eval_steps_per_second": 46.103,
"step": 100
},
{
"epoch": 0.02630260521042084,
"grad_norm": 1.3435356616973877,
"learning_rate": 5e-05,
"loss": 2.2789,
"step": 105
},
{
"epoch": 0.02755511022044088,
"grad_norm": 1.5827653408050537,
"learning_rate": 4.5871032726383386e-05,
"loss": 2.2494,
"step": 110
},
{
"epoch": 0.028807615230460923,
"grad_norm": 1.6257189512252808,
"learning_rate": 4.17702704859633e-05,
"loss": 2.1587,
"step": 115
},
{
"epoch": 0.03006012024048096,
"grad_norm": 1.565990686416626,
"learning_rate": 3.772572564296005e-05,
"loss": 2.2483,
"step": 120
},
{
"epoch": 0.031312625250501,
"grad_norm": 1.6951042413711548,
"learning_rate": 3.3765026539765834e-05,
"loss": 2.1623,
"step": 125
},
{
"epoch": 0.03256513026052104,
"grad_norm": 1.3800190687179565,
"learning_rate": 2.991522876735154e-05,
"loss": 2.1412,
"step": 130
},
{
"epoch": 0.03381763527054108,
"grad_norm": 1.6663275957107544,
"learning_rate": 2.6202630348146324e-05,
"loss": 2.1782,
"step": 135
},
{
"epoch": 0.03507014028056112,
"grad_norm": 1.4055718183517456,
"learning_rate": 2.2652592093878666e-05,
"loss": 2.1664,
"step": 140
},
{
"epoch": 0.036322645290581164,
"grad_norm": 1.295013189315796,
"learning_rate": 1.928936436551661e-05,
"loss": 2.0866,
"step": 145
},
{
"epoch": 0.037575150300601205,
"grad_norm": 2.0372097492218018,
"learning_rate": 1.6135921418712956e-05,
"loss": 2.0893,
"step": 150
},
{
"epoch": 0.037575150300601205,
"eval_loss": 2.1908504962921143,
"eval_runtime": 18.5604,
"eval_samples_per_second": 90.569,
"eval_steps_per_second": 45.312,
"step": 150
},
{
"epoch": 0.03882765531062124,
"grad_norm": 1.412229061126709,
"learning_rate": 1.3213804466343421e-05,
"loss": 2.1178,
"step": 155
},
{
"epoch": 0.04008016032064128,
"grad_norm": 1.448472023010254,
"learning_rate": 1.0542974530180327e-05,
"loss": 2.2415,
"step": 160
},
{
"epoch": 0.04133266533066132,
"grad_norm": 1.5521998405456543,
"learning_rate": 8.141676086873572e-06,
"loss": 2.1963,
"step": 165
},
{
"epoch": 0.042585170340681364,
"grad_norm": 1.5726743936538696,
"learning_rate": 6.026312439675552e-06,
"loss": 2.2136,
"step": 170
},
{
"epoch": 0.043837675350701405,
"grad_norm": 1.397105097770691,
"learning_rate": 4.2113336672471245e-06,
"loss": 2.1761,
"step": 175
},
{
"epoch": 0.045090180360721446,
"grad_norm": 1.438555359840393,
"learning_rate": 2.7091379149682685e-06,
"loss": 2.0546,
"step": 180
},
{
"epoch": 0.04634268537074148,
"grad_norm": 1.6175150871276855,
"learning_rate": 1.5299867030334814e-06,
"loss": 2.149,
"step": 185
},
{
"epoch": 0.04759519038076152,
"grad_norm": 1.9222791194915771,
"learning_rate": 6.819348298638839e-07,
"loss": 2.1708,
"step": 190
},
{
"epoch": 0.04884769539078156,
"grad_norm": 1.566523790359497,
"learning_rate": 1.7077534966650766e-07,
"loss": 2.1843,
"step": 195
},
{
"epoch": 0.050100200400801605,
"grad_norm": 1.3286877870559692,
"learning_rate": 0.0,
"loss": 2.1899,
"step": 200
},
{
"epoch": 0.050100200400801605,
"eval_loss": 2.1727023124694824,
"eval_runtime": 18.36,
"eval_samples_per_second": 91.558,
"eval_steps_per_second": 45.806,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1371706019020800.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
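
A minimal sketch of how this checkpoint state can be inspected. It assumes the JSON above is saved as `trainer_state.json` (the usual filename inside a transformers Trainer checkpoint directory); it splits `log_history` into training-loss and eval-loss entries using only the keys that actually appear in the file.

```python
# Sketch: read a Trainer state file and summarize its logged losses.
# Assumed filename: "trainer_state.json" (adjust to the checkpoint path you have).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"logged every {state['logging_steps']} steps, "
      f"evaluated every {state['eval_steps']} steps, "
      f"{state['global_step']} steps total")

for e in eval_logs:
    print(f"step {e['step']:>4}: eval_loss = {e['eval_loss']:.4f}")
```

Run against this file, the final line printed should correspond to step 200 with an eval loss of roughly 2.17, matching the last `eval_loss` entry above.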