{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0031486146095717885,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.574307304785894e-05,
"eval_loss": 2.3866379261016846,
"eval_runtime": 1500.9459,
"eval_samples_per_second": 17.819,
"eval_steps_per_second": 8.91,
"step": 1
},
{
"epoch": 7.871536523929471e-05,
"grad_norm": 12.448644638061523,
"learning_rate": 5e-05,
"loss": 7.7391,
"step": 5
},
{
"epoch": 0.00015743073047858942,
"grad_norm": 6.404262542724609,
"learning_rate": 0.0001,
"loss": 10.35,
"step": 10
},
{
"epoch": 0.00023614609571788412,
"grad_norm": 6.720837593078613,
"learning_rate": 9.98292246503335e-05,
"loss": 5.6461,
"step": 15
},
{
"epoch": 0.00031486146095717883,
"grad_norm": 5.995464324951172,
"learning_rate": 9.931806517013612e-05,
"loss": 2.7984,
"step": 20
},
{
"epoch": 0.00039357682619647357,
"grad_norm": 9.630922317504883,
"learning_rate": 9.847001329696653e-05,
"loss": 5.0363,
"step": 25
},
{
"epoch": 0.00047229219143576825,
"grad_norm": 7.160970687866211,
"learning_rate": 9.729086208503174e-05,
"loss": 6.5517,
"step": 30
},
{
"epoch": 0.000551007556675063,
"grad_norm": 9.611334800720215,
"learning_rate": 9.578866633275288e-05,
"loss": 5.6197,
"step": 35
},
{
"epoch": 0.0006297229219143577,
"grad_norm": 10.17956256866455,
"learning_rate": 9.397368756032445e-05,
"loss": 5.6486,
"step": 40
},
{
"epoch": 0.0007084382871536523,
"grad_norm": 9.616400718688965,
"learning_rate": 9.185832391312644e-05,
"loss": 4.7867,
"step": 45
},
{
"epoch": 0.0007871536523929471,
"grad_norm": 10.657410621643066,
"learning_rate": 8.945702546981969e-05,
"loss": 4.4847,
"step": 50
},
{
"epoch": 0.0007871536523929471,
"eval_loss": 1.1559579372406006,
"eval_runtime": 1500.0771,
"eval_samples_per_second": 17.83,
"eval_steps_per_second": 8.915,
"step": 50
},
{
"epoch": 0.0008658690176322418,
"grad_norm": 8.259451866149902,
"learning_rate": 8.678619553365659e-05,
"loss": 7.1665,
"step": 55
},
{
"epoch": 0.0009445843828715365,
"grad_norm": 4.17130708694458,
"learning_rate": 8.386407858128706e-05,
"loss": 3.9723,
"step": 60
},
{
"epoch": 0.0010232997481108312,
"grad_norm": 3.682018280029297,
"learning_rate": 8.07106356344834e-05,
"loss": 1.8443,
"step": 65
},
{
"epoch": 0.001102015113350126,
"grad_norm": 3.572805881500244,
"learning_rate": 7.734740790612136e-05,
"loss": 3.2193,
"step": 70
},
{
"epoch": 0.0011807304785894208,
"grad_norm": 5.4666523933410645,
"learning_rate": 7.379736965185368e-05,
"loss": 4.6715,
"step": 75
},
{
"epoch": 0.0012594458438287153,
"grad_norm": 7.5685200691223145,
"learning_rate": 7.008477123264848e-05,
"loss": 5.328,
"step": 80
},
{
"epoch": 0.0013381612090680101,
"grad_norm": 9.007378578186035,
"learning_rate": 6.623497346023418e-05,
"loss": 4.6684,
"step": 85
},
{
"epoch": 0.0014168765743073047,
"grad_norm": 5.461382865905762,
"learning_rate": 6.227427435703997e-05,
"loss": 4.786,
"step": 90
},
{
"epoch": 0.0014955919395465995,
"grad_norm": 9.335121154785156,
"learning_rate": 5.8229729514036705e-05,
"loss": 5.2938,
"step": 95
},
{
"epoch": 0.0015743073047858943,
"grad_norm": 11.640975952148438,
"learning_rate": 5.4128967273616625e-05,
"loss": 4.8757,
"step": 100
},
{
"epoch": 0.0015743073047858943,
"eval_loss": 1.0214067697525024,
"eval_runtime": 1500.7708,
"eval_samples_per_second": 17.822,
"eval_steps_per_second": 8.911,
"step": 100
},
{
"epoch": 0.0016530226700251888,
"grad_norm": 8.590002059936523,
"learning_rate": 5e-05,
"loss": 4.4979,
"step": 105
},
{
"epoch": 0.0017317380352644836,
"grad_norm": 8.198857307434082,
"learning_rate": 4.5871032726383386e-05,
"loss": 2.8344,
"step": 110
},
{
"epoch": 0.0018104534005037784,
"grad_norm": 2.7320914268493652,
"learning_rate": 4.17702704859633e-05,
"loss": 2.9306,
"step": 115
},
{
"epoch": 0.001889168765743073,
"grad_norm": 3.878121852874756,
"learning_rate": 3.772572564296005e-05,
"loss": 2.7627,
"step": 120
},
{
"epoch": 0.001967884130982368,
"grad_norm": 9.245368957519531,
"learning_rate": 3.3765026539765834e-05,
"loss": 4.5011,
"step": 125
},
{
"epoch": 0.0020465994962216624,
"grad_norm": 7.411713123321533,
"learning_rate": 2.991522876735154e-05,
"loss": 5.2115,
"step": 130
},
{
"epoch": 0.0021253148614609574,
"grad_norm": 8.136418342590332,
"learning_rate": 2.6202630348146324e-05,
"loss": 4.8772,
"step": 135
},
{
"epoch": 0.002204030226700252,
"grad_norm": 8.70052719116211,
"learning_rate": 2.2652592093878666e-05,
"loss": 5.0952,
"step": 140
},
{
"epoch": 0.0022827455919395465,
"grad_norm": 7.519270420074463,
"learning_rate": 1.928936436551661e-05,
"loss": 4.8706,
"step": 145
},
{
"epoch": 0.0023614609571788415,
"grad_norm": 11.668645858764648,
"learning_rate": 1.6135921418712956e-05,
"loss": 4.718,
"step": 150
},
{
"epoch": 0.0023614609571788415,
"eval_loss": 0.9602340459823608,
"eval_runtime": 1507.1015,
"eval_samples_per_second": 17.747,
"eval_steps_per_second": 8.873,
"step": 150
},
{
"epoch": 0.002440176322418136,
"grad_norm": 6.727120876312256,
"learning_rate": 1.3213804466343421e-05,
"loss": 5.3968,
"step": 155
},
{
"epoch": 0.0025188916876574307,
"grad_norm": 2.8705267906188965,
"learning_rate": 1.0542974530180327e-05,
"loss": 2.9229,
"step": 160
},
{
"epoch": 0.0025976070528967252,
"grad_norm": 4.095298767089844,
"learning_rate": 8.141676086873572e-06,
"loss": 1.7476,
"step": 165
},
{
"epoch": 0.0026763224181360202,
"grad_norm": 5.578668117523193,
"learning_rate": 6.026312439675552e-06,
"loss": 3.0731,
"step": 170
},
{
"epoch": 0.002755037783375315,
"grad_norm": 3.9415435791015625,
"learning_rate": 4.2113336672471245e-06,
"loss": 3.6369,
"step": 175
},
{
"epoch": 0.0028337531486146094,
"grad_norm": 12.799013137817383,
"learning_rate": 2.7091379149682685e-06,
"loss": 4.7291,
"step": 180
},
{
"epoch": 0.0029124685138539044,
"grad_norm": 11.411933898925781,
"learning_rate": 1.5299867030334814e-06,
"loss": 4.3032,
"step": 185
},
{
"epoch": 0.002991183879093199,
"grad_norm": 8.253731727600098,
"learning_rate": 6.819348298638839e-07,
"loss": 4.2849,
"step": 190
},
{
"epoch": 0.0030698992443324935,
"grad_norm": 9.577661514282227,
"learning_rate": 1.7077534966650766e-07,
"loss": 4.268,
"step": 195
},
{
"epoch": 0.0031486146095717885,
"grad_norm": 10.62493896484375,
"learning_rate": 0.0,
"loss": 4.8146,
"step": 200
},
{
"epoch": 0.0031486146095717885,
"eval_loss": 0.9478885531425476,
"eval_runtime": 1500.0374,
"eval_samples_per_second": 17.83,
"eval_steps_per_second": 8.915,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.951789291130061e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}