{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.004524323896347739,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.26216194817387e-05,
"eval_loss": 4.674833297729492,
"eval_runtime": 1630.3176,
"eval_samples_per_second": 11.417,
"eval_steps_per_second": 5.709,
"step": 1
},
{
"epoch": 0.00011310809740869349,
"grad_norm": 1.904850959777832,
"learning_rate": 5e-05,
"loss": 1.0451,
"step": 5
},
{
"epoch": 0.00022621619481738697,
"grad_norm": 3.487840414047241,
"learning_rate": 0.0001,
"loss": 1.0945,
"step": 10
},
{
"epoch": 0.00033932429222608047,
"grad_norm": 7.243443965911865,
"learning_rate": 9.98292246503335e-05,
"loss": 1.5305,
"step": 15
},
{
"epoch": 0.00045243238963477394,
"grad_norm": 9.466878890991211,
"learning_rate": 9.931806517013612e-05,
"loss": 0.8418,
"step": 20
},
{
"epoch": 0.0005655404870434674,
"grad_norm": 14.048686981201172,
"learning_rate": 9.847001329696653e-05,
"loss": 0.612,
"step": 25
},
{
"epoch": 0.0006786485844521609,
"grad_norm": 12.626312255859375,
"learning_rate": 9.729086208503174e-05,
"loss": 0.5316,
"step": 30
},
{
"epoch": 0.0007917566818608545,
"grad_norm": 1.7504353523254395,
"learning_rate": 9.578866633275288e-05,
"loss": 0.2136,
"step": 35
},
{
"epoch": 0.0009048647792695479,
"grad_norm": 20.777067184448242,
"learning_rate": 9.397368756032445e-05,
"loss": 0.4962,
"step": 40
},
{
"epoch": 0.0010179728766782413,
"grad_norm": 15.338814735412598,
"learning_rate": 9.185832391312644e-05,
"loss": 0.4096,
"step": 45
},
{
"epoch": 0.0011310809740869348,
"grad_norm": 16.687267303466797,
"learning_rate": 8.945702546981969e-05,
"loss": 0.9656,
"step": 50
},
{
"epoch": 0.0011310809740869348,
"eval_loss": 0.6156600713729858,
"eval_runtime": 1638.7944,
"eval_samples_per_second": 11.358,
"eval_steps_per_second": 5.679,
"step": 50
},
{
"epoch": 0.0012441890714956284,
"grad_norm": 2.586226224899292,
"learning_rate": 8.678619553365659e-05,
"loss": 0.7048,
"step": 55
},
{
"epoch": 0.0013572971689043219,
"grad_norm": 5.269473552703857,
"learning_rate": 8.386407858128706e-05,
"loss": 0.7112,
"step": 60
},
{
"epoch": 0.0014704052663130154,
"grad_norm": 12.071560859680176,
"learning_rate": 8.07106356344834e-05,
"loss": 0.8531,
"step": 65
},
{
"epoch": 0.001583513363721709,
"grad_norm": 7.238372325897217,
"learning_rate": 7.734740790612136e-05,
"loss": 0.6405,
"step": 70
},
{
"epoch": 0.0016966214611304022,
"grad_norm": 7.999783039093018,
"learning_rate": 7.379736965185368e-05,
"loss": 0.5897,
"step": 75
},
{
"epoch": 0.0018097295585390958,
"grad_norm": 4.573731422424316,
"learning_rate": 7.008477123264848e-05,
"loss": 0.4493,
"step": 80
},
{
"epoch": 0.0019228376559477893,
"grad_norm": 9.74732780456543,
"learning_rate": 6.623497346023418e-05,
"loss": 0.3617,
"step": 85
},
{
"epoch": 0.0020359457533564826,
"grad_norm": 10.304098129272461,
"learning_rate": 6.227427435703997e-05,
"loss": 0.3788,
"step": 90
},
{
"epoch": 0.0021490538507651764,
"grad_norm": 4.553127765655518,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.2911,
"step": 95
},
{
"epoch": 0.0022621619481738697,
"grad_norm": 12.018383979797363,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.9804,
"step": 100
},
{
"epoch": 0.0022621619481738697,
"eval_loss": 0.46400874853134155,
"eval_runtime": 1639.1544,
"eval_samples_per_second": 11.355,
"eval_steps_per_second": 5.678,
"step": 100
},
{
"epoch": 0.0023752700455825634,
"grad_norm": 2.609111785888672,
"learning_rate": 5e-05,
"loss": 0.5825,
"step": 105
},
{
"epoch": 0.0024883781429912567,
"grad_norm": 1.579280138015747,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.542,
"step": 110
},
{
"epoch": 0.00260148624039995,
"grad_norm": 9.289449691772461,
"learning_rate": 4.17702704859633e-05,
"loss": 0.8798,
"step": 115
},
{
"epoch": 0.0027145943378086438,
"grad_norm": 5.802079677581787,
"learning_rate": 3.772572564296005e-05,
"loss": 0.7821,
"step": 120
},
{
"epoch": 0.002827702435217337,
"grad_norm": 5.292053699493408,
"learning_rate": 3.3765026539765834e-05,
"loss": 0.5794,
"step": 125
},
{
"epoch": 0.002940810532626031,
"grad_norm": 10.068510055541992,
"learning_rate": 2.991522876735154e-05,
"loss": 0.2694,
"step": 130
},
{
"epoch": 0.003053918630034724,
"grad_norm": 12.650135040283203,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.2636,
"step": 135
},
{
"epoch": 0.003167026727443418,
"grad_norm": 0.9717761278152466,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.2023,
"step": 140
},
{
"epoch": 0.003280134824852111,
"grad_norm": 5.5953288078308105,
"learning_rate": 1.928936436551661e-05,
"loss": 0.4384,
"step": 145
},
{
"epoch": 0.0033932429222608045,
"grad_norm": 7.228976726531982,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.6151,
"step": 150
},
{
"epoch": 0.0033932429222608045,
"eval_loss": 0.42118245363235474,
"eval_runtime": 1638.6941,
"eval_samples_per_second": 11.358,
"eval_steps_per_second": 5.68,
"step": 150
},
{
"epoch": 0.0035063510196694982,
"grad_norm": 3.614445209503174,
"learning_rate": 1.3213804466343421e-05,
"loss": 0.5205,
"step": 155
},
{
"epoch": 0.0036194591170781915,
"grad_norm": 2.2330234050750732,
"learning_rate": 1.0542974530180327e-05,
"loss": 0.539,
"step": 160
},
{
"epoch": 0.0037325672144868853,
"grad_norm": 7.5552802085876465,
"learning_rate": 8.141676086873572e-06,
"loss": 0.6344,
"step": 165
},
{
"epoch": 0.0038456753118955786,
"grad_norm": 4.733217716217041,
"learning_rate": 6.026312439675552e-06,
"loss": 0.4547,
"step": 170
},
{
"epoch": 0.003958783409304272,
"grad_norm": 2.8408470153808594,
"learning_rate": 4.2113336672471245e-06,
"loss": 0.2753,
"step": 175
},
{
"epoch": 0.004071891506712965,
"grad_norm": 13.729647636413574,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.2504,
"step": 180
},
{
"epoch": 0.004184999604121659,
"grad_norm": 2.171924352645874,
"learning_rate": 1.5299867030334814e-06,
"loss": 0.1222,
"step": 185
},
{
"epoch": 0.004298107701530353,
"grad_norm": 0.47484132647514343,
"learning_rate": 6.819348298638839e-07,
"loss": 0.1661,
"step": 190
},
{
"epoch": 0.0044112157989390464,
"grad_norm": 5.313414573669434,
"learning_rate": 1.7077534966650766e-07,
"loss": 0.4323,
"step": 195
},
{
"epoch": 0.004524323896347739,
"grad_norm": 5.613385200500488,
"learning_rate": 0.0,
"loss": 0.3597,
"step": 200
},
{
"epoch": 0.004524323896347739,
"eval_loss": 0.41343122720718384,
"eval_runtime": 1638.9598,
"eval_samples_per_second": 11.357,
"eval_steps_per_second": 5.679,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.209325172883456e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}