{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05315967838394578,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002657983919197289,
      "eval_loss": 3.3747382164001465,
      "eval_runtime": 116.3735,
      "eval_samples_per_second": 13.62,
      "eval_steps_per_second": 6.814,
      "step": 1
    },
    {
      "epoch": 0.0013289919595986443,
      "grad_norm": 0.19273613393306732,
      "learning_rate": 5e-05,
      "loss": 3.2518,
      "step": 5
    },
    {
      "epoch": 0.0026579839191972886,
      "grad_norm": 0.24546188116073608,
      "learning_rate": 0.0001,
      "loss": 3.3946,
      "step": 10
    },
    {
      "epoch": 0.003986975878795933,
      "grad_norm": 0.3712539076805115,
      "learning_rate": 9.98292246503335e-05,
      "loss": 3.3423,
      "step": 15
    },
    {
      "epoch": 0.005315967838394577,
      "grad_norm": 0.440332293510437,
      "learning_rate": 9.931806517013612e-05,
      "loss": 3.2702,
      "step": 20
    },
    {
      "epoch": 0.0066449597979932225,
      "grad_norm": 0.44500812888145447,
      "learning_rate": 9.847001329696653e-05,
      "loss": 3.1532,
      "step": 25
    },
    {
      "epoch": 0.007973951757591866,
      "grad_norm": 0.45136186480522156,
      "learning_rate": 9.729086208503174e-05,
      "loss": 3.1822,
      "step": 30
    },
    {
      "epoch": 0.009302943717190511,
      "grad_norm": 0.5240881443023682,
      "learning_rate": 9.578866633275288e-05,
      "loss": 3.2693,
      "step": 35
    },
    {
      "epoch": 0.010631935676789155,
      "grad_norm": 0.45544329285621643,
      "learning_rate": 9.397368756032445e-05,
      "loss": 3.2418,
      "step": 40
    },
    {
      "epoch": 0.0119609276363878,
      "grad_norm": 0.622912585735321,
      "learning_rate": 9.185832391312644e-05,
      "loss": 3.2255,
      "step": 45
    },
    {
      "epoch": 0.013289919595986445,
      "grad_norm": 0.759652316570282,
      "learning_rate": 8.945702546981969e-05,
      "loss": 3.3534,
      "step": 50
    },
    {
      "epoch": 0.013289919595986445,
      "eval_loss": 3.2474777698516846,
      "eval_runtime": 117.843,
      "eval_samples_per_second": 13.45,
      "eval_steps_per_second": 6.729,
      "step": 50
    },
    {
      "epoch": 0.014618911555585088,
      "grad_norm": 0.36193954944610596,
      "learning_rate": 8.678619553365659e-05,
      "loss": 3.3303,
      "step": 55
    },
    {
      "epoch": 0.015947903515183732,
      "grad_norm": 0.4073311686515808,
      "learning_rate": 8.386407858128706e-05,
      "loss": 3.1379,
      "step": 60
    },
    {
      "epoch": 0.017276895474782377,
      "grad_norm": 0.37899190187454224,
      "learning_rate": 8.07106356344834e-05,
      "loss": 3.1246,
      "step": 65
    },
    {
      "epoch": 0.018605887434381022,
      "grad_norm": 0.3801954984664917,
      "learning_rate": 7.734740790612136e-05,
      "loss": 3.2062,
      "step": 70
    },
    {
      "epoch": 0.019934879393979667,
      "grad_norm": 0.418718546628952,
      "learning_rate": 7.379736965185368e-05,
      "loss": 3.1172,
      "step": 75
    },
    {
      "epoch": 0.02126387135357831,
      "grad_norm": 0.4203377962112427,
      "learning_rate": 7.008477123264848e-05,
      "loss": 3.1633,
      "step": 80
    },
    {
      "epoch": 0.022592863313176954,
      "grad_norm": 0.45516136288642883,
      "learning_rate": 6.623497346023418e-05,
      "loss": 3.1142,
      "step": 85
    },
    {
      "epoch": 0.0239218552727756,
      "grad_norm": 0.4883389472961426,
      "learning_rate": 6.227427435703997e-05,
      "loss": 3.125,
      "step": 90
    },
    {
      "epoch": 0.025250847232374245,
      "grad_norm": 0.6239806413650513,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 3.1914,
      "step": 95
    },
    {
      "epoch": 0.02657983919197289,
      "grad_norm": 0.8938126564025879,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 3.3532,
      "step": 100
    },
    {
      "epoch": 0.02657983919197289,
      "eval_loss": 3.226165771484375,
      "eval_runtime": 118.0147,
      "eval_samples_per_second": 13.431,
      "eval_steps_per_second": 6.72,
      "step": 100
    },
    {
      "epoch": 0.02790883115157153,
      "grad_norm": 0.40662944316864014,
      "learning_rate": 5e-05,
      "loss": 3.3708,
      "step": 105
    },
    {
      "epoch": 0.029237823111170177,
      "grad_norm": 0.49958688020706177,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 3.2421,
      "step": 110
    },
    {
      "epoch": 0.030566815070768822,
      "grad_norm": 0.42382222414016724,
      "learning_rate": 4.17702704859633e-05,
      "loss": 3.2441,
      "step": 115
    },
    {
      "epoch": 0.031895807030367464,
      "grad_norm": 0.42244333028793335,
      "learning_rate": 3.772572564296005e-05,
      "loss": 3.1328,
      "step": 120
    },
    {
      "epoch": 0.03322479898996611,
      "grad_norm": 0.5140597820281982,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 3.235,
      "step": 125
    },
    {
      "epoch": 0.034553790949564754,
      "grad_norm": 0.5386475324630737,
      "learning_rate": 2.991522876735154e-05,
      "loss": 3.0768,
      "step": 130
    },
    {
      "epoch": 0.0358827829091634,
      "grad_norm": 0.5116773247718811,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 3.2749,
      "step": 135
    },
    {
      "epoch": 0.037211774868762044,
      "grad_norm": 0.5770620703697205,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 3.1097,
      "step": 140
    },
    {
      "epoch": 0.03854076682836069,
      "grad_norm": 0.685364305973053,
      "learning_rate": 1.928936436551661e-05,
      "loss": 3.2481,
      "step": 145
    },
    {
      "epoch": 0.039869758787959335,
      "grad_norm": 1.0047972202301025,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 3.4641,
      "step": 150
    },
    {
      "epoch": 0.039869758787959335,
      "eval_loss": 3.2148067951202393,
      "eval_runtime": 118.0135,
      "eval_samples_per_second": 13.431,
      "eval_steps_per_second": 6.72,
      "step": 150
    },
    {
      "epoch": 0.04119875074755798,
      "grad_norm": 0.4244877099990845,
      "learning_rate": 1.3213804466343421e-05,
      "loss": 3.1716,
      "step": 155
    },
    {
      "epoch": 0.04252774270715662,
      "grad_norm": 0.424883633852005,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 3.1719,
      "step": 160
    },
    {
      "epoch": 0.043856734666755263,
      "grad_norm": 0.42046844959259033,
      "learning_rate": 8.141676086873572e-06,
      "loss": 3.2414,
      "step": 165
    },
    {
      "epoch": 0.04518572662635391,
      "grad_norm": 0.46279048919677734,
      "learning_rate": 6.026312439675552e-06,
      "loss": 3.1362,
      "step": 170
    },
    {
      "epoch": 0.046514718585952554,
      "grad_norm": 0.47510379552841187,
      "learning_rate": 4.2113336672471245e-06,
      "loss": 3.0648,
      "step": 175
    },
    {
      "epoch": 0.0478437105455512,
      "grad_norm": 0.459209144115448,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 3.1999,
      "step": 180
    },
    {
      "epoch": 0.049172702505149844,
      "grad_norm": 0.5207657814025879,
      "learning_rate": 1.5299867030334814e-06,
      "loss": 3.1076,
      "step": 185
    },
    {
      "epoch": 0.05050169446474849,
      "grad_norm": 0.5179470777511597,
      "learning_rate": 6.819348298638839e-07,
      "loss": 3.1441,
      "step": 190
    },
    {
      "epoch": 0.051830686424347135,
      "grad_norm": 0.6202439069747925,
      "learning_rate": 1.7077534966650766e-07,
      "loss": 3.1897,
      "step": 195
    },
    {
      "epoch": 0.05315967838394578,
      "grad_norm": 0.8030288815498352,
      "learning_rate": 0.0,
      "loss": 3.3938,
      "step": 200
    },
    {
      "epoch": 0.05315967838394578,
      "eval_loss": 3.2137556076049805,
      "eval_runtime": 117.9329,
      "eval_samples_per_second": 13.44,
      "eval_steps_per_second": 6.724,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.97054418436096e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}