{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.006941431670281995,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.470715835140998e-05,
      "eval_loss": 1.488693118095398,
      "eval_runtime": 333.2635,
      "eval_samples_per_second": 36.404,
      "eval_steps_per_second": 18.202,
      "step": 1
    },
    {
      "epoch": 0.0001735357917570499,
      "grad_norm": 0.0363650768995285,
      "learning_rate": 5e-05,
      "loss": 0.7421,
      "step": 5
    },
    {
      "epoch": 0.0003470715835140998,
      "grad_norm": 0.04388673976063728,
      "learning_rate": 0.0001,
      "loss": 0.9049,
      "step": 10
    },
    {
      "epoch": 0.0005206073752711496,
      "grad_norm": 0.04997824877500534,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.9169,
      "step": 15
    },
    {
      "epoch": 0.0006941431670281996,
      "grad_norm": 0.07242757081985474,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.9632,
      "step": 20
    },
    {
      "epoch": 0.0008676789587852494,
      "grad_norm": 0.08268485963344574,
      "learning_rate": 9.847001329696653e-05,
      "loss": 1.1005,
      "step": 25
    },
    {
      "epoch": 0.0010412147505422993,
      "grad_norm": 0.09746357053518295,
      "learning_rate": 9.729086208503174e-05,
      "loss": 1.3431,
      "step": 30
    },
    {
      "epoch": 0.0012147505422993492,
      "grad_norm": 0.179508239030838,
      "learning_rate": 9.578866633275288e-05,
      "loss": 1.4761,
      "step": 35
    },
    {
      "epoch": 0.0013882863340563992,
      "grad_norm": 0.21443940699100494,
      "learning_rate": 9.397368756032445e-05,
      "loss": 1.8133,
      "step": 40
    },
    {
      "epoch": 0.001561822125813449,
      "grad_norm": 0.4208425283432007,
      "learning_rate": 9.185832391312644e-05,
      "loss": 2.0637,
      "step": 45
    },
    {
      "epoch": 0.0017353579175704988,
      "grad_norm": 0.8397026062011719,
      "learning_rate": 8.945702546981969e-05,
      "loss": 3.094,
      "step": 50
    },
    {
      "epoch": 0.0017353579175704988,
      "eval_loss": 1.4274080991744995,
      "eval_runtime": 331.799,
      "eval_samples_per_second": 36.564,
      "eval_steps_per_second": 18.282,
      "step": 50
    },
    {
      "epoch": 0.0019088937093275488,
      "grad_norm": 0.04962216690182686,
      "learning_rate": 8.678619553365659e-05,
      "loss": 0.7878,
      "step": 55
    },
    {
      "epoch": 0.0020824295010845985,
      "grad_norm": 0.06914501637220383,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.9213,
      "step": 60
    },
    {
      "epoch": 0.0022559652928416485,
      "grad_norm": 0.0765681117773056,
      "learning_rate": 8.07106356344834e-05,
      "loss": 0.9687,
      "step": 65
    },
    {
      "epoch": 0.0024295010845986984,
      "grad_norm": 0.08268511295318604,
      "learning_rate": 7.734740790612136e-05,
      "loss": 1.0734,
      "step": 70
    },
    {
      "epoch": 0.0026030368763557484,
      "grad_norm": 0.09841444343328476,
      "learning_rate": 7.379736965185368e-05,
      "loss": 1.1728,
      "step": 75
    },
    {
      "epoch": 0.0027765726681127983,
      "grad_norm": 0.14268898963928223,
      "learning_rate": 7.008477123264848e-05,
      "loss": 1.3481,
      "step": 80
    },
    {
      "epoch": 0.0029501084598698483,
      "grad_norm": 0.21441680192947388,
      "learning_rate": 6.623497346023418e-05,
      "loss": 1.429,
      "step": 85
    },
    {
      "epoch": 0.003123644251626898,
      "grad_norm": 0.22959406673908234,
      "learning_rate": 6.227427435703997e-05,
      "loss": 1.6465,
      "step": 90
    },
    {
      "epoch": 0.0032971800433839477,
      "grad_norm": 0.46887078881263733,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 1.7265,
      "step": 95
    },
    {
      "epoch": 0.0034707158351409977,
      "grad_norm": 1.4313136339187622,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 3.0376,
      "step": 100
    },
    {
      "epoch": 0.0034707158351409977,
      "eval_loss": 1.3536286354064941,
      "eval_runtime": 331.6476,
      "eval_samples_per_second": 36.581,
      "eval_steps_per_second": 18.29,
      "step": 100
    },
    {
      "epoch": 0.0036442516268980476,
      "grad_norm": 0.0890691876411438,
      "learning_rate": 5e-05,
      "loss": 0.8272,
      "step": 105
    },
    {
      "epoch": 0.0038177874186550976,
      "grad_norm": 0.08088309317827225,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.8692,
      "step": 110
    },
    {
      "epoch": 0.0039913232104121475,
      "grad_norm": 0.08706238865852356,
      "learning_rate": 4.17702704859633e-05,
      "loss": 0.9777,
      "step": 115
    },
    {
      "epoch": 0.004164859002169197,
      "grad_norm": 0.10868410766124725,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.9109,
      "step": 120
    },
    {
      "epoch": 0.004338394793926247,
      "grad_norm": 0.12020254135131836,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 1.1462,
      "step": 125
    },
    {
      "epoch": 0.004511930585683297,
      "grad_norm": 0.17492125928401947,
      "learning_rate": 2.991522876735154e-05,
      "loss": 1.2319,
      "step": 130
    },
    {
      "epoch": 0.004685466377440347,
      "grad_norm": 0.21846410632133484,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 1.505,
      "step": 135
    },
    {
      "epoch": 0.004859002169197397,
      "grad_norm": 0.3237517476081848,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 1.6528,
      "step": 140
    },
    {
      "epoch": 0.005032537960954447,
      "grad_norm": 0.5105227828025818,
      "learning_rate": 1.928936436551661e-05,
      "loss": 1.8486,
      "step": 145
    },
    {
      "epoch": 0.005206073752711497,
      "grad_norm": 1.2406115531921387,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 2.7017,
      "step": 150
    },
    {
      "epoch": 0.005206073752711497,
      "eval_loss": 1.3264102935791016,
      "eval_runtime": 334.9033,
      "eval_samples_per_second": 36.225,
      "eval_steps_per_second": 18.113,
      "step": 150
    },
    {
      "epoch": 0.005379609544468546,
      "grad_norm": 0.07182818651199341,
      "learning_rate": 1.3213804466343421e-05,
      "loss": 0.6991,
      "step": 155
    },
    {
      "epoch": 0.005553145336225597,
      "grad_norm": 0.07443557679653168,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 0.8821,
      "step": 160
    },
    {
      "epoch": 0.005726681127982646,
      "grad_norm": 0.08473718911409378,
      "learning_rate": 8.141676086873572e-06,
      "loss": 0.9581,
      "step": 165
    },
    {
      "epoch": 0.0059002169197396965,
      "grad_norm": 0.10495265573263168,
      "learning_rate": 6.026312439675552e-06,
      "loss": 0.9973,
      "step": 170
    },
    {
      "epoch": 0.006073752711496746,
      "grad_norm": 0.11671217530965805,
      "learning_rate": 4.2113336672471245e-06,
      "loss": 1.1315,
      "step": 175
    },
    {
      "epoch": 0.006247288503253796,
      "grad_norm": 0.19537697732448578,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 1.1631,
      "step": 180
    },
    {
      "epoch": 0.006420824295010846,
      "grad_norm": 0.1836855262517929,
      "learning_rate": 1.5299867030334814e-06,
      "loss": 1.2259,
      "step": 185
    },
    {
      "epoch": 0.0065943600867678955,
      "grad_norm": 0.27528730034828186,
      "learning_rate": 6.819348298638839e-07,
      "loss": 1.5324,
      "step": 190
    },
    {
      "epoch": 0.006767895878524946,
      "grad_norm": 0.41066810488700867,
      "learning_rate": 1.7077534966650766e-07,
      "loss": 2.0259,
      "step": 195
    },
    {
      "epoch": 0.006941431670281995,
      "grad_norm": 1.2721341848373413,
      "learning_rate": 0.0,
      "loss": 2.6547,
      "step": 200
    },
    {
      "epoch": 0.006941431670281995,
      "eval_loss": 1.3213353157043457,
      "eval_runtime": 333.0889,
      "eval_samples_per_second": 36.423,
      "eval_steps_per_second": 18.211,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3202304118620160.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}