{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.013967699694456569,
  "eval_steps": 13,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002793539938891314,
      "grad_norm": 6.970461368560791,
      "learning_rate": 1e-05,
      "loss": 20.2109,
      "step": 1
    },
    {
      "epoch": 0.0002793539938891314,
      "eval_loss": 1.442408561706543,
      "eval_runtime": 1007.3212,
      "eval_samples_per_second": 5.985,
      "eval_steps_per_second": 2.993,
      "step": 1
    },
    {
      "epoch": 0.0005587079877782628,
      "grad_norm": 8.621979713439941,
      "learning_rate": 2e-05,
      "loss": 19.3008,
      "step": 2
    },
    {
      "epoch": 0.0008380619816673942,
      "grad_norm": 25.636756896972656,
      "learning_rate": 3e-05,
      "loss": 23.4492,
      "step": 3
    },
    {
      "epoch": 0.0011174159755565256,
      "grad_norm": 28.452890396118164,
      "learning_rate": 4e-05,
      "loss": 24.0312,
      "step": 4
    },
    {
      "epoch": 0.001396769969445657,
      "grad_norm": 25.22979164123535,
      "learning_rate": 5e-05,
      "loss": 23.2031,
      "step": 5
    },
    {
      "epoch": 0.0016761239633347883,
      "grad_norm": 11.268664360046387,
      "learning_rate": 6e-05,
      "loss": 21.0352,
      "step": 6
    },
    {
      "epoch": 0.0019554779572239197,
      "grad_norm": 7.0515360832214355,
      "learning_rate": 7e-05,
      "loss": 18.0703,
      "step": 7
    },
    {
      "epoch": 0.0022348319511130512,
      "grad_norm": 14.672618865966797,
      "learning_rate": 8e-05,
      "loss": 20.1523,
      "step": 8
    },
    {
      "epoch": 0.0025141859450021824,
      "grad_norm": 36.66195297241211,
      "learning_rate": 9e-05,
      "loss": 26.6875,
      "step": 9
    },
    {
      "epoch": 0.002793539938891314,
      "grad_norm": 19.24175262451172,
      "learning_rate": 0.0001,
      "loss": 23.5977,
      "step": 10
    },
    {
      "epoch": 0.003072893932780445,
      "grad_norm": 9.555656433105469,
      "learning_rate": 9.98458666866564e-05,
      "loss": 19.2812,
      "step": 11
    },
    {
      "epoch": 0.0033522479266695766,
      "grad_norm": 12.923059463500977,
      "learning_rate": 9.938441702975689e-05,
      "loss": 21.9219,
      "step": 12
    },
    {
      "epoch": 0.003631601920558708,
      "grad_norm": 7.538259506225586,
      "learning_rate": 9.861849601988383e-05,
      "loss": 18.7695,
      "step": 13
    },
    {
      "epoch": 0.003631601920558708,
      "eval_loss": 1.2370249032974243,
      "eval_runtime": 888.687,
      "eval_samples_per_second": 6.784,
      "eval_steps_per_second": 3.393,
      "step": 13
    },
    {
      "epoch": 0.003910955914447839,
      "grad_norm": 8.404040336608887,
      "learning_rate": 9.755282581475769e-05,
      "loss": 18.8438,
      "step": 14
    },
    {
      "epoch": 0.0041903099083369704,
      "grad_norm": 23.96065902709961,
      "learning_rate": 9.619397662556435e-05,
      "loss": 21.5391,
      "step": 15
    },
    {
      "epoch": 0.0044696639022261024,
      "grad_norm": 8.518821716308594,
      "learning_rate": 9.45503262094184e-05,
      "loss": 17.5547,
      "step": 16
    },
    {
      "epoch": 0.004749017896115234,
      "grad_norm": 9.683491706848145,
      "learning_rate": 9.263200821770461e-05,
      "loss": 18.3867,
      "step": 17
    },
    {
      "epoch": 0.005028371890004365,
      "grad_norm": 12.909642219543457,
      "learning_rate": 9.045084971874738e-05,
      "loss": 20.334,
      "step": 18
    },
    {
      "epoch": 0.005307725883893497,
      "grad_norm": 6.089707851409912,
      "learning_rate": 8.802029828000156e-05,
      "loss": 18.9375,
      "step": 19
    },
    {
      "epoch": 0.005587079877782628,
      "grad_norm": 8.212544441223145,
      "learning_rate": 8.535533905932738e-05,
      "loss": 19.8164,
      "step": 20
    },
    {
      "epoch": 0.005866433871671759,
      "grad_norm": 8.69490909576416,
      "learning_rate": 8.247240241650918e-05,
      "loss": 19.0977,
      "step": 21
    },
    {
      "epoch": 0.00614578786556089,
      "grad_norm": 6.588229179382324,
      "learning_rate": 7.938926261462366e-05,
      "loss": 18.7383,
      "step": 22
    },
    {
      "epoch": 0.006425141859450022,
      "grad_norm": 6.510587215423584,
      "learning_rate": 7.612492823579745e-05,
      "loss": 19.0391,
      "step": 23
    },
    {
      "epoch": 0.006704495853339153,
      "grad_norm": 10.146479606628418,
      "learning_rate": 7.269952498697734e-05,
      "loss": 20.457,
      "step": 24
    },
    {
      "epoch": 0.006983849847228284,
      "grad_norm": 6.790075302124023,
      "learning_rate": 6.91341716182545e-05,
      "loss": 19.5586,
      "step": 25
    },
    {
      "epoch": 0.007263203841117416,
      "grad_norm": 6.229572296142578,
      "learning_rate": 6.545084971874738e-05,
      "loss": 19.375,
      "step": 26
    },
    {
      "epoch": 0.007263203841117416,
      "eval_loss": 1.168168306350708,
      "eval_runtime": 888.7918,
      "eval_samples_per_second": 6.783,
      "eval_steps_per_second": 3.392,
      "step": 26
    },
    {
      "epoch": 0.0075425578350065475,
      "grad_norm": 7.612943649291992,
      "learning_rate": 6.167226819279528e-05,
      "loss": 19.8984,
      "step": 27
    },
    {
      "epoch": 0.007821911828895679,
      "grad_norm": 7.850345134735107,
      "learning_rate": 5.782172325201155e-05,
      "loss": 18.8711,
      "step": 28
    },
    {
      "epoch": 0.00810126582278481,
      "grad_norm": 8.194131851196289,
      "learning_rate": 5.392295478639225e-05,
      "loss": 18.4531,
      "step": 29
    },
    {
      "epoch": 0.008380619816673941,
      "grad_norm": 8.798739433288574,
      "learning_rate": 5e-05,
      "loss": 20.082,
      "step": 30
    },
    {
      "epoch": 0.008659973810563073,
      "grad_norm": 4.806086540222168,
      "learning_rate": 4.607704521360776e-05,
      "loss": 18.3242,
      "step": 31
    },
    {
      "epoch": 0.008939327804452205,
      "grad_norm": 6.420837879180908,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 17.5234,
      "step": 32
    },
    {
      "epoch": 0.009218681798341335,
      "grad_norm": 5.769430637359619,
      "learning_rate": 3.832773180720475e-05,
      "loss": 19.3711,
      "step": 33
    },
    {
      "epoch": 0.009498035792230467,
      "grad_norm": 6.312093734741211,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 17.0234,
      "step": 34
    },
    {
      "epoch": 0.0097773897861196,
      "grad_norm": 10.681304931640625,
      "learning_rate": 3.086582838174551e-05,
      "loss": 19.4766,
      "step": 35
    },
    {
      "epoch": 0.01005674378000873,
      "grad_norm": 8.919983863830566,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 16.9336,
      "step": 36
    },
    {
      "epoch": 0.010336097773897861,
      "grad_norm": 7.540457725524902,
      "learning_rate": 2.3875071764202563e-05,
      "loss": 17.8359,
      "step": 37
    },
    {
      "epoch": 0.010615451767786993,
      "grad_norm": 6.8388752937316895,
      "learning_rate": 2.061073738537635e-05,
      "loss": 19.4062,
      "step": 38
    },
    {
      "epoch": 0.010894805761676124,
      "grad_norm": 5.113622188568115,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 17.5781,
      "step": 39
    },
    {
      "epoch": 0.010894805761676124,
      "eval_loss": 1.1385672092437744,
      "eval_runtime": 888.7356,
      "eval_samples_per_second": 6.784,
      "eval_steps_per_second": 3.392,
      "step": 39
    },
    {
      "epoch": 0.011174159755565256,
      "grad_norm": 5.941615581512451,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 18.2148,
      "step": 40
    },
    {
      "epoch": 0.011453513749454386,
      "grad_norm": 6.788694858551025,
      "learning_rate": 1.1979701719998453e-05,
      "loss": 18.3477,
      "step": 41
    },
    {
      "epoch": 0.011732867743343518,
      "grad_norm": 6.989747524261475,
      "learning_rate": 9.549150281252633e-06,
      "loss": 19.8516,
      "step": 42
    },
    {
      "epoch": 0.01201222173723265,
      "grad_norm": 9.784863471984863,
      "learning_rate": 7.367991782295391e-06,
      "loss": 16.9668,
      "step": 43
    },
    {
      "epoch": 0.01229157573112178,
      "grad_norm": 7.244667053222656,
      "learning_rate": 5.449673790581611e-06,
      "loss": 18.1797,
      "step": 44
    },
    {
      "epoch": 0.012570929725010912,
      "grad_norm": 7.783876419067383,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 18.0039,
      "step": 45
    },
    {
      "epoch": 0.012850283718900044,
      "grad_norm": 12.166605949401855,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 18.4736,
      "step": 46
    },
    {
      "epoch": 0.013129637712789174,
      "grad_norm": 7.943640232086182,
      "learning_rate": 1.3815039801161721e-06,
      "loss": 17.7656,
      "step": 47
    },
    {
      "epoch": 0.013408991706678306,
      "grad_norm": 7.052063941955566,
      "learning_rate": 6.15582970243117e-07,
      "loss": 17.332,
      "step": 48
    },
    {
      "epoch": 0.013688345700567438,
      "grad_norm": 7.097042560577393,
      "learning_rate": 1.5413331334360182e-07,
      "loss": 17.2266,
      "step": 49
    },
    {
      "epoch": 0.013967699694456569,
      "grad_norm": 9.13848876953125,
      "learning_rate": 0.0,
      "loss": 18.5176,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.072003618832384e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}