{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 270,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003703703703703704,
      "grad_norm": 0.6735236644744873,
      "learning_rate": 1.111111111111111e-05,
      "loss": 1.6565,
      "step": 1
    },
    {
      "epoch": 0.018518518518518517,
      "grad_norm": 1.047855257987976,
      "learning_rate": 5.5555555555555545e-05,
      "loss": 1.602,
      "step": 5
    },
    {
      "epoch": 0.037037037037037035,
      "grad_norm": 0.8623495101928711,
      "learning_rate": 0.00011111111111111109,
      "loss": 1.5366,
      "step": 10
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 1.691426396369934,
      "learning_rate": 0.00016666666666666666,
      "loss": 1.3396,
      "step": 15
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 0.9092234969139099,
      "learning_rate": 0.00022222222222222218,
      "loss": 0.9343,
      "step": 20
    },
    {
      "epoch": 0.09259259259259259,
      "grad_norm": 0.4330471158027649,
      "learning_rate": 0.0002777777777777778,
      "loss": 0.8052,
      "step": 25
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.3238295912742615,
      "learning_rate": 0.0002998871928756345,
      "loss": 0.7332,
      "step": 30
    },
    {
      "epoch": 0.12962962962962962,
      "grad_norm": 0.29893988370895386,
      "learning_rate": 0.0002991984303609902,
      "loss": 0.6842,
      "step": 35
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 0.18665669858455658,
      "learning_rate": 0.0002978864495017194,
      "loss": 0.6534,
      "step": 40
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 0.16193003952503204,
      "learning_rate": 0.00029595673058697357,
      "loss": 0.6364,
      "step": 45
    },
    {
      "epoch": 0.18518518518518517,
      "grad_norm": 0.14426390826702118,
      "learning_rate": 0.0002934173342660819,
      "loss": 0.6201,
      "step": 50
    },
    {
      "epoch": 0.2037037037037037,
      "grad_norm": 0.1245051696896553,
      "learning_rate": 0.00029027886787832844,
      "loss": 0.6117,
      "step": 55
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.14398011565208435,
      "learning_rate": 0.000286554441144922,
      "loss": 0.6018,
      "step": 60
    },
    {
      "epoch": 0.24074074074074073,
      "grad_norm": 0.1676715612411499,
      "learning_rate": 0.0002822596114082412,
      "loss": 0.6073,
      "step": 65
    },
    {
      "epoch": 0.25925925925925924,
      "grad_norm": 0.1390787661075592,
      "learning_rate": 0.0002774123186470946,
      "loss": 0.594,
      "step": 70
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 0.13183851540088654,
      "learning_rate": 0.0002720328105394451,
      "loss": 0.589,
      "step": 75
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.14338625967502594,
      "learning_rate": 0.00026614355788561985,
      "loss": 0.5699,
      "step": 80
    },
    {
      "epoch": 0.3148148148148148,
      "grad_norm": 0.1453503519296646,
      "learning_rate": 0.00025976916074529183,
      "loss": 0.5717,
      "step": 85
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.13095735013484955,
      "learning_rate": 0.00025293624568031,
      "loss": 0.5729,
      "step": 90
    },
    {
      "epoch": 0.35185185185185186,
      "grad_norm": 0.16470138728618622,
      "learning_rate": 0.0002456733545326059,
      "loss": 0.5723,
      "step": 95
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 0.13821054995059967,
      "learning_rate": 0.00023801082520176267,
      "loss": 0.5583,
      "step": 100
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 0.16222749650478363,
      "learning_rate": 0.0002299806649202537,
      "loss": 0.5695,
      "step": 105
    },
    {
      "epoch": 0.4074074074074074,
      "grad_norm": 0.1531219631433487,
      "learning_rate": 0.00022161641655569234,
      "loss": 0.5538,
      "step": 110
    },
    {
      "epoch": 0.42592592592592593,
      "grad_norm": 0.15927164256572723,
      "learning_rate": 0.00021295301849856435,
      "loss": 0.5544,
      "step": 115
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.16342100501060486,
      "learning_rate": 0.00020402665872070654,
      "loss": 0.5492,
      "step": 120
    },
    {
      "epoch": 0.46296296296296297,
      "grad_norm": 0.15846914052963257,
      "learning_rate": 0.00019487462361414626,
      "loss": 0.552,
      "step": 125
    },
    {
      "epoch": 0.48148148148148145,
      "grad_norm": 0.14684665203094482,
      "learning_rate": 0.00018553514224171783,
      "loss": 0.5486,
      "step": 130
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.15133161842823029,
      "learning_rate": 0.00017604722665003956,
      "loss": 0.555,
      "step": 135
    },
    {
      "epoch": 0.5185185185185185,
      "grad_norm": 0.15221647918224335,
      "learning_rate": 0.00016645050891187974,
      "loss": 0.5451,
      "step": 140
    },
    {
      "epoch": 0.5370370370370371,
      "grad_norm": 0.1682116985321045,
      "learning_rate": 0.00015678507557860595,
      "loss": 0.5435,
      "step": 145
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.15022225677967072,
      "learning_rate": 0.00014709130023422633,
      "loss": 0.5494,
      "step": 150
    },
    {
      "epoch": 0.5740740740740741,
      "grad_norm": 0.16548554599285126,
      "learning_rate": 0.00013740967485046393,
      "loss": 0.5528,
      "step": 155
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.16638116538524628,
      "learning_rate": 0.0001277806406473127,
      "loss": 0.5383,
      "step": 160
    },
    {
      "epoch": 0.6111111111111112,
      "grad_norm": 0.16238638758659363,
      "learning_rate": 0.00011824441916558842,
      "loss": 0.534,
      "step": 165
    },
    {
      "epoch": 0.6296296296296297,
      "grad_norm": 0.16411995887756348,
      "learning_rate": 0.00010884084425710479,
      "loss": 0.5385,
      "step": 170
    },
    {
      "epoch": 0.6481481481481481,
      "grad_norm": 0.16393356025218964,
      "learning_rate": 9.960919569426869e-05,
      "loss": 0.5353,
      "step": 175
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.1613512635231018,
      "learning_rate": 9.058803509412646e-05,
      "loss": 0.5434,
      "step": 180
    },
    {
      "epoch": 0.6851851851851852,
      "grad_norm": 0.1644994616508484,
      "learning_rate": 8.18150448422249e-05,
      "loss": 0.5345,
      "step": 185
    },
    {
      "epoch": 0.7037037037037037,
      "grad_norm": 0.15923035144805908,
      "learning_rate": 7.332687068911903e-05,
      "loss": 0.5385,
      "step": 190
    },
    {
      "epoch": 0.7222222222222222,
      "grad_norm": 0.16599948704242706,
      "learning_rate": 6.515896867701923e-05,
      "loss": 0.54,
      "step": 195
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 0.16556750237941742,
      "learning_rate": 5.734545703598145e-05,
      "loss": 0.5339,
      "step": 200
    },
    {
      "epoch": 0.7592592592592593,
      "grad_norm": 0.17149387300014496,
      "learning_rate": 4.991897366828704e-05,
      "loss": 0.5386,
      "step": 205
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 0.16719135642051697,
      "learning_rate": 4.2910539816315164e-05,
      "loss": 0.53,
      "step": 210
    },
    {
      "epoch": 0.7962962962962963,
      "grad_norm": 0.15575380623340607,
      "learning_rate": 3.6349430483382306e-05,
      "loss": 0.5288,
      "step": 215
    },
    {
      "epoch": 0.8148148148148148,
      "grad_norm": 0.15009023249149323,
      "learning_rate": 3.0263052148816046e-05,
      "loss": 0.5302,
      "step": 220
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 0.15368495881557465,
      "learning_rate": 2.4676828288059558e-05,
      "loss": 0.5369,
      "step": 225
    },
    {
      "epoch": 0.8518518518518519,
      "grad_norm": 0.1581869274377823,
      "learning_rate": 1.9614093176002828e-05,
      "loss": 0.5435,
      "step": 230
    },
    {
      "epoch": 0.8703703703703703,
      "grad_norm": 0.16010069847106934,
      "learning_rate": 1.5095994417136053e-05,
      "loss": 0.5342,
      "step": 235
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.15556618571281433,
      "learning_rate": 1.1141404609666449e-05,
      "loss": 0.5253,
      "step": 240
    },
    {
      "epoch": 0.9074074074074074,
      "grad_norm": 0.15522176027297974,
      "learning_rate": 7.766842512588529e-06,
      "loss": 0.531,
      "step": 245
    },
    {
      "epoch": 0.9259259259259259,
      "grad_norm": 0.16106705367565155,
      "learning_rate": 4.986404045000697e-06,
      "loss": 0.5304,
      "step": 250
    },
    {
      "epoch": 0.9444444444444444,
      "grad_norm": 0.1596304476261139,
      "learning_rate": 2.811703405892296e-06,
      "loss": 0.5268,
      "step": 255
    },
    {
      "epoch": 0.9629629629629629,
      "grad_norm": 0.15755844116210938,
      "learning_rate": 1.2518245603498345e-06,
      "loss": 0.5315,
      "step": 260
    },
    {
      "epoch": 0.9814814814814815,
      "grad_norm": 0.20954440534114838,
      "learning_rate": 3.1328329483019663e-07,
      "loss": 0.5378,
      "step": 265
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.16012723743915558,
      "learning_rate": 0.0,
      "loss": 0.5403,
      "step": 270
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.2819597721099854,
      "eval_runtime": 1.1563,
      "eval_samples_per_second": 3.459,
      "eval_steps_per_second": 0.865,
      "step": 270
    },
    {
      "epoch": 1.0,
      "step": 270,
      "total_flos": 7.971483567941222e+17,
      "train_loss": 0.6255532653243453,
      "train_runtime": 2916.7528,
      "train_samples_per_second": 5.915,
      "train_steps_per_second": 0.093
    }
  ],
  "logging_steps": 5,
  "max_steps": 270,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.971483567941222e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}