{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 270,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003703703703703704,
      "grad_norm": 0.6748902201652527,
      "learning_rate": 1.111111111111111e-05,
      "loss": 1.6565,
      "step": 1
    },
    {
      "epoch": 0.018518518518518517,
      "grad_norm": 0.8883910775184631,
      "learning_rate": 5.5555555555555545e-05,
      "loss": 1.6019,
      "step": 5
    },
    {
      "epoch": 0.037037037037037035,
      "grad_norm": 0.9978822469711304,
      "learning_rate": 0.00011111111111111109,
      "loss": 1.5359,
      "step": 10
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 1.5893608331680298,
      "learning_rate": 0.00016666666666666666,
      "loss": 1.3426,
      "step": 15
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 0.9495543241500854,
      "learning_rate": 0.00022222222222222218,
      "loss": 0.9366,
      "step": 20
    },
    {
      "epoch": 0.09259259259259259,
      "grad_norm": 0.4493098258972168,
      "learning_rate": 0.0002777777777777778,
      "loss": 0.8074,
      "step": 25
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.3246346712112427,
      "learning_rate": 0.0002998871928756345,
      "loss": 0.7346,
      "step": 30
    },
    {
      "epoch": 0.12962962962962962,
      "grad_norm": 0.28874966502189636,
      "learning_rate": 0.0002991984303609902,
      "loss": 0.6855,
      "step": 35
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 0.19572241604328156,
      "learning_rate": 0.0002978864495017194,
      "loss": 0.6548,
      "step": 40
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 0.16895747184753418,
      "learning_rate": 0.00029595673058697357,
      "loss": 0.6371,
      "step": 45
    },
    {
      "epoch": 0.18518518518518517,
      "grad_norm": 0.16596512496471405,
      "learning_rate": 0.0002934173342660819,
      "loss": 0.6211,
      "step": 50
    },
    {
      "epoch": 0.2037037037037037,
      "grad_norm": 0.13342902064323425,
      "learning_rate": 0.00029027886787832844,
      "loss": 0.6124,
      "step": 55
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.1435386687517166,
      "learning_rate": 0.000286554441144922,
      "loss": 0.6021,
      "step": 60
    },
    {
      "epoch": 0.24074074074074073,
      "grad_norm": 0.1567266881465912,
      "learning_rate": 0.0002822596114082412,
      "loss": 0.6076,
      "step": 65
    },
    {
      "epoch": 0.25925925925925924,
      "grad_norm": 0.14005671441555023,
      "learning_rate": 0.0002774123186470946,
      "loss": 0.5941,
      "step": 70
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 0.13816745579242706,
      "learning_rate": 0.0002720328105394451,
      "loss": 0.5891,
      "step": 75
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.1502453237771988,
      "learning_rate": 0.00026614355788561985,
      "loss": 0.5701,
      "step": 80
    },
    {
      "epoch": 0.3148148148148148,
      "grad_norm": 0.14816893637180328,
      "learning_rate": 0.00025976916074529183,
      "loss": 0.5718,
      "step": 85
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.13009826838970184,
      "learning_rate": 0.00025293624568031,
      "loss": 0.5729,
      "step": 90
    },
    {
      "epoch": 0.35185185185185186,
      "grad_norm": 0.18656130135059357,
      "learning_rate": 0.0002456733545326059,
      "loss": 0.5724,
      "step": 95
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 0.14232292771339417,
      "learning_rate": 0.00023801082520176267,
      "loss": 0.5582,
      "step": 100
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 0.15933294594287872,
      "learning_rate": 0.0002299806649202537,
      "loss": 0.5697,
      "step": 105
    },
    {
      "epoch": 0.4074074074074074,
      "grad_norm": 0.15552918612957,
      "learning_rate": 0.00022161641655569234,
      "loss": 0.5544,
      "step": 110
    },
    {
      "epoch": 0.42592592592592593,
      "grad_norm": 0.16393014788627625,
      "learning_rate": 0.00021295301849856435,
      "loss": 0.5542,
      "step": 115
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.1773192435503006,
      "learning_rate": 0.00020402665872070654,
      "loss": 0.5491,
      "step": 120
    },
    {
      "epoch": 0.46296296296296297,
      "grad_norm": 0.16194891929626465,
      "learning_rate": 0.00019487462361414626,
      "loss": 0.5525,
      "step": 125
    },
    {
      "epoch": 0.48148148148148145,
      "grad_norm": 0.15379634499549866,
      "learning_rate": 0.00018553514224171783,
      "loss": 0.5489,
      "step": 130
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.170404314994812,
      "learning_rate": 0.00017604722665003956,
      "loss": 0.5549,
      "step": 135
    },
    {
      "epoch": 0.5185185185185185,
      "grad_norm": 0.15406401455402374,
      "learning_rate": 0.00016645050891187974,
      "loss": 0.5451,
      "step": 140
    },
    {
      "epoch": 0.5370370370370371,
      "grad_norm": 0.16518822312355042,
      "learning_rate": 0.00015678507557860595,
      "loss": 0.5436,
      "step": 145
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.15576022863388062,
      "learning_rate": 0.00014709130023422633,
      "loss": 0.5492,
      "step": 150
    },
    {
      "epoch": 0.5740740740740741,
      "grad_norm": 0.1619407832622528,
      "learning_rate": 0.00013740967485046393,
      "loss": 0.5525,
      "step": 155
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.1685595065355301,
      "learning_rate": 0.0001277806406473127,
      "loss": 0.5385,
      "step": 160
    },
    {
      "epoch": 0.6111111111111112,
      "grad_norm": 0.16706547141075134,
      "learning_rate": 0.00011824441916558842,
      "loss": 0.5333,
      "step": 165
    },
    {
      "epoch": 0.6296296296296297,
      "grad_norm": 0.1658417284488678,
      "learning_rate": 0.00010884084425710479,
      "loss": 0.5383,
      "step": 170
    },
    {
      "epoch": 0.6481481481481481,
      "grad_norm": 0.16389456391334534,
      "learning_rate": 9.960919569426869e-05,
      "loss": 0.5353,
      "step": 175
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.1591169834136963,
      "learning_rate": 9.058803509412646e-05,
      "loss": 0.5431,
      "step": 180
    },
    {
      "epoch": 0.6851851851851852,
      "grad_norm": 0.1656966358423233,
      "learning_rate": 8.18150448422249e-05,
      "loss": 0.5342,
      "step": 185
    },
    {
      "epoch": 0.7037037037037037,
      "grad_norm": 0.1611323356628418,
      "learning_rate": 7.332687068911903e-05,
      "loss": 0.5387,
      "step": 190
    },
    {
      "epoch": 0.7222222222222222,
      "grad_norm": 0.16765892505645752,
      "learning_rate": 6.515896867701923e-05,
      "loss": 0.5399,
      "step": 195
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 0.1689939796924591,
      "learning_rate": 5.734545703598145e-05,
      "loss": 0.5337,
      "step": 200
    },
    {
      "epoch": 0.7592592592592593,
      "grad_norm": 0.17689311504364014,
      "learning_rate": 4.991897366828704e-05,
      "loss": 0.539,
      "step": 205
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 0.16849064826965332,
      "learning_rate": 4.2910539816315164e-05,
      "loss": 0.5298,
      "step": 210
    },
    {
      "epoch": 0.7962962962962963,
      "grad_norm": 0.1593889743089676,
      "learning_rate": 3.6349430483382306e-05,
      "loss": 0.5287,
      "step": 215
    },
    {
      "epoch": 0.8148148148148148,
      "grad_norm": 0.1509651094675064,
      "learning_rate": 3.0263052148816046e-05,
      "loss": 0.5303,
      "step": 220
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 0.1566237062215805,
      "learning_rate": 2.4676828288059558e-05,
      "loss": 0.5365,
      "step": 225
    },
    {
      "epoch": 0.8518518518518519,
      "grad_norm": 0.15855161845684052,
      "learning_rate": 1.9614093176002828e-05,
      "loss": 0.5433,
      "step": 230
    },
    {
      "epoch": 0.8703703703703703,
      "grad_norm": 0.16000784933567047,
      "learning_rate": 1.5095994417136053e-05,
      "loss": 0.5336,
      "step": 235
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.15544116497039795,
      "learning_rate": 1.1141404609666449e-05,
      "loss": 0.5255,
      "step": 240
    },
    {
      "epoch": 0.9074074074074074,
      "grad_norm": 0.15555834770202637,
      "learning_rate": 7.766842512588529e-06,
      "loss": 0.5307,
      "step": 245
    },
    {
      "epoch": 0.9259259259259259,
      "grad_norm": 0.163555309176445,
      "learning_rate": 4.986404045000697e-06,
      "loss": 0.5301,
      "step": 250
    },
    {
      "epoch": 0.9444444444444444,
      "grad_norm": 0.16425177454948425,
      "learning_rate": 2.811703405892296e-06,
      "loss": 0.5268,
      "step": 255
    },
    {
      "epoch": 0.9629629629629629,
      "grad_norm": 0.15855489671230316,
      "learning_rate": 1.2518245603498345e-06,
      "loss": 0.5314,
      "step": 260
    },
    {
      "epoch": 0.9814814814814815,
      "grad_norm": 0.1560031622648239,
      "learning_rate": 3.1328329483019663e-07,
      "loss": 0.5381,
      "step": 265
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.16213464736938477,
      "learning_rate": 0.0,
      "loss": 0.5406,
      "step": 270
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.2763605117797852,
      "eval_runtime": 1.1636,
      "eval_samples_per_second": 3.438,
      "eval_steps_per_second": 0.859,
      "step": 270
    },
    {
      "epoch": 1.0,
      "step": 270,
      "total_flos": 7.971483567941222e+17,
      "train_loss": 0.6257878903989439,
      "train_runtime": 2906.8071,
      "train_samples_per_second": 5.935,
      "train_steps_per_second": 0.093
    }
  ],
  "logging_steps": 5,
  "max_steps": 270,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.971483567941222e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}