{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0327653997378768,
  "eval_steps": 13,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000655307994757536,
      "grad_norm": 0.016265055164694786,
      "learning_rate": 1e-05,
      "loss": 11.9312,
      "step": 1
    },
    {
      "epoch": 0.000655307994757536,
      "eval_loss": 11.931286811828613,
      "eval_runtime": 28.0473,
      "eval_samples_per_second": 91.667,
      "eval_steps_per_second": 45.851,
      "step": 1
    },
    {
      "epoch": 0.001310615989515072,
      "grad_norm": 0.014278396964073181,
      "learning_rate": 2e-05,
      "loss": 11.9309,
      "step": 2
    },
    {
      "epoch": 0.001965923984272608,
      "grad_norm": 0.011991339735686779,
      "learning_rate": 3e-05,
      "loss": 11.9327,
      "step": 3
    },
    {
      "epoch": 0.002621231979030144,
      "grad_norm": 0.01234712265431881,
      "learning_rate": 4e-05,
      "loss": 11.9315,
      "step": 4
    },
    {
      "epoch": 0.00327653997378768,
      "grad_norm": 0.017012331634759903,
      "learning_rate": 5e-05,
      "loss": 11.9296,
      "step": 5
    },
    {
      "epoch": 0.003931847968545216,
      "grad_norm": 0.014782086946070194,
      "learning_rate": 6e-05,
      "loss": 11.9329,
      "step": 6
    },
    {
      "epoch": 0.0045871559633027525,
      "grad_norm": 0.020223338156938553,
      "learning_rate": 7e-05,
      "loss": 11.9317,
      "step": 7
    },
    {
      "epoch": 0.005242463958060288,
      "grad_norm": 0.013544556684792042,
      "learning_rate": 8e-05,
      "loss": 11.9312,
      "step": 8
    },
    {
      "epoch": 0.005897771952817824,
      "grad_norm": 0.012503627687692642,
      "learning_rate": 9e-05,
      "loss": 11.9315,
      "step": 9
    },
    {
      "epoch": 0.00655307994757536,
      "grad_norm": 0.012558264657855034,
      "learning_rate": 0.0001,
      "loss": 11.9305,
      "step": 10
    },
    {
      "epoch": 0.007208387942332897,
      "grad_norm": 0.015282824635505676,
      "learning_rate": 9.98458666866564e-05,
      "loss": 11.9317,
      "step": 11
    },
    {
      "epoch": 0.007863695937090432,
      "grad_norm": 0.01448417454957962,
      "learning_rate": 9.938441702975689e-05,
      "loss": 11.9314,
      "step": 12
    },
    {
      "epoch": 0.00851900393184797,
      "grad_norm": 0.012158408761024475,
      "learning_rate": 9.861849601988383e-05,
      "loss": 11.9323,
      "step": 13
    },
    {
      "epoch": 0.00851900393184797,
      "eval_loss": 11.93112564086914,
      "eval_runtime": 12.5386,
      "eval_samples_per_second": 205.047,
      "eval_steps_per_second": 102.563,
      "step": 13
    },
    {
      "epoch": 0.009174311926605505,
      "grad_norm": 0.011835216544568539,
      "learning_rate": 9.755282581475769e-05,
      "loss": 11.9299,
      "step": 14
    },
    {
      "epoch": 0.009829619921363041,
      "grad_norm": 0.01589955948293209,
      "learning_rate": 9.619397662556435e-05,
      "loss": 11.9325,
      "step": 15
    },
    {
      "epoch": 0.010484927916120577,
      "grad_norm": 0.010300605557858944,
      "learning_rate": 9.45503262094184e-05,
      "loss": 11.93,
      "step": 16
    },
    {
      "epoch": 0.011140235910878113,
      "grad_norm": 0.013794222846627235,
      "learning_rate": 9.263200821770461e-05,
      "loss": 11.9315,
      "step": 17
    },
    {
      "epoch": 0.011795543905635648,
      "grad_norm": 0.014298832975327969,
      "learning_rate": 9.045084971874738e-05,
      "loss": 11.9313,
      "step": 18
    },
    {
      "epoch": 0.012450851900393184,
      "grad_norm": 0.017451252788305283,
      "learning_rate": 8.802029828000156e-05,
      "loss": 11.9296,
      "step": 19
    },
    {
      "epoch": 0.01310615989515072,
      "grad_norm": 0.017337026074528694,
      "learning_rate": 8.535533905932738e-05,
      "loss": 11.9303,
      "step": 20
    },
    {
      "epoch": 0.013761467889908258,
      "grad_norm": 0.016705993562936783,
      "learning_rate": 8.247240241650918e-05,
      "loss": 11.9319,
      "step": 21
    },
    {
      "epoch": 0.014416775884665793,
      "grad_norm": 0.014467249624431133,
      "learning_rate": 7.938926261462366e-05,
      "loss": 11.9311,
      "step": 22
    },
    {
      "epoch": 0.01507208387942333,
      "grad_norm": 0.017065448686480522,
      "learning_rate": 7.612492823579745e-05,
      "loss": 11.9321,
      "step": 23
    },
    {
      "epoch": 0.015727391874180863,
      "grad_norm": 0.014603197574615479,
      "learning_rate": 7.269952498697734e-05,
      "loss": 11.9317,
      "step": 24
    },
    {
      "epoch": 0.0163826998689384,
      "grad_norm": 0.020358189940452576,
      "learning_rate": 6.91341716182545e-05,
      "loss": 11.9313,
      "step": 25
    },
    {
      "epoch": 0.01703800786369594,
      "grad_norm": 0.012381589971482754,
      "learning_rate": 6.545084971874738e-05,
      "loss": 11.9309,
      "step": 26
    },
    {
      "epoch": 0.01703800786369594,
      "eval_loss": 11.930882453918457,
      "eval_runtime": 12.5197,
      "eval_samples_per_second": 205.357,
      "eval_steps_per_second": 102.718,
      "step": 26
    },
    {
      "epoch": 0.017693315858453473,
      "grad_norm": 0.014943009242415428,
      "learning_rate": 6.167226819279528e-05,
      "loss": 11.9304,
      "step": 27
    },
    {
      "epoch": 0.01834862385321101,
      "grad_norm": 0.02193116582930088,
      "learning_rate": 5.782172325201155e-05,
      "loss": 11.9322,
      "step": 28
    },
    {
      "epoch": 0.019003931847968544,
      "grad_norm": 0.012562782503664494,
      "learning_rate": 5.392295478639225e-05,
      "loss": 11.9306,
      "step": 29
    },
    {
      "epoch": 0.019659239842726082,
      "grad_norm": 0.013389959931373596,
      "learning_rate": 5e-05,
      "loss": 11.9316,
      "step": 30
    },
    {
      "epoch": 0.020314547837483616,
      "grad_norm": 0.01724652200937271,
      "learning_rate": 4.607704521360776e-05,
      "loss": 11.9305,
      "step": 31
    },
    {
      "epoch": 0.020969855832241154,
      "grad_norm": 0.018411165103316307,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 11.9313,
      "step": 32
    },
    {
      "epoch": 0.02162516382699869,
      "grad_norm": 0.013785377144813538,
      "learning_rate": 3.832773180720475e-05,
      "loss": 11.9316,
      "step": 33
    },
    {
      "epoch": 0.022280471821756225,
      "grad_norm": 0.018790163099765778,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 11.9323,
      "step": 34
    },
    {
      "epoch": 0.022935779816513763,
      "grad_norm": 0.015211019665002823,
      "learning_rate": 3.086582838174551e-05,
      "loss": 11.9303,
      "step": 35
    },
    {
      "epoch": 0.023591087811271297,
      "grad_norm": 0.01657300628721714,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 11.9292,
      "step": 36
    },
    {
      "epoch": 0.024246395806028834,
      "grad_norm": 0.01142768282443285,
      "learning_rate": 2.3875071764202563e-05,
      "loss": 11.9314,
      "step": 37
    },
    {
      "epoch": 0.02490170380078637,
      "grad_norm": 0.021842923015356064,
      "learning_rate": 2.061073738537635e-05,
      "loss": 11.9316,
      "step": 38
    },
    {
      "epoch": 0.025557011795543906,
      "grad_norm": 0.019510934129357338,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 11.9325,
      "step": 39
    },
    {
      "epoch": 0.025557011795543906,
      "eval_loss": 11.930744171142578,
      "eval_runtime": 12.5232,
      "eval_samples_per_second": 205.298,
      "eval_steps_per_second": 102.689,
      "step": 39
    },
    {
      "epoch": 0.02621231979030144,
      "grad_norm": 0.013880622573196888,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 11.9314,
      "step": 40
    },
    {
      "epoch": 0.026867627785058978,
      "grad_norm": 0.019777296110987663,
      "learning_rate": 1.1979701719998453e-05,
      "loss": 11.9305,
      "step": 41
    },
    {
      "epoch": 0.027522935779816515,
      "grad_norm": 0.013822672888636589,
      "learning_rate": 9.549150281252633e-06,
      "loss": 11.9314,
      "step": 42
    },
    {
      "epoch": 0.02817824377457405,
      "grad_norm": 0.016023075208067894,
      "learning_rate": 7.367991782295391e-06,
      "loss": 11.9313,
      "step": 43
    },
    {
      "epoch": 0.028833551769331587,
      "grad_norm": 0.01633632369339466,
      "learning_rate": 5.449673790581611e-06,
      "loss": 11.931,
      "step": 44
    },
    {
      "epoch": 0.02948885976408912,
      "grad_norm": 0.014971333555877209,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 11.9318,
      "step": 45
    },
    {
      "epoch": 0.03014416775884666,
      "grad_norm": 0.01602235995233059,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 11.9318,
      "step": 46
    },
    {
      "epoch": 0.030799475753604193,
      "grad_norm": 0.013982513919472694,
      "learning_rate": 1.3815039801161721e-06,
      "loss": 11.9317,
      "step": 47
    },
    {
      "epoch": 0.03145478374836173,
      "grad_norm": 0.01966976933181286,
      "learning_rate": 6.15582970243117e-07,
      "loss": 11.9299,
      "step": 48
    },
    {
      "epoch": 0.03211009174311927,
      "grad_norm": 0.018536817282438278,
      "learning_rate": 1.5413331334360182e-07,
      "loss": 11.9314,
      "step": 49
    },
    {
      "epoch": 0.0327653997378768,
      "grad_norm": 0.01766209863126278,
      "learning_rate": 0.0,
      "loss": 11.9292,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 299787878400.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}