{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2506265664160401,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005012531328320802,
      "grad_norm": 20.089641571044922,
      "learning_rate": 1e-05,
      "loss": 20.1344,
      "step": 1
    },
    {
      "epoch": 0.005012531328320802,
      "eval_loss": 10.39857006072998,
      "eval_runtime": 28.5845,
      "eval_samples_per_second": 5.877,
      "eval_steps_per_second": 0.735,
      "step": 1
    },
    {
      "epoch": 0.010025062656641603,
      "grad_norm": 19.52311134338379,
      "learning_rate": 2e-05,
      "loss": 22.4254,
      "step": 2
    },
    {
      "epoch": 0.015037593984962405,
      "grad_norm": 14.5244140625,
      "learning_rate": 3e-05,
      "loss": 20.1972,
      "step": 3
    },
    {
      "epoch": 0.020050125313283207,
      "grad_norm": 21.34798812866211,
      "learning_rate": 4e-05,
      "loss": 22.7661,
      "step": 4
    },
    {
      "epoch": 0.02506265664160401,
      "grad_norm": 17.969993591308594,
      "learning_rate": 5e-05,
      "loss": 19.4781,
      "step": 5
    },
    {
      "epoch": 0.03007518796992481,
      "grad_norm": 19.54123306274414,
      "learning_rate": 6e-05,
      "loss": 21.2443,
      "step": 6
    },
    {
      "epoch": 0.03508771929824561,
      "grad_norm": 19.612939834594727,
      "learning_rate": 7e-05,
      "loss": 20.0462,
      "step": 7
    },
    {
      "epoch": 0.040100250626566414,
      "grad_norm": 23.63931655883789,
      "learning_rate": 8e-05,
      "loss": 17.3324,
      "step": 8
    },
    {
      "epoch": 0.045112781954887216,
      "grad_norm": 20.25324821472168,
      "learning_rate": 9e-05,
      "loss": 16.8131,
      "step": 9
    },
    {
      "epoch": 0.045112781954887216,
      "eval_loss": 8.083789825439453,
      "eval_runtime": 28.6671,
      "eval_samples_per_second": 5.86,
      "eval_steps_per_second": 0.733,
      "step": 9
    },
    {
      "epoch": 0.05012531328320802,
      "grad_norm": 22.54298210144043,
      "learning_rate": 0.0001,
      "loss": 15.429,
      "step": 10
    },
    {
      "epoch": 0.05513784461152882,
      "grad_norm": 27.352766036987305,
      "learning_rate": 9.99695413509548e-05,
      "loss": 17.369,
      "step": 11
    },
    {
      "epoch": 0.06015037593984962,
      "grad_norm": 24.816608428955078,
      "learning_rate": 9.987820251299122e-05,
      "loss": 12.8618,
      "step": 12
    },
    {
      "epoch": 0.06516290726817042,
      "grad_norm": 22.56251335144043,
      "learning_rate": 9.972609476841367e-05,
      "loss": 11.7916,
      "step": 13
    },
    {
      "epoch": 0.07017543859649122,
      "grad_norm": 24.081544876098633,
      "learning_rate": 9.951340343707852e-05,
      "loss": 8.7375,
      "step": 14
    },
    {
      "epoch": 0.07518796992481203,
      "grad_norm": 16.781705856323242,
      "learning_rate": 9.924038765061042e-05,
      "loss": 5.8874,
      "step": 15
    },
    {
      "epoch": 0.08020050125313283,
      "grad_norm": 21.11363983154297,
      "learning_rate": 9.890738003669029e-05,
      "loss": 5.2726,
      "step": 16
    },
    {
      "epoch": 0.08521303258145363,
      "grad_norm": 17.36714744567871,
      "learning_rate": 9.851478631379982e-05,
      "loss": 5.7834,
      "step": 17
    },
    {
      "epoch": 0.09022556390977443,
      "grad_norm": 12.790335655212402,
      "learning_rate": 9.806308479691595e-05,
      "loss": 3.3452,
      "step": 18
    },
    {
      "epoch": 0.09022556390977443,
      "eval_loss": 1.4436625242233276,
      "eval_runtime": 28.6933,
      "eval_samples_per_second": 5.855,
      "eval_steps_per_second": 0.732,
      "step": 18
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 12.467353820800781,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.57,
      "step": 19
    },
    {
      "epoch": 0.10025062656641603,
      "grad_norm": 12.546868324279785,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.2274,
      "step": 20
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 15.894756317138672,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.0864,
      "step": 21
    },
    {
      "epoch": 0.11027568922305764,
      "grad_norm": 11.902620315551758,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.9417,
      "step": 22
    },
    {
      "epoch": 0.11528822055137844,
      "grad_norm": 7.588188648223877,
      "learning_rate": 9.493970231495835e-05,
      "loss": 1.6944,
      "step": 23
    },
    {
      "epoch": 0.12030075187969924,
      "grad_norm": 7.501560688018799,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.6366,
      "step": 24
    },
    {
      "epoch": 0.12531328320802004,
      "grad_norm": 4.7584309577941895,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.5247,
      "step": 25
    },
    {
      "epoch": 0.13032581453634084,
      "grad_norm": 2.9300928115844727,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.4553,
      "step": 26
    },
    {
      "epoch": 0.13533834586466165,
      "grad_norm": 3.2299232482910156,
      "learning_rate": 9.145187862775209e-05,
      "loss": 1.3305,
      "step": 27
    },
    {
      "epoch": 0.13533834586466165,
      "eval_loss": 0.7951204180717468,
      "eval_runtime": 28.6441,
      "eval_samples_per_second": 5.865,
      "eval_steps_per_second": 0.733,
      "step": 27
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 3.232689142227173,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.2491,
      "step": 28
    },
    {
      "epoch": 0.14536340852130325,
      "grad_norm": 5.9495344161987305,
      "learning_rate": 8.940053768033609e-05,
      "loss": 1.6638,
      "step": 29
    },
    {
      "epoch": 0.15037593984962405,
      "grad_norm": 9.425905227661133,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.6769,
      "step": 30
    },
    {
      "epoch": 0.15538847117794485,
      "grad_norm": 4.587433815002441,
      "learning_rate": 8.715724127386972e-05,
      "loss": 1.627,
      "step": 31
    },
    {
      "epoch": 0.16040100250626566,
      "grad_norm": 2.9762625694274902,
      "learning_rate": 8.596699001693255e-05,
      "loss": 1.2048,
      "step": 32
    },
    {
      "epoch": 0.16541353383458646,
      "grad_norm": 2.8089957237243652,
      "learning_rate": 8.473291852294987e-05,
      "loss": 1.3888,
      "step": 33
    },
    {
      "epoch": 0.17042606516290726,
      "grad_norm": 1.2890253067016602,
      "learning_rate": 8.345653031794292e-05,
      "loss": 1.34,
      "step": 34
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 2.3775737285614014,
      "learning_rate": 8.213938048432697e-05,
      "loss": 1.4306,
      "step": 35
    },
    {
      "epoch": 0.18045112781954886,
      "grad_norm": 2.7069523334503174,
      "learning_rate": 8.07830737662829e-05,
      "loss": 1.4448,
      "step": 36
    },
    {
      "epoch": 0.18045112781954886,
      "eval_loss": 0.6974722743034363,
      "eval_runtime": 28.6505,
      "eval_samples_per_second": 5.864,
      "eval_steps_per_second": 0.733,
      "step": 36
    },
    {
      "epoch": 0.18546365914786966,
      "grad_norm": 2.9651219844818115,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.4529,
      "step": 37
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 4.528919219970703,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.3969,
      "step": 38
    },
    {
      "epoch": 0.19548872180451127,
      "grad_norm": 2.145470142364502,
      "learning_rate": 7.649596321166024e-05,
      "loss": 1.4055,
      "step": 39
    },
    {
      "epoch": 0.20050125313283207,
      "grad_norm": 3.200515031814575,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.4332,
      "step": 40
    },
    {
      "epoch": 0.20551378446115287,
      "grad_norm": 2.2597758769989014,
      "learning_rate": 7.347357813929454e-05,
      "loss": 1.4308,
      "step": 41
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 1.4869681596755981,
      "learning_rate": 7.191855733945387e-05,
      "loss": 1.3283,
      "step": 42
    },
    {
      "epoch": 0.21553884711779447,
      "grad_norm": 2.0091750621795654,
      "learning_rate": 7.033683215379002e-05,
      "loss": 1.3992,
      "step": 43
    },
    {
      "epoch": 0.22055137844611528,
      "grad_norm": 5.310917377471924,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.599,
      "step": 44
    },
    {
      "epoch": 0.22556390977443608,
      "grad_norm": 3.153137445449829,
      "learning_rate": 6.710100716628344e-05,
      "loss": 1.4812,
      "step": 45
    },
    {
      "epoch": 0.22556390977443608,
      "eval_loss": 0.7087278366088867,
      "eval_runtime": 28.9933,
      "eval_samples_per_second": 5.794,
      "eval_steps_per_second": 0.724,
      "step": 45
    },
    {
      "epoch": 0.23057644110275688,
      "grad_norm": 2.8991687297821045,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.4612,
      "step": 46
    },
    {
      "epoch": 0.23558897243107768,
      "grad_norm": 2.4431586265563965,
      "learning_rate": 6.378186779084995e-05,
      "loss": 1.4148,
      "step": 47
    },
    {
      "epoch": 0.24060150375939848,
      "grad_norm": 2.443445920944214,
      "learning_rate": 6.209609477998338e-05,
      "loss": 1.4315,
      "step": 48
    },
    {
      "epoch": 0.24561403508771928,
      "grad_norm": 1.969445824623108,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 1.3969,
      "step": 49
    },
    {
      "epoch": 0.2506265664160401,
      "grad_norm": 2.1230924129486084,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.4047,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.946469637318246e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}