{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.14749262536873156,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0029498525073746312,
      "grad_norm": 2.6730434894561768,
      "learning_rate": 1e-05,
      "loss": 4.059,
      "step": 1
    },
    {
      "epoch": 0.0029498525073746312,
      "eval_loss": 2.0677247047424316,
      "eval_runtime": 44.3198,
      "eval_samples_per_second": 6.453,
      "eval_steps_per_second": 0.812,
      "step": 1
    },
    {
      "epoch": 0.0058997050147492625,
      "grad_norm": 1.780819296836853,
      "learning_rate": 2e-05,
      "loss": 3.1111,
      "step": 2
    },
    {
      "epoch": 0.008849557522123894,
      "grad_norm": 3.14420485496521,
      "learning_rate": 3e-05,
      "loss": 3.5452,
      "step": 3
    },
    {
      "epoch": 0.011799410029498525,
      "grad_norm": 4.393539905548096,
      "learning_rate": 4e-05,
      "loss": 4.6015,
      "step": 4
    },
    {
      "epoch": 0.014749262536873156,
      "grad_norm": 3.3870508670806885,
      "learning_rate": 5e-05,
      "loss": 4.1767,
      "step": 5
    },
    {
      "epoch": 0.017699115044247787,
      "grad_norm": 2.174534559249878,
      "learning_rate": 6e-05,
      "loss": 3.0682,
      "step": 6
    },
    {
      "epoch": 0.02064896755162242,
      "grad_norm": 2.650453567504883,
      "learning_rate": 7e-05,
      "loss": 3.5393,
      "step": 7
    },
    {
      "epoch": 0.02359882005899705,
      "grad_norm": 2.851867437362671,
      "learning_rate": 8e-05,
      "loss": 3.6709,
      "step": 8
    },
    {
      "epoch": 0.02654867256637168,
      "grad_norm": 5.362432956695557,
      "learning_rate": 9e-05,
      "loss": 3.7535,
      "step": 9
    },
    {
      "epoch": 0.02654867256637168,
      "eval_loss": 1.4621776342391968,
      "eval_runtime": 44.3203,
      "eval_samples_per_second": 6.453,
      "eval_steps_per_second": 0.812,
      "step": 9
    },
    {
      "epoch": 0.029498525073746312,
      "grad_norm": 3.1995365619659424,
      "learning_rate": 0.0001,
      "loss": 3.1976,
      "step": 10
    },
    {
      "epoch": 0.032448377581120944,
      "grad_norm": 2.1539673805236816,
      "learning_rate": 9.99695413509548e-05,
      "loss": 2.5042,
      "step": 11
    },
    {
      "epoch": 0.035398230088495575,
      "grad_norm": 2.2371580600738525,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.484,
      "step": 12
    },
    {
      "epoch": 0.038348082595870206,
      "grad_norm": 2.3283255100250244,
      "learning_rate": 9.972609476841367e-05,
      "loss": 1.8651,
      "step": 13
    },
    {
      "epoch": 0.04129793510324484,
      "grad_norm": 2.758920192718506,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.2593,
      "step": 14
    },
    {
      "epoch": 0.04424778761061947,
      "grad_norm": 2.5637218952178955,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.2398,
      "step": 15
    },
    {
      "epoch": 0.0471976401179941,
      "grad_norm": 3.0846474170684814,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.9655,
      "step": 16
    },
    {
      "epoch": 0.05014749262536873,
      "grad_norm": 3.055612087249756,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.5861,
      "step": 17
    },
    {
      "epoch": 0.05309734513274336,
      "grad_norm": 3.042341470718384,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.08,
      "step": 18
    },
    {
      "epoch": 0.05309734513274336,
      "eval_loss": 1.0477064847946167,
      "eval_runtime": 44.3486,
      "eval_samples_per_second": 6.449,
      "eval_steps_per_second": 0.812,
      "step": 18
    },
    {
      "epoch": 0.05604719764011799,
      "grad_norm": 2.4983811378479004,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.7399,
      "step": 19
    },
    {
      "epoch": 0.058997050147492625,
      "grad_norm": 2.7760469913482666,
      "learning_rate": 9.698463103929542e-05,
      "loss": 1.3545,
      "step": 20
    },
    {
      "epoch": 0.061946902654867256,
      "grad_norm": 2.5731499195098877,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.3881,
      "step": 21
    },
    {
      "epoch": 0.06489675516224189,
      "grad_norm": 1.8936318159103394,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.1502,
      "step": 22
    },
    {
      "epoch": 0.06784660766961652,
      "grad_norm": 1.976555585861206,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.1209,
      "step": 23
    },
    {
      "epoch": 0.07079646017699115,
      "grad_norm": 2.4951541423797607,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.9717,
      "step": 24
    },
    {
      "epoch": 0.07374631268436578,
      "grad_norm": 1.7388063669204712,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.8964,
      "step": 25
    },
    {
      "epoch": 0.07669616519174041,
      "grad_norm": 1.7615406513214111,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.5812,
      "step": 26
    },
    {
      "epoch": 0.07964601769911504,
      "grad_norm": 2.4763855934143066,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.2404,
      "step": 27
    },
    {
      "epoch": 0.07964601769911504,
      "eval_loss": 1.0080937147140503,
      "eval_runtime": 44.3909,
      "eval_samples_per_second": 6.443,
      "eval_steps_per_second": 0.811,
      "step": 27
    },
    {
      "epoch": 0.08259587020648967,
      "grad_norm": 2.52359676361084,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.7129,
      "step": 28
    },
    {
      "epoch": 0.0855457227138643,
      "grad_norm": 2.9214510917663574,
      "learning_rate": 8.940053768033609e-05,
      "loss": 2.0964,
      "step": 29
    },
    {
      "epoch": 0.08849557522123894,
      "grad_norm": 2.418994188308716,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.9937,
      "step": 30
    },
    {
      "epoch": 0.09144542772861357,
      "grad_norm": 2.003028154373169,
      "learning_rate": 8.715724127386972e-05,
      "loss": 2.5031,
      "step": 31
    },
    {
      "epoch": 0.0943952802359882,
      "grad_norm": 3.0602455139160156,
      "learning_rate": 8.596699001693255e-05,
      "loss": 1.8419,
      "step": 32
    },
    {
      "epoch": 0.09734513274336283,
      "grad_norm": 3.8223624229431152,
      "learning_rate": 8.473291852294987e-05,
      "loss": 1.7186,
      "step": 33
    },
    {
      "epoch": 0.10029498525073746,
      "grad_norm": 2.1360504627227783,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.8072,
      "step": 34
    },
    {
      "epoch": 0.10324483775811209,
      "grad_norm": 2.5613925457000732,
      "learning_rate": 8.213938048432697e-05,
      "loss": 1.4713,
      "step": 35
    },
    {
      "epoch": 0.10619469026548672,
      "grad_norm": 4.114505767822266,
      "learning_rate": 8.07830737662829e-05,
      "loss": 1.8798,
      "step": 36
    },
    {
      "epoch": 0.10619469026548672,
      "eval_loss": 0.9803729057312012,
      "eval_runtime": 44.3399,
      "eval_samples_per_second": 6.45,
      "eval_steps_per_second": 0.812,
      "step": 36
    },
    {
      "epoch": 0.10914454277286136,
      "grad_norm": 2.2606899738311768,
      "learning_rate": 7.938926261462366e-05,
      "loss": 2.1288,
      "step": 37
    },
    {
      "epoch": 0.11209439528023599,
      "grad_norm": 2.3474953174591064,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.8458,
      "step": 38
    },
    {
      "epoch": 0.11504424778761062,
      "grad_norm": 2.764202833175659,
      "learning_rate": 7.649596321166024e-05,
      "loss": 3.0195,
      "step": 39
    },
    {
      "epoch": 0.11799410029498525,
      "grad_norm": 2.4660158157348633,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.827,
      "step": 40
    },
    {
      "epoch": 0.12094395280235988,
      "grad_norm": 2.5078041553497314,
      "learning_rate": 7.347357813929454e-05,
      "loss": 1.3851,
      "step": 41
    },
    {
      "epoch": 0.12389380530973451,
      "grad_norm": 2.1289970874786377,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.4684,
      "step": 42
    },
    {
      "epoch": 0.12684365781710916,
      "grad_norm": 3.008479595184326,
      "learning_rate": 7.033683215379002e-05,
      "loss": 1.7663,
      "step": 43
    },
    {
      "epoch": 0.12979351032448377,
      "grad_norm": 2.681143045425415,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.6546,
      "step": 44
    },
    {
      "epoch": 0.13274336283185842,
      "grad_norm": 3.201345443725586,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.0512,
      "step": 45
    },
    {
      "epoch": 0.13274336283185842,
      "eval_loss": 0.9570127129554749,
      "eval_runtime": 44.365,
      "eval_samples_per_second": 6.447,
      "eval_steps_per_second": 0.811,
      "step": 45
    },
    {
      "epoch": 0.13569321533923304,
      "grad_norm": 1.8258627653121948,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.5324,
      "step": 46
    },
    {
      "epoch": 0.13864306784660768,
      "grad_norm": 1.8189584016799927,
      "learning_rate": 6.378186779084995e-05,
      "loss": 1.7706,
      "step": 47
    },
    {
      "epoch": 0.1415929203539823,
      "grad_norm": 2.40757155418396,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.6126,
      "step": 48
    },
    {
      "epoch": 0.14454277286135694,
      "grad_norm": 2.335020065307617,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 2.326,
      "step": 49
    },
    {
      "epoch": 0.14749262536873156,
      "grad_norm": 1.997075080871582,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.5148,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.347789697024e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}