{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.14749262536873156,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0029498525073746312,
      "grad_norm": 2.6695961952209473,
      "learning_rate": 1e-05,
      "loss": 4.059,
      "step": 1
    },
    {
      "epoch": 0.0029498525073746312,
      "eval_loss": 2.0677247047424316,
      "eval_runtime": 44.3994,
      "eval_samples_per_second": 6.442,
      "eval_steps_per_second": 0.811,
      "step": 1
    },
    {
      "epoch": 0.0058997050147492625,
      "grad_norm": 1.780763030052185,
      "learning_rate": 2e-05,
      "loss": 3.1111,
      "step": 2
    },
    {
      "epoch": 0.008849557522123894,
      "grad_norm": 3.134167194366455,
      "learning_rate": 3e-05,
      "loss": 3.5404,
      "step": 3
    },
    {
      "epoch": 0.011799410029498525,
      "grad_norm": 4.528249263763428,
      "learning_rate": 4e-05,
      "loss": 4.5936,
      "step": 4
    },
    {
      "epoch": 0.014749262536873156,
      "grad_norm": 3.391472339630127,
      "learning_rate": 5e-05,
      "loss": 4.1795,
      "step": 5
    },
    {
      "epoch": 0.017699115044247787,
      "grad_norm": 2.1838064193725586,
      "learning_rate": 6e-05,
      "loss": 3.0666,
      "step": 6
    },
    {
      "epoch": 0.02064896755162242,
      "grad_norm": 2.6701786518096924,
      "learning_rate": 7e-05,
      "loss": 3.5408,
      "step": 7
    },
    {
      "epoch": 0.02359882005899705,
      "grad_norm": 2.9336118698120117,
      "learning_rate": 8e-05,
      "loss": 3.67,
      "step": 8
    },
    {
      "epoch": 0.02654867256637168,
      "grad_norm": 5.5627264976501465,
      "learning_rate": 9e-05,
      "loss": 3.7612,
      "step": 9
    },
    {
      "epoch": 0.02654867256637168,
      "eval_loss": 1.4617587327957153,
      "eval_runtime": 44.4212,
      "eval_samples_per_second": 6.438,
      "eval_steps_per_second": 0.81,
      "step": 9
    },
    {
      "epoch": 0.029498525073746312,
      "grad_norm": 3.2141149044036865,
      "learning_rate": 0.0001,
      "loss": 3.1932,
      "step": 10
    },
    {
      "epoch": 0.032448377581120944,
      "grad_norm": 2.183711051940918,
      "learning_rate": 9.99695413509548e-05,
      "loss": 2.4995,
      "step": 11
    },
    {
      "epoch": 0.035398230088495575,
      "grad_norm": 2.1251938343048096,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.4882,
      "step": 12
    },
    {
      "epoch": 0.038348082595870206,
      "grad_norm": 2.368736743927002,
      "learning_rate": 9.972609476841367e-05,
      "loss": 1.8657,
      "step": 13
    },
    {
      "epoch": 0.04129793510324484,
      "grad_norm": 3.1230757236480713,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.2617,
      "step": 14
    },
    {
      "epoch": 0.04424778761061947,
      "grad_norm": 2.4695842266082764,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.24,
      "step": 15
    },
    {
      "epoch": 0.0471976401179941,
      "grad_norm": 3.388068199157715,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.9291,
      "step": 16
    },
    {
      "epoch": 0.05014749262536873,
      "grad_norm": 3.315546989440918,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.5513,
      "step": 17
    },
    {
      "epoch": 0.05309734513274336,
      "grad_norm": 3.138029098510742,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.061,
      "step": 18
    },
    {
      "epoch": 0.05309734513274336,
      "eval_loss": 1.0333534479141235,
      "eval_runtime": 44.4437,
      "eval_samples_per_second": 6.435,
      "eval_steps_per_second": 0.81,
      "step": 18
    },
    {
      "epoch": 0.05604719764011799,
      "grad_norm": 2.192615509033203,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.6975,
      "step": 19
    },
    {
      "epoch": 0.058997050147492625,
      "grad_norm": 2.5154130458831787,
      "learning_rate": 9.698463103929542e-05,
      "loss": 1.3443,
      "step": 20
    },
    {
      "epoch": 0.061946902654867256,
      "grad_norm": 2.76058030128479,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.3646,
      "step": 21
    },
    {
      "epoch": 0.06489675516224189,
      "grad_norm": 1.8679944276809692,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.1289,
      "step": 22
    },
    {
      "epoch": 0.06784660766961652,
      "grad_norm": 1.9377188682556152,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.1156,
      "step": 23
    },
    {
      "epoch": 0.07079646017699115,
      "grad_norm": 2.5624561309814453,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.9728,
      "step": 24
    },
    {
      "epoch": 0.07374631268436578,
      "grad_norm": 1.7926393747329712,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.9014,
      "step": 25
    },
    {
      "epoch": 0.07669616519174041,
      "grad_norm": 1.742391586303711,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.5669,
      "step": 26
    },
    {
      "epoch": 0.07964601769911504,
      "grad_norm": 2.4783685207366943,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.2309,
      "step": 27
    },
    {
      "epoch": 0.07964601769911504,
      "eval_loss": 1.0076097249984741,
      "eval_runtime": 44.4736,
      "eval_samples_per_second": 6.431,
      "eval_steps_per_second": 0.809,
      "step": 27
    },
    {
      "epoch": 0.08259587020648967,
      "grad_norm": 2.483924388885498,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.7079,
      "step": 28
    },
    {
      "epoch": 0.0855457227138643,
      "grad_norm": 2.8759710788726807,
      "learning_rate": 8.940053768033609e-05,
      "loss": 2.1136,
      "step": 29
    },
    {
      "epoch": 0.08849557522123894,
      "grad_norm": 2.6359269618988037,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.9826,
      "step": 30
    },
    {
      "epoch": 0.09144542772861357,
      "grad_norm": 1.697524905204773,
      "learning_rate": 8.715724127386972e-05,
      "loss": 2.5094,
      "step": 31
    },
    {
      "epoch": 0.0943952802359882,
      "grad_norm": 3.194821834564209,
      "learning_rate": 8.596699001693255e-05,
      "loss": 1.8343,
      "step": 32
    },
    {
      "epoch": 0.09734513274336283,
      "grad_norm": 3.587616443634033,
      "learning_rate": 8.473291852294987e-05,
      "loss": 1.7127,
      "step": 33
    },
    {
      "epoch": 0.10029498525073746,
      "grad_norm": 2.2063448429107666,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.7964,
      "step": 34
    },
    {
      "epoch": 0.10324483775811209,
      "grad_norm": 2.4330263137817383,
      "learning_rate": 8.213938048432697e-05,
      "loss": 1.4703,
      "step": 35
    },
    {
      "epoch": 0.10619469026548672,
      "grad_norm": 4.004213333129883,
      "learning_rate": 8.07830737662829e-05,
      "loss": 1.8824,
      "step": 36
    },
    {
      "epoch": 0.10619469026548672,
      "eval_loss": 0.9778890609741211,
      "eval_runtime": 44.3941,
      "eval_samples_per_second": 6.442,
      "eval_steps_per_second": 0.811,
      "step": 36
    },
    {
      "epoch": 0.10914454277286136,
      "grad_norm": 2.327597141265869,
      "learning_rate": 7.938926261462366e-05,
      "loss": 2.1188,
      "step": 37
    },
    {
      "epoch": 0.11209439528023599,
      "grad_norm": 2.4239344596862793,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.8542,
      "step": 38
    },
    {
      "epoch": 0.11504424778761062,
      "grad_norm": 2.819444417953491,
      "learning_rate": 7.649596321166024e-05,
      "loss": 3.0174,
      "step": 39
    },
    {
      "epoch": 0.11799410029498525,
      "grad_norm": 2.2955985069274902,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.8439,
      "step": 40
    },
    {
      "epoch": 0.12094395280235988,
      "grad_norm": 2.412621021270752,
      "learning_rate": 7.347357813929454e-05,
      "loss": 1.3983,
      "step": 41
    },
    {
      "epoch": 0.12389380530973451,
      "grad_norm": 2.1134157180786133,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.4839,
      "step": 42
    },
    {
      "epoch": 0.12684365781710916,
      "grad_norm": 3.077287197113037,
      "learning_rate": 7.033683215379002e-05,
      "loss": 1.7456,
      "step": 43
    },
    {
      "epoch": 0.12979351032448377,
      "grad_norm": 2.8075761795043945,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.6592,
      "step": 44
    },
    {
      "epoch": 0.13274336283185842,
      "grad_norm": 3.0821831226348877,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.0796,
      "step": 45
    },
    {
      "epoch": 0.13274336283185842,
      "eval_loss": 0.9554188251495361,
      "eval_runtime": 44.4051,
      "eval_samples_per_second": 6.441,
      "eval_steps_per_second": 0.811,
      "step": 45
    },
    {
      "epoch": 0.13569321533923304,
      "grad_norm": 1.895007848739624,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.5245,
      "step": 46
    },
    {
      "epoch": 0.13864306784660768,
      "grad_norm": 1.7392289638519287,
      "learning_rate": 6.378186779084995e-05,
      "loss": 1.772,
      "step": 47
    },
    {
      "epoch": 0.1415929203539823,
      "grad_norm": 2.4289803504943848,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.5995,
      "step": 48
    },
    {
      "epoch": 0.14454277286135694,
      "grad_norm": 2.307551383972168,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 2.3335,
      "step": 49
    },
    {
      "epoch": 0.14749262536873156,
      "grad_norm": 2.0941660404205322,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.5142,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.347789697024e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}