{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.07451564828614009,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0014903129657228018,
      "grad_norm": 2.0342986583709717,
      "learning_rate": 1e-05,
      "loss": 3.4859,
      "step": 1
    },
    {
      "epoch": 0.0014903129657228018,
      "eval_loss": 3.4842216968536377,
      "eval_runtime": 3.0911,
      "eval_samples_per_second": 182.782,
      "eval_steps_per_second": 22.969,
      "step": 1
    },
    {
      "epoch": 0.0029806259314456036,
      "grad_norm": 1.7465375661849976,
      "learning_rate": 2e-05,
      "loss": 3.4165,
      "step": 2
    },
    {
      "epoch": 0.004470938897168405,
      "grad_norm": 1.9989283084869385,
      "learning_rate": 3e-05,
      "loss": 3.4885,
      "step": 3
    },
    {
      "epoch": 0.005961251862891207,
      "grad_norm": 1.7901273965835571,
      "learning_rate": 4e-05,
      "loss": 3.388,
      "step": 4
    },
    {
      "epoch": 0.007451564828614009,
      "grad_norm": 1.8343313932418823,
      "learning_rate": 5e-05,
      "loss": 3.4413,
      "step": 5
    },
    {
      "epoch": 0.00894187779433681,
      "grad_norm": 1.9110901355743408,
      "learning_rate": 6e-05,
      "loss": 3.4883,
      "step": 6
    },
    {
      "epoch": 0.010432190760059613,
      "grad_norm": 1.8050709962844849,
      "learning_rate": 7e-05,
      "loss": 3.4939,
      "step": 7
    },
    {
      "epoch": 0.011922503725782414,
      "grad_norm": 1.8594285249710083,
      "learning_rate": 8e-05,
      "loss": 3.3683,
      "step": 8
    },
    {
      "epoch": 0.013412816691505217,
      "grad_norm": 2.0457093715667725,
      "learning_rate": 9e-05,
      "loss": 3.4379,
      "step": 9
    },
    {
      "epoch": 0.013412816691505217,
      "eval_loss": 3.1294145584106445,
      "eval_runtime": 2.6674,
      "eval_samples_per_second": 211.814,
      "eval_steps_per_second": 26.617,
      "step": 9
    },
    {
      "epoch": 0.014903129657228018,
      "grad_norm": 1.7745529413223267,
      "learning_rate": 0.0001,
      "loss": 3.1102,
      "step": 10
    },
    {
      "epoch": 0.01639344262295082,
      "grad_norm": 1.6581326723098755,
      "learning_rate": 9.99695413509548e-05,
      "loss": 2.9326,
      "step": 11
    },
    {
      "epoch": 0.01788375558867362,
      "grad_norm": 1.645290732383728,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.813,
      "step": 12
    },
    {
      "epoch": 0.019374068554396422,
      "grad_norm": 1.6816961765289307,
      "learning_rate": 9.972609476841367e-05,
      "loss": 2.6876,
      "step": 13
    },
    {
      "epoch": 0.020864381520119227,
      "grad_norm": 1.6662911176681519,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.6106,
      "step": 14
    },
    {
      "epoch": 0.022354694485842028,
      "grad_norm": 1.542525053024292,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.5321,
      "step": 15
    },
    {
      "epoch": 0.02384500745156483,
      "grad_norm": 1.4631415605545044,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.3101,
      "step": 16
    },
    {
      "epoch": 0.02533532041728763,
      "grad_norm": 1.5643318891525269,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.2768,
      "step": 17
    },
    {
      "epoch": 0.026825633383010434,
      "grad_norm": 1.6115552186965942,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.1149,
      "step": 18
    },
    {
      "epoch": 0.026825633383010434,
      "eval_loss": 2.07175612449646,
      "eval_runtime": 2.6646,
      "eval_samples_per_second": 212.036,
      "eval_steps_per_second": 26.645,
      "step": 18
    },
    {
      "epoch": 0.028315946348733235,
      "grad_norm": 1.5824073553085327,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.0145,
      "step": 19
    },
    {
      "epoch": 0.029806259314456036,
      "grad_norm": 1.674318790435791,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.047,
      "step": 20
    },
    {
      "epoch": 0.03129657228017884,
      "grad_norm": 1.6824314594268799,
      "learning_rate": 9.635919272833938e-05,
      "loss": 1.8464,
      "step": 21
    },
    {
      "epoch": 0.03278688524590164,
      "grad_norm": 1.9013198614120483,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.7421,
      "step": 22
    },
    {
      "epoch": 0.03427719821162444,
      "grad_norm": 1.8643028736114502,
      "learning_rate": 9.493970231495835e-05,
      "loss": 1.7526,
      "step": 23
    },
    {
      "epoch": 0.03576751117734724,
      "grad_norm": 1.9997265338897705,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.6996,
      "step": 24
    },
    {
      "epoch": 0.037257824143070044,
      "grad_norm": 2.215336322784424,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.6416,
      "step": 25
    },
    {
      "epoch": 0.038748137108792845,
      "grad_norm": 2.1552388668060303,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.3777,
      "step": 26
    },
    {
      "epoch": 0.040238450074515646,
      "grad_norm": 2.4514808654785156,
      "learning_rate": 9.145187862775209e-05,
      "loss": 1.3392,
      "step": 27
    },
    {
      "epoch": 0.040238450074515646,
      "eval_loss": 1.2882710695266724,
      "eval_runtime": 2.7452,
      "eval_samples_per_second": 205.816,
      "eval_steps_per_second": 25.864,
      "step": 27
    },
    {
      "epoch": 0.041728763040238454,
      "grad_norm": 2.4231464862823486,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.3952,
      "step": 28
    },
    {
      "epoch": 0.043219076005961254,
      "grad_norm": 2.0393948554992676,
      "learning_rate": 8.940053768033609e-05,
      "loss": 1.1851,
      "step": 29
    },
    {
      "epoch": 0.044709388971684055,
      "grad_norm": 1.9575780630111694,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.1068,
      "step": 30
    },
    {
      "epoch": 0.046199701937406856,
      "grad_norm": 1.8777871131896973,
      "learning_rate": 8.715724127386972e-05,
      "loss": 1.0618,
      "step": 31
    },
    {
      "epoch": 0.04769001490312966,
      "grad_norm": 2.131181001663208,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.9559,
      "step": 32
    },
    {
      "epoch": 0.04918032786885246,
      "grad_norm": 2.0089690685272217,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.903,
      "step": 33
    },
    {
      "epoch": 0.05067064083457526,
      "grad_norm": 1.756807565689087,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.8165,
      "step": 34
    },
    {
      "epoch": 0.05216095380029806,
      "grad_norm": 2.0079643726348877,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.8495,
      "step": 35
    },
    {
      "epoch": 0.05365126676602087,
      "grad_norm": 2.026780366897583,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.8717,
      "step": 36
    },
    {
      "epoch": 0.05365126676602087,
      "eval_loss": 0.7020303606987,
      "eval_runtime": 2.7139,
      "eval_samples_per_second": 208.188,
      "eval_steps_per_second": 26.162,
      "step": 36
    },
    {
      "epoch": 0.05514157973174367,
      "grad_norm": 2.029473304748535,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.6712,
      "step": 37
    },
    {
      "epoch": 0.05663189269746647,
      "grad_norm": 1.649437427520752,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.6271,
      "step": 38
    },
    {
      "epoch": 0.05812220566318927,
      "grad_norm": 1.7296348810195923,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.714,
      "step": 39
    },
    {
      "epoch": 0.05961251862891207,
      "grad_norm": 1.313446044921875,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.5392,
      "step": 40
    },
    {
      "epoch": 0.06110283159463487,
      "grad_norm": 1.2067147493362427,
      "learning_rate": 7.347357813929454e-05,
      "loss": 0.5773,
      "step": 41
    },
    {
      "epoch": 0.06259314456035768,
      "grad_norm": 1.3863986730575562,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.5832,
      "step": 42
    },
    {
      "epoch": 0.06408345752608048,
      "grad_norm": 1.4204689264297485,
      "learning_rate": 7.033683215379002e-05,
      "loss": 0.4459,
      "step": 43
    },
    {
      "epoch": 0.06557377049180328,
      "grad_norm": 1.5218064785003662,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.6278,
      "step": 44
    },
    {
      "epoch": 0.06706408345752608,
      "grad_norm": 1.6203991174697876,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.4472,
      "step": 45
    },
    {
      "epoch": 0.06706408345752608,
      "eval_loss": 0.4401513338088989,
      "eval_runtime": 2.6576,
      "eval_samples_per_second": 212.596,
      "eval_steps_per_second": 26.716,
      "step": 45
    },
    {
      "epoch": 0.06855439642324888,
      "grad_norm": 1.0682734251022339,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.3199,
      "step": 46
    },
    {
      "epoch": 0.07004470938897168,
      "grad_norm": 1.1575658321380615,
      "learning_rate": 6.378186779084995e-05,
      "loss": 0.3394,
      "step": 47
    },
    {
      "epoch": 0.07153502235469449,
      "grad_norm": 1.2181802988052368,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.3898,
      "step": 48
    },
    {
      "epoch": 0.07302533532041729,
      "grad_norm": 1.1372705698013306,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 0.468,
      "step": 49
    },
    {
      "epoch": 0.07451564828614009,
      "grad_norm": 0.888025164604187,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.3541,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 694187930419200.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}