|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.09082652134423251,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0018165304268846503,
      "grad_norm": 0.3600485622882843,
      "learning_rate": 1e-05,
      "loss": 0.4664,
      "step": 1
    },
    {
      "epoch": 0.0018165304268846503,
      "eval_loss": 0.4619494378566742,
      "eval_runtime": 108.3327,
      "eval_samples_per_second": 4.283,
      "eval_steps_per_second": 0.535,
      "step": 1
    },
    {
      "epoch": 0.0036330608537693005,
      "grad_norm": 0.33697664737701416,
      "learning_rate": 2e-05,
      "loss": 0.4766,
      "step": 2
    },
    {
      "epoch": 0.005449591280653951,
      "grad_norm": 0.3403802812099457,
      "learning_rate": 3e-05,
      "loss": 0.4548,
      "step": 3
    },
    {
      "epoch": 0.007266121707538601,
      "grad_norm": 0.3392443060874939,
      "learning_rate": 4e-05,
      "loss": 0.4661,
      "step": 4
    },
    {
      "epoch": 0.009082652134423252,
      "grad_norm": 0.39870426058769226,
      "learning_rate": 5e-05,
      "loss": 0.4286,
      "step": 5
    },
    {
      "epoch": 0.010899182561307902,
      "grad_norm": 0.3802020847797394,
      "learning_rate": 6e-05,
      "loss": 0.4409,
      "step": 6
    },
    {
      "epoch": 0.012715712988192553,
      "grad_norm": 0.3801540732383728,
      "learning_rate": 7e-05,
      "loss": 0.4209,
      "step": 7
    },
    {
      "epoch": 0.014532243415077202,
      "grad_norm": 0.38932061195373535,
      "learning_rate": 8e-05,
      "loss": 0.4188,
      "step": 8
    },
    {
      "epoch": 0.01634877384196185,
      "grad_norm": 0.3350403308868408,
      "learning_rate": 9e-05,
      "loss": 0.396,
      "step": 9
    },
    {
      "epoch": 0.01634877384196185,
      "eval_loss": 0.3949369788169861,
      "eval_runtime": 108.084,
      "eval_samples_per_second": 4.293,
      "eval_steps_per_second": 0.537,
      "step": 9
    },
    {
      "epoch": 0.018165304268846504,
      "grad_norm": 0.33399584889411926,
      "learning_rate": 0.0001,
      "loss": 0.3662,
      "step": 10
    },
    {
      "epoch": 0.019981834695731154,
      "grad_norm": 0.3276408314704895,
      "learning_rate": 9.99695413509548e-05,
      "loss": 0.3859,
      "step": 11
    },
    {
      "epoch": 0.021798365122615803,
      "grad_norm": 0.3103690445423126,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.3512,
      "step": 12
    },
    {
      "epoch": 0.023614895549500452,
      "grad_norm": 0.45643332600593567,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.3913,
      "step": 13
    },
    {
      "epoch": 0.025431425976385105,
      "grad_norm": 0.38945508003234863,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.3104,
      "step": 14
    },
    {
      "epoch": 0.027247956403269755,
      "grad_norm": 0.33456993103027344,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.3705,
      "step": 15
    },
    {
      "epoch": 0.029064486830154404,
      "grad_norm": 0.4321489632129669,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.3217,
      "step": 16
    },
    {
      "epoch": 0.030881017257039057,
      "grad_norm": 0.3709757924079895,
      "learning_rate": 9.851478631379982e-05,
      "loss": 0.3113,
      "step": 17
    },
    {
      "epoch": 0.0326975476839237,
      "grad_norm": 0.33215710520744324,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.3293,
      "step": 18
    },
    {
      "epoch": 0.0326975476839237,
      "eval_loss": 0.32641759514808655,
      "eval_runtime": 108.5742,
      "eval_samples_per_second": 4.274,
      "eval_steps_per_second": 0.534,
      "step": 18
    },
    {
      "epoch": 0.03451407811080836,
      "grad_norm": 0.40216928720474243,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.385,
      "step": 19
    },
    {
      "epoch": 0.03633060853769301,
      "grad_norm": 0.3322986662387848,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.2936,
      "step": 20
    },
    {
      "epoch": 0.03814713896457766,
      "grad_norm": 0.339769572019577,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.329,
      "step": 21
    },
    {
      "epoch": 0.03996366939146231,
      "grad_norm": 0.34345075488090515,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.2768,
      "step": 22
    },
    {
      "epoch": 0.04178019981834696,
      "grad_norm": 0.3251555860042572,
      "learning_rate": 9.493970231495835e-05,
      "loss": 0.2831,
      "step": 23
    },
    {
      "epoch": 0.043596730245231606,
      "grad_norm": 0.35843387246131897,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.402,
      "step": 24
    },
    {
      "epoch": 0.045413260672116255,
      "grad_norm": 0.31343409419059753,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.2751,
      "step": 25
    },
    {
      "epoch": 0.047229791099000905,
      "grad_norm": 0.3558681607246399,
      "learning_rate": 9.24024048078213e-05,
      "loss": 0.3265,
      "step": 26
    },
    {
      "epoch": 0.04904632152588556,
      "grad_norm": 0.30473554134368896,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.241,
      "step": 27
    },
    {
      "epoch": 0.04904632152588556,
      "eval_loss": 0.30456194281578064,
      "eval_runtime": 108.8274,
      "eval_samples_per_second": 4.264,
      "eval_steps_per_second": 0.533,
      "step": 27
    },
    {
      "epoch": 0.05086285195277021,
      "grad_norm": 0.4298734962940216,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.2689,
      "step": 28
    },
    {
      "epoch": 0.05267938237965486,
      "grad_norm": 0.3488064110279083,
      "learning_rate": 8.940053768033609e-05,
      "loss": 0.2775,
      "step": 29
    },
    {
      "epoch": 0.05449591280653951,
      "grad_norm": 0.4139954745769501,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.3113,
      "step": 30
    },
    {
      "epoch": 0.05631244323342416,
      "grad_norm": 0.3354235887527466,
      "learning_rate": 8.715724127386972e-05,
      "loss": 0.3051,
      "step": 31
    },
    {
      "epoch": 0.05812897366030881,
      "grad_norm": 0.41172316670417786,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.3233,
      "step": 32
    },
    {
      "epoch": 0.05994550408719346,
      "grad_norm": 0.3322085738182068,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.3209,
      "step": 33
    },
    {
      "epoch": 0.061762034514078114,
      "grad_norm": 0.3583935797214508,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.3255,
      "step": 34
    },
    {
      "epoch": 0.06357856494096276,
      "grad_norm": 0.2800469696521759,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.3104,
      "step": 35
    },
    {
      "epoch": 0.0653950953678474,
      "grad_norm": 0.4980929493904114,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.3561,
      "step": 36
    },
    {
      "epoch": 0.0653950953678474,
      "eval_loss": 0.2919258773326874,
      "eval_runtime": 108.6083,
      "eval_samples_per_second": 4.272,
      "eval_steps_per_second": 0.534,
      "step": 36
    },
    {
      "epoch": 0.06721162579473206,
      "grad_norm": 0.3218163549900055,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.2759,
      "step": 37
    },
    {
      "epoch": 0.06902815622161672,
      "grad_norm": 0.29215186834335327,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.3034,
      "step": 38
    },
    {
      "epoch": 0.07084468664850137,
      "grad_norm": 0.3236890733242035,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.278,
      "step": 39
    },
    {
      "epoch": 0.07266121707538602,
      "grad_norm": 0.3118452727794647,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.3184,
      "step": 40
    },
    {
      "epoch": 0.07447774750227067,
      "grad_norm": 0.2818981111049652,
      "learning_rate": 7.347357813929454e-05,
      "loss": 0.2823,
      "step": 41
    },
    {
      "epoch": 0.07629427792915532,
      "grad_norm": 0.2725626230239868,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.2795,
      "step": 42
    },
    {
      "epoch": 0.07811080835603997,
      "grad_norm": 0.28230541944503784,
      "learning_rate": 7.033683215379002e-05,
      "loss": 0.2023,
      "step": 43
    },
    {
      "epoch": 0.07992733878292461,
      "grad_norm": 0.24861739575862885,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.2655,
      "step": 44
    },
    {
      "epoch": 0.08174386920980926,
      "grad_norm": 0.29663726687431335,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.2904,
      "step": 45
    },
    {
      "epoch": 0.08174386920980926,
      "eval_loss": 0.2852410078048706,
      "eval_runtime": 108.7895,
      "eval_samples_per_second": 4.265,
      "eval_steps_per_second": 0.533,
      "step": 45
    },
    {
      "epoch": 0.08356039963669391,
      "grad_norm": 0.27381181716918945,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.2109,
      "step": 46
    },
    {
      "epoch": 0.08537693006357856,
      "grad_norm": 0.3276028335094452,
      "learning_rate": 6.378186779084995e-05,
      "loss": 0.3141,
      "step": 47
    },
    {
      "epoch": 0.08719346049046321,
      "grad_norm": 0.2625039219856262,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.2521,
      "step": 48
    },
    {
      "epoch": 0.08900999091734786,
      "grad_norm": 0.31556904315948486,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 0.2891,
      "step": 49
    },
    {
      "epoch": 0.09082652134423251,
      "grad_norm": 0.2701485753059387,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.2474,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.19631055627223e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|