{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.024783147459727387,
  "eval_steps": 5,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004956629491945477,
      "grad_norm": 0.7391602396965027,
      "learning_rate": 1e-05,
      "loss": 2.3969,
      "step": 1
    },
    {
      "epoch": 0.0004956629491945477,
      "eval_loss": 2.63633394241333,
      "eval_runtime": 19.1271,
      "eval_samples_per_second": 88.827,
      "eval_steps_per_second": 11.136,
      "step": 1
    },
    {
      "epoch": 0.0009913258983890955,
      "grad_norm": 0.8179469704627991,
      "learning_rate": 2e-05,
      "loss": 2.3856,
      "step": 2
    },
    {
      "epoch": 0.001486988847583643,
      "grad_norm": 0.7126672863960266,
      "learning_rate": 3e-05,
      "loss": 2.3,
      "step": 3
    },
    {
      "epoch": 0.001982651796778191,
      "grad_norm": 1.0962400436401367,
      "learning_rate": 4e-05,
      "loss": 3.1446,
      "step": 4
    },
    {
      "epoch": 0.0024783147459727386,
      "grad_norm": 0.7755838632583618,
      "learning_rate": 5e-05,
      "loss": 2.4778,
      "step": 5
    },
    {
      "epoch": 0.0024783147459727386,
      "eval_loss": 2.6211066246032715,
      "eval_runtime": 17.2492,
      "eval_samples_per_second": 98.497,
      "eval_steps_per_second": 12.348,
      "step": 5
    },
    {
      "epoch": 0.002973977695167286,
      "grad_norm": 1.0329855680465698,
      "learning_rate": 6e-05,
      "loss": 3.06,
      "step": 6
    },
    {
      "epoch": 0.003469640644361834,
      "grad_norm": 0.7987164855003357,
      "learning_rate": 7e-05,
      "loss": 2.5097,
      "step": 7
    },
    {
      "epoch": 0.003965303593556382,
      "grad_norm": 0.8003743886947632,
      "learning_rate": 8e-05,
      "loss": 2.4667,
      "step": 8
    },
    {
      "epoch": 0.0044609665427509295,
      "grad_norm": 0.7125903367996216,
      "learning_rate": 9e-05,
      "loss": 2.2826,
      "step": 9
    },
    {
      "epoch": 0.004956629491945477,
      "grad_norm": 0.8778437376022339,
      "learning_rate": 0.0001,
      "loss": 2.7076,
      "step": 10
    },
    {
      "epoch": 0.004956629491945477,
      "eval_loss": 2.435589075088501,
      "eval_runtime": 17.2728,
      "eval_samples_per_second": 98.363,
      "eval_steps_per_second": 12.332,
      "step": 10
    },
    {
      "epoch": 0.005452292441140025,
      "grad_norm": 0.7835959196090698,
      "learning_rate": 9.98458666866564e-05,
      "loss": 2.587,
      "step": 11
    },
    {
      "epoch": 0.005947955390334572,
      "grad_norm": 0.6849549412727356,
      "learning_rate": 9.938441702975689e-05,
      "loss": 2.4522,
      "step": 12
    },
    {
      "epoch": 0.00644361833952912,
      "grad_norm": 0.7166538238525391,
      "learning_rate": 9.861849601988383e-05,
      "loss": 2.4591,
      "step": 13
    },
    {
      "epoch": 0.006939281288723668,
      "grad_norm": 0.5679525136947632,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.0319,
      "step": 14
    },
    {
      "epoch": 0.007434944237918215,
      "grad_norm": 0.6390686631202698,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.2686,
      "step": 15
    },
    {
      "epoch": 0.007434944237918215,
      "eval_loss": 2.1834659576416016,
      "eval_runtime": 17.2849,
      "eval_samples_per_second": 98.294,
      "eval_steps_per_second": 12.323,
      "step": 15
    },
    {
      "epoch": 0.007930607187112764,
      "grad_norm": 0.6615875959396362,
      "learning_rate": 9.45503262094184e-05,
      "loss": 2.2409,
      "step": 16
    },
    {
      "epoch": 0.00842627013630731,
      "grad_norm": 0.5758230686187744,
      "learning_rate": 9.263200821770461e-05,
      "loss": 1.8906,
      "step": 17
    },
    {
      "epoch": 0.008921933085501859,
      "grad_norm": 0.6313810348510742,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.0695,
      "step": 18
    },
    {
      "epoch": 0.009417596034696406,
      "grad_norm": 0.4957845211029053,
      "learning_rate": 8.802029828000156e-05,
      "loss": 1.7364,
      "step": 19
    },
    {
      "epoch": 0.009913258983890954,
      "grad_norm": 0.6371930837631226,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.0556,
      "step": 20
    },
    {
      "epoch": 0.009913258983890954,
      "eval_loss": 1.9821568727493286,
      "eval_runtime": 17.2924,
      "eval_samples_per_second": 98.251,
      "eval_steps_per_second": 12.318,
      "step": 20
    },
    {
      "epoch": 0.010408921933085501,
      "grad_norm": 0.6001903414726257,
      "learning_rate": 8.247240241650918e-05,
      "loss": 1.9351,
      "step": 21
    },
    {
      "epoch": 0.01090458488228005,
      "grad_norm": 0.654202401638031,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.889,
      "step": 22
    },
    {
      "epoch": 0.011400247831474598,
      "grad_norm": 0.6593927145004272,
      "learning_rate": 7.612492823579745e-05,
      "loss": 2.0221,
      "step": 23
    },
    {
      "epoch": 0.011895910780669145,
      "grad_norm": 0.48587918281555176,
      "learning_rate": 7.269952498697734e-05,
      "loss": 1.5167,
      "step": 24
    },
    {
      "epoch": 0.012391573729863693,
      "grad_norm": 0.5013470649719238,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.581,
      "step": 25
    },
    {
      "epoch": 0.012391573729863693,
      "eval_loss": 1.8297536373138428,
      "eval_runtime": 17.273,
      "eval_samples_per_second": 98.361,
      "eval_steps_per_second": 12.331,
      "step": 25
    },
    {
      "epoch": 0.01288723667905824,
      "grad_norm": 0.7884762287139893,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.0006,
      "step": 26
    },
    {
      "epoch": 0.013382899628252789,
      "grad_norm": 0.6535519957542419,
      "learning_rate": 6.167226819279528e-05,
      "loss": 1.6955,
      "step": 27
    },
    {
      "epoch": 0.013878562577447335,
      "grad_norm": 0.6614362597465515,
      "learning_rate": 5.782172325201155e-05,
      "loss": 1.7235,
      "step": 28
    },
    {
      "epoch": 0.014374225526641884,
      "grad_norm": 0.569136917591095,
      "learning_rate": 5.392295478639225e-05,
      "loss": 1.6187,
      "step": 29
    },
    {
      "epoch": 0.01486988847583643,
      "grad_norm": 0.8128046989440918,
      "learning_rate": 5e-05,
      "loss": 1.8391,
      "step": 30
    },
    {
      "epoch": 0.01486988847583643,
      "eval_loss": 1.7055069208145142,
      "eval_runtime": 17.3629,
      "eval_samples_per_second": 97.852,
      "eval_steps_per_second": 12.268,
      "step": 30
    },
    {
      "epoch": 0.015365551425030979,
      "grad_norm": 0.5898133516311646,
      "learning_rate": 4.607704521360776e-05,
      "loss": 1.4803,
      "step": 31
    },
    {
      "epoch": 0.015861214374225528,
      "grad_norm": 0.8634248375892639,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 1.9185,
      "step": 32
    },
    {
      "epoch": 0.016356877323420074,
      "grad_norm": 0.7606371641159058,
      "learning_rate": 3.832773180720475e-05,
      "loss": 1.7444,
      "step": 33
    },
    {
      "epoch": 0.01685254027261462,
      "grad_norm": 0.827926516532898,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 1.5359,
      "step": 34
    },
    {
      "epoch": 0.01734820322180917,
      "grad_norm": 0.6055005192756653,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.3024,
      "step": 35
    },
    {
      "epoch": 0.01734820322180917,
      "eval_loss": 1.6104322671890259,
      "eval_runtime": 17.2589,
      "eval_samples_per_second": 98.442,
      "eval_steps_per_second": 12.341,
      "step": 35
    },
    {
      "epoch": 0.017843866171003718,
      "grad_norm": 0.8044369220733643,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 1.5692,
      "step": 36
    },
    {
      "epoch": 0.018339529120198265,
      "grad_norm": 0.6717920303344727,
      "learning_rate": 2.3875071764202563e-05,
      "loss": 1.6285,
      "step": 37
    },
    {
      "epoch": 0.01883519206939281,
      "grad_norm": 0.7943913340568542,
      "learning_rate": 2.061073738537635e-05,
      "loss": 1.5588,
      "step": 38
    },
    {
      "epoch": 0.019330855018587362,
      "grad_norm": 0.7974594235420227,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 1.4334,
      "step": 39
    },
    {
      "epoch": 0.01982651796778191,
      "grad_norm": 0.7847771048545837,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.6791,
      "step": 40
    },
    {
      "epoch": 0.01982651796778191,
      "eval_loss": 1.5548481941223145,
      "eval_runtime": 17.2904,
      "eval_samples_per_second": 98.263,
      "eval_steps_per_second": 12.319,
      "step": 40
    },
    {
      "epoch": 0.020322180916976455,
      "grad_norm": 0.6804707050323486,
      "learning_rate": 1.1979701719998453e-05,
      "loss": 1.5037,
      "step": 41
    },
    {
      "epoch": 0.020817843866171002,
      "grad_norm": 0.8448418378829956,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.6357,
      "step": 42
    },
    {
      "epoch": 0.021313506815365552,
      "grad_norm": 0.8616818189620972,
      "learning_rate": 7.367991782295391e-06,
      "loss": 1.5866,
      "step": 43
    },
    {
      "epoch": 0.0218091697645601,
      "grad_norm": 0.6975346803665161,
      "learning_rate": 5.449673790581611e-06,
      "loss": 1.4729,
      "step": 44
    },
    {
      "epoch": 0.022304832713754646,
      "grad_norm": 0.6911585927009583,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.3803,
      "step": 45
    },
    {
      "epoch": 0.022304832713754646,
      "eval_loss": 1.5319888591766357,
      "eval_runtime": 17.2847,
      "eval_samples_per_second": 98.295,
      "eval_steps_per_second": 12.323,
      "step": 45
    },
    {
      "epoch": 0.022800495662949196,
      "grad_norm": 0.6003568172454834,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 1.4585,
      "step": 46
    },
    {
      "epoch": 0.023296158612143743,
      "grad_norm": 0.7089613676071167,
      "learning_rate": 1.3815039801161721e-06,
      "loss": 1.4629,
      "step": 47
    },
    {
      "epoch": 0.02379182156133829,
      "grad_norm": 0.7499310970306396,
      "learning_rate": 6.15582970243117e-07,
      "loss": 1.4805,
      "step": 48
    },
    {
      "epoch": 0.024287484510532836,
      "grad_norm": 0.7139114141464233,
      "learning_rate": 1.5413331334360182e-07,
      "loss": 1.5951,
      "step": 49
    },
    {
      "epoch": 0.024783147459727387,
      "grad_norm": 0.7746477127075195,
      "learning_rate": 0.0,
      "loss": 1.6068,
      "step": 50
    },
    {
      "epoch": 0.024783147459727387,
      "eval_loss": 1.527747392654419,
      "eval_runtime": 17.2729,
      "eval_samples_per_second": 98.362,
      "eval_steps_per_second": 12.331,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 763606723461120.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}