|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.5306122448979593,
  "eval_steps": 13,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01020408163265306,
      "eval_loss": NaN,
      "eval_runtime": 5.052,
      "eval_samples_per_second": 32.661,
      "eval_steps_per_second": 4.157,
      "step": 1
    },
    {
      "epoch": 0.030612244897959183,
      "grad_norm": NaN,
      "learning_rate": 3e-05,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.061224489795918366,
      "grad_norm": NaN,
      "learning_rate": 6e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.09183673469387756,
      "grad_norm": NaN,
      "learning_rate": 9e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.12244897959183673,
      "grad_norm": NaN,
      "learning_rate": 9.994965332706573e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.1326530612244898,
      "eval_loss": NaN,
      "eval_runtime": 4.5223,
      "eval_samples_per_second": 36.486,
      "eval_steps_per_second": 4.644,
      "step": 13
    },
    {
      "epoch": 0.15306122448979592,
      "grad_norm": NaN,
      "learning_rate": 9.968561049466214e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.1836734693877551,
      "grad_norm": NaN,
      "learning_rate": 9.919647942993148e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": NaN,
      "learning_rate": 9.848447601883435e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.24489795918367346,
      "grad_norm": NaN,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.2653061224489796,
      "eval_loss": NaN,
      "eval_runtime": 4.5279,
      "eval_samples_per_second": 36.441,
      "eval_steps_per_second": 4.638,
      "step": 26
    },
    {
      "epoch": 0.2755102040816326,
      "grad_norm": NaN,
      "learning_rate": 9.640574942595196e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.30612244897959184,
      "grad_norm": NaN,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.336734693877551,
      "grad_norm": NaN,
      "learning_rate": 9.348705665778478e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.3673469387755102,
      "grad_norm": NaN,
      "learning_rate": 9.172866268606513e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.3979591836734694,
      "grad_norm": NaN,
      "learning_rate": 8.978122744408906e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.3979591836734694,
      "eval_loss": NaN,
      "eval_runtime": 4.5303,
      "eval_samples_per_second": 36.421,
      "eval_steps_per_second": 4.635,
      "step": 39
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": NaN,
      "learning_rate": 8.765357330018056e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.45918367346938777,
      "grad_norm": NaN,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.4897959183673469,
      "grad_norm": NaN,
      "learning_rate": 8.289693629698564e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.5204081632653061,
      "grad_norm": NaN,
      "learning_rate": 8.0289502192041e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 0.5306122448979592,
      "eval_loss": NaN,
      "eval_runtime": 4.5333,
      "eval_samples_per_second": 36.397,
      "eval_steps_per_second": 4.632,
      "step": 52
    },
    {
      "epoch": 0.5510204081632653,
      "grad_norm": NaN,
      "learning_rate": 7.754484907260513e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.5816326530612245,
      "grad_norm": NaN,
      "learning_rate": 7.467541090321735e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": NaN,
      "learning_rate": 7.169418695587791e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": NaN,
      "learning_rate": 6.861468292009727e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.6632653061224489,
      "eval_loss": NaN,
      "eval_runtime": 4.5236,
      "eval_samples_per_second": 36.476,
      "eval_steps_per_second": 4.642,
      "step": 65
    },
    {
      "epoch": 0.673469387755102,
      "grad_norm": NaN,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 0.7040816326530612,
      "grad_norm": NaN,
      "learning_rate": 6.22170203068947e-05,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.7346938775510204,
      "grad_norm": NaN,
      "learning_rate": 5.8927844739931834e-05,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.7653061224489796,
      "grad_norm": NaN,
      "learning_rate": 5.559822380516539e-05,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.7959183673469388,
      "grad_norm": NaN,
      "learning_rate": 5.2243241517525754e-05,
      "loss": 0.0,
      "step": 78
    },
    {
      "epoch": 0.7959183673469388,
      "eval_loss": NaN,
      "eval_runtime": 4.5322,
      "eval_samples_per_second": 36.406,
      "eval_steps_per_second": 4.634,
      "step": 78
    },
    {
      "epoch": 0.826530612244898,
      "grad_norm": NaN,
      "learning_rate": 4.887809678520976e-05,
      "loss": 0.0,
      "step": 81
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": NaN,
      "learning_rate": 4.551803455482833e-05,
      "loss": 0.0,
      "step": 84
    },
    {
      "epoch": 0.8877551020408163,
      "grad_norm": NaN,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 0.0,
      "step": 87
    },
    {
      "epoch": 0.9183673469387755,
      "grad_norm": NaN,
      "learning_rate": 3.887395330218429e-05,
      "loss": 0.0,
      "step": 90
    },
    {
      "epoch": 0.9285714285714286,
      "eval_loss": NaN,
      "eval_runtime": 4.5272,
      "eval_samples_per_second": 36.447,
      "eval_steps_per_second": 4.639,
      "step": 91
    },
    {
      "epoch": 0.9489795918367347,
      "grad_norm": NaN,
      "learning_rate": 3.562003362839914e-05,
      "loss": 0.0,
      "step": 93
    },
    {
      "epoch": 0.9795918367346939,
      "grad_norm": NaN,
      "learning_rate": 3.243125879593286e-05,
      "loss": 0.0,
      "step": 96
    },
    {
      "epoch": 1.010204081632653,
      "grad_norm": NaN,
      "learning_rate": 2.932207475167398e-05,
      "loss": 0.0,
      "step": 99
    },
    {
      "epoch": 1.0408163265306123,
      "grad_norm": NaN,
      "learning_rate": 2.630656687635007e-05,
      "loss": 0.0,
      "step": 102
    },
    {
      "epoch": 1.0612244897959184,
      "eval_loss": NaN,
      "eval_runtime": 4.5257,
      "eval_samples_per_second": 36.458,
      "eval_steps_per_second": 4.64,
      "step": 104
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": NaN,
      "learning_rate": 2.3398396174233178e-05,
      "loss": 0.0,
      "step": 105
    },
    {
      "epoch": 1.1020408163265305,
      "grad_norm": NaN,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.0,
      "step": 108
    },
    {
      "epoch": 1.1326530612244898,
      "grad_norm": NaN,
      "learning_rate": 1.7956219300748793e-05,
      "loss": 0.0,
      "step": 111
    },
    {
      "epoch": 1.163265306122449,
      "grad_norm": NaN,
      "learning_rate": 1.544686755065677e-05,
      "loss": 0.0,
      "step": 114
    },
    {
      "epoch": 1.193877551020408,
      "grad_norm": NaN,
      "learning_rate": 1.3094050125632972e-05,
      "loss": 0.0,
      "step": 117
    },
    {
      "epoch": 1.193877551020408,
      "eval_loss": NaN,
      "eval_runtime": 4.5276,
      "eval_samples_per_second": 36.443,
      "eval_steps_per_second": 4.638,
      "step": 117
    },
    {
      "epoch": 1.2244897959183674,
      "grad_norm": NaN,
      "learning_rate": 1.090842587659851e-05,
      "loss": 0.0,
      "step": 120
    },
    {
      "epoch": 1.2551020408163265,
      "grad_norm": NaN,
      "learning_rate": 8.899896227604509e-06,
      "loss": 0.0,
      "step": 123
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": NaN,
      "learning_rate": 7.077560319906695e-06,
      "loss": 0.0,
      "step": 126
    },
    {
      "epoch": 1.316326530612245,
      "grad_norm": NaN,
      "learning_rate": 5.449673790581611e-06,
      "loss": 0.0,
      "step": 129
    },
    {
      "epoch": 1.3265306122448979,
      "eval_loss": NaN,
      "eval_runtime": 4.5275,
      "eval_samples_per_second": 36.444,
      "eval_steps_per_second": 4.638,
      "step": 130
    },
    {
      "epoch": 1.346938775510204,
      "grad_norm": NaN,
      "learning_rate": 4.023611372427471e-06,
      "loss": 0.0,
      "step": 132
    },
    {
      "epoch": 1.3775510204081631,
      "grad_norm": NaN,
      "learning_rate": 2.8058334845816213e-06,
      "loss": 0.0,
      "step": 135
    },
    {
      "epoch": 1.4081632653061225,
      "grad_norm": NaN,
      "learning_rate": 1.8018569652073381e-06,
      "loss": 0.0,
      "step": 138
    },
    {
      "epoch": 1.4387755102040816,
      "grad_norm": NaN,
      "learning_rate": 1.016230078838226e-06,
      "loss": 0.0,
      "step": 141
    },
    {
      "epoch": 1.4591836734693877,
      "eval_loss": NaN,
      "eval_runtime": 4.5304,
      "eval_samples_per_second": 36.421,
      "eval_steps_per_second": 4.635,
      "step": 143
    },
    {
      "epoch": 1.469387755102041,
      "grad_norm": NaN,
      "learning_rate": 4.52511911603265e-07,
      "loss": 0.0,
      "step": 144
    },
    {
      "epoch": 1.5,
      "grad_norm": NaN,
      "learning_rate": 1.132562476771959e-07,
      "loss": 0.0,
      "step": 147
    },
    {
      "epoch": 1.5306122448979593,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 150
    }
  ],
  "logging_steps": 3,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.081320507998208e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|