|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0012675556456928458,
  "eval_steps": 3,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.070222582771384e-05,
      "grad_norm": 16.734760284423828,
      "learning_rate": 2e-05,
      "loss": 29.9648,
      "step": 1
    },
    {
      "epoch": 5.070222582771384e-05,
      "eval_loss": 8.524748802185059,
      "eval_runtime": 1510.4845,
      "eval_samples_per_second": 5.498,
      "eval_steps_per_second": 2.749,
      "step": 1
    },
    {
      "epoch": 0.00010140445165542768,
      "grad_norm": 14.790090560913086,
      "learning_rate": 4e-05,
      "loss": 33.7103,
      "step": 2
    },
    {
      "epoch": 0.0001521066774831415,
      "grad_norm": 14.030793190002441,
      "learning_rate": 6e-05,
      "loss": 29.0882,
      "step": 3
    },
    {
      "epoch": 0.0001521066774831415,
      "eval_loss": 8.50710391998291,
      "eval_runtime": 1519.8925,
      "eval_samples_per_second": 5.464,
      "eval_steps_per_second": 2.732,
      "step": 3
    },
    {
      "epoch": 0.00020280890331085536,
      "grad_norm": 12.610204696655273,
      "learning_rate": 8e-05,
      "loss": 28.2266,
      "step": 4
    },
    {
      "epoch": 0.0002535111291385692,
      "grad_norm": 16.985925674438477,
      "learning_rate": 0.0001,
      "loss": 35.9331,
      "step": 5
    },
    {
      "epoch": 0.000304213354966283,
      "grad_norm": 12.154075622558594,
      "learning_rate": 9.938441702975689e-05,
      "loss": 26.2052,
      "step": 6
    },
    {
      "epoch": 0.000304213354966283,
      "eval_loss": 8.200469017028809,
      "eval_runtime": 1521.6515,
      "eval_samples_per_second": 5.458,
      "eval_steps_per_second": 2.729,
      "step": 6
    },
    {
      "epoch": 0.00035491558079399685,
      "grad_norm": 25.15923500061035,
      "learning_rate": 9.755282581475769e-05,
      "loss": 41.4569,
      "step": 7
    },
    {
      "epoch": 0.0004056178066217107,
      "grad_norm": 21.290430068969727,
      "learning_rate": 9.45503262094184e-05,
      "loss": 26.694,
      "step": 8
    },
    {
      "epoch": 0.0004563200324494245,
      "grad_norm": 23.049171447753906,
      "learning_rate": 9.045084971874738e-05,
      "loss": 29.4909,
      "step": 9
    },
    {
      "epoch": 0.0004563200324494245,
      "eval_loss": 6.978517055511475,
      "eval_runtime": 1521.0316,
      "eval_samples_per_second": 5.46,
      "eval_steps_per_second": 2.73,
      "step": 9
    },
    {
      "epoch": 0.0005070222582771384,
      "grad_norm": 26.38067054748535,
      "learning_rate": 8.535533905932738e-05,
      "loss": 34.6105,
      "step": 10
    },
    {
      "epoch": 0.0005577244841048522,
      "grad_norm": 24.686254501342773,
      "learning_rate": 7.938926261462366e-05,
      "loss": 23.0639,
      "step": 11
    },
    {
      "epoch": 0.000608426709932566,
      "grad_norm": 22.281373977661133,
      "learning_rate": 7.269952498697734e-05,
      "loss": 21.833,
      "step": 12
    },
    {
      "epoch": 0.000608426709932566,
      "eval_loss": 5.586967945098877,
      "eval_runtime": 1519.8132,
      "eval_samples_per_second": 5.464,
      "eval_steps_per_second": 2.733,
      "step": 12
    },
    {
      "epoch": 0.0006591289357602798,
      "grad_norm": 164.1787567138672,
      "learning_rate": 6.545084971874738e-05,
      "loss": 20.3158,
      "step": 13
    },
    {
      "epoch": 0.0007098311615879937,
      "grad_norm": 23.327159881591797,
      "learning_rate": 5.782172325201155e-05,
      "loss": 21.234,
      "step": 14
    },
    {
      "epoch": 0.0007605333874157076,
      "grad_norm": 17.704011917114258,
      "learning_rate": 5e-05,
      "loss": 13.4687,
      "step": 15
    },
    {
      "epoch": 0.0007605333874157076,
      "eval_loss": 4.6250200271606445,
      "eval_runtime": 1519.2183,
      "eval_samples_per_second": 5.467,
      "eval_steps_per_second": 2.734,
      "step": 15
    },
    {
      "epoch": 0.0008112356132434214,
      "grad_norm": 21.224292755126953,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 16.2957,
      "step": 16
    },
    {
      "epoch": 0.0008619378390711352,
      "grad_norm": 27.829790115356445,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 23.9581,
      "step": 17
    },
    {
      "epoch": 0.000912640064898849,
      "grad_norm": 27.01038360595703,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 19.2738,
      "step": 18
    },
    {
      "epoch": 0.000912640064898849,
      "eval_loss": 3.9614384174346924,
      "eval_runtime": 1519.4201,
      "eval_samples_per_second": 5.466,
      "eval_steps_per_second": 2.733,
      "step": 18
    },
    {
      "epoch": 0.0009633422907265629,
      "grad_norm": 31.614938735961914,
      "learning_rate": 2.061073738537635e-05,
      "loss": 16.2201,
      "step": 19
    },
    {
      "epoch": 0.0010140445165542768,
      "grad_norm": 30.133304595947266,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 13.2166,
      "step": 20
    },
    {
      "epoch": 0.0010647467423819906,
      "grad_norm": 30.905317306518555,
      "learning_rate": 9.549150281252633e-06,
      "loss": 17.1181,
      "step": 21
    },
    {
      "epoch": 0.0010647467423819906,
      "eval_loss": 3.6349897384643555,
      "eval_runtime": 1521.227,
      "eval_samples_per_second": 5.459,
      "eval_steps_per_second": 2.73,
      "step": 21
    },
    {
      "epoch": 0.0011154489682097045,
      "grad_norm": 30.500104904174805,
      "learning_rate": 5.449673790581611e-06,
      "loss": 11.5841,
      "step": 22
    },
    {
      "epoch": 0.0011661511940374183,
      "grad_norm": 33.284156799316406,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 17.699,
      "step": 23
    },
    {
      "epoch": 0.001216853419865132,
      "grad_norm": 22.113712310791016,
      "learning_rate": 6.15582970243117e-07,
      "loss": 9.9624,
      "step": 24
    },
    {
      "epoch": 0.001216853419865132,
      "eval_loss": 3.54598331451416,
      "eval_runtime": 1519.2854,
      "eval_samples_per_second": 5.466,
      "eval_steps_per_second": 2.734,
      "step": 24
    },
    {
      "epoch": 0.0012675556456928458,
      "grad_norm": 35.23492431640625,
      "learning_rate": 0.0,
      "loss": 14.2806,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.390484150386688e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|