{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.012391573729863693,
  "eval_steps": 5,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004956629491945477,
      "grad_norm": 0.7391602396965027,
      "learning_rate": 1e-05,
      "loss": 2.3969,
      "step": 1
    },
    {
      "epoch": 0.0004956629491945477,
      "eval_loss": 2.63633394241333,
      "eval_runtime": 19.1271,
      "eval_samples_per_second": 88.827,
      "eval_steps_per_second": 11.136,
      "step": 1
    },
    {
      "epoch": 0.0009913258983890955,
      "grad_norm": 0.8179469704627991,
      "learning_rate": 2e-05,
      "loss": 2.3856,
      "step": 2
    },
    {
      "epoch": 0.001486988847583643,
      "grad_norm": 0.7126672863960266,
      "learning_rate": 3e-05,
      "loss": 2.3,
      "step": 3
    },
    {
      "epoch": 0.001982651796778191,
      "grad_norm": 1.0962400436401367,
      "learning_rate": 4e-05,
      "loss": 3.1446,
      "step": 4
    },
    {
      "epoch": 0.0024783147459727386,
      "grad_norm": 0.7755838632583618,
      "learning_rate": 5e-05,
      "loss": 2.4778,
      "step": 5
    },
    {
      "epoch": 0.0024783147459727386,
      "eval_loss": 2.6211066246032715,
      "eval_runtime": 17.2492,
      "eval_samples_per_second": 98.497,
      "eval_steps_per_second": 12.348,
      "step": 5
    },
    {
      "epoch": 0.002973977695167286,
      "grad_norm": 1.0329855680465698,
      "learning_rate": 6e-05,
      "loss": 3.06,
      "step": 6
    },
    {
      "epoch": 0.003469640644361834,
      "grad_norm": 0.7987164855003357,
      "learning_rate": 7e-05,
      "loss": 2.5097,
      "step": 7
    },
    {
      "epoch": 0.003965303593556382,
      "grad_norm": 0.8003743886947632,
      "learning_rate": 8e-05,
      "loss": 2.4667,
      "step": 8
    },
    {
      "epoch": 0.0044609665427509295,
      "grad_norm": 0.7125903367996216,
      "learning_rate": 9e-05,
      "loss": 2.2826,
      "step": 9
    },
    {
      "epoch": 0.004956629491945477,
      "grad_norm": 0.8778437376022339,
      "learning_rate": 0.0001,
      "loss": 2.7076,
      "step": 10
    },
    {
      "epoch": 0.004956629491945477,
      "eval_loss": 2.435589075088501,
      "eval_runtime": 17.2728,
      "eval_samples_per_second": 98.363,
      "eval_steps_per_second": 12.332,
      "step": 10
    },
    {
      "epoch": 0.005452292441140025,
      "grad_norm": 0.7835959196090698,
      "learning_rate": 9.98458666866564e-05,
      "loss": 2.587,
      "step": 11
    },
    {
      "epoch": 0.005947955390334572,
      "grad_norm": 0.6849549412727356,
      "learning_rate": 9.938441702975689e-05,
      "loss": 2.4522,
      "step": 12
    },
    {
      "epoch": 0.00644361833952912,
      "grad_norm": 0.7166538238525391,
      "learning_rate": 9.861849601988383e-05,
      "loss": 2.4591,
      "step": 13
    },
    {
      "epoch": 0.006939281288723668,
      "grad_norm": 0.5679525136947632,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.0319,
      "step": 14
    },
    {
      "epoch": 0.007434944237918215,
      "grad_norm": 0.6390686631202698,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.2686,
      "step": 15
    },
    {
      "epoch": 0.007434944237918215,
      "eval_loss": 2.1834659576416016,
      "eval_runtime": 17.2849,
      "eval_samples_per_second": 98.294,
      "eval_steps_per_second": 12.323,
      "step": 15
    },
    {
      "epoch": 0.007930607187112764,
      "grad_norm": 0.6615875959396362,
      "learning_rate": 9.45503262094184e-05,
      "loss": 2.2409,
      "step": 16
    },
    {
      "epoch": 0.00842627013630731,
      "grad_norm": 0.5758230686187744,
      "learning_rate": 9.263200821770461e-05,
      "loss": 1.8906,
      "step": 17
    },
    {
      "epoch": 0.008921933085501859,
      "grad_norm": 0.6313810348510742,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.0695,
      "step": 18
    },
    {
      "epoch": 0.009417596034696406,
      "grad_norm": 0.4957845211029053,
      "learning_rate": 8.802029828000156e-05,
      "loss": 1.7364,
      "step": 19
    },
    {
      "epoch": 0.009913258983890954,
      "grad_norm": 0.6371930837631226,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.0556,
      "step": 20
    },
    {
      "epoch": 0.009913258983890954,
      "eval_loss": 1.9821568727493286,
      "eval_runtime": 17.2924,
      "eval_samples_per_second": 98.251,
      "eval_steps_per_second": 12.318,
      "step": 20
    },
    {
      "epoch": 0.010408921933085501,
      "grad_norm": 0.6001903414726257,
      "learning_rate": 8.247240241650918e-05,
      "loss": 1.9351,
      "step": 21
    },
    {
      "epoch": 0.01090458488228005,
      "grad_norm": 0.654202401638031,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.889,
      "step": 22
    },
    {
      "epoch": 0.011400247831474598,
      "grad_norm": 0.6593927145004272,
      "learning_rate": 7.612492823579745e-05,
      "loss": 2.0221,
      "step": 23
    },
    {
      "epoch": 0.011895910780669145,
      "grad_norm": 0.48587918281555176,
      "learning_rate": 7.269952498697734e-05,
      "loss": 1.5167,
      "step": 24
    },
    {
      "epoch": 0.012391573729863693,
      "grad_norm": 0.5013470649719238,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.581,
      "step": 25
    },
    {
      "epoch": 0.012391573729863693,
      "eval_loss": 1.8297536373138428,
      "eval_runtime": 17.273,
      "eval_samples_per_second": 98.361,
      "eval_steps_per_second": 12.331,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 395687120338944.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}