{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.07163323782234957,
  "eval_steps": 9,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0028653295128939827,
      "grad_norm": 0.18870845437049866,
      "learning_rate": 1e-05,
      "loss": 10.3647,
      "step": 1
    },
    {
      "epoch": 0.0028653295128939827,
      "eval_loss": 10.36025333404541,
      "eval_runtime": 1.0201,
      "eval_samples_per_second": 288.212,
      "eval_steps_per_second": 36.272,
      "step": 1
    },
    {
      "epoch": 0.0057306590257879654,
      "grad_norm": 0.21544590592384338,
      "learning_rate": 2e-05,
      "loss": 10.362,
      "step": 2
    },
    {
      "epoch": 0.008595988538681949,
      "grad_norm": 0.21012459695339203,
      "learning_rate": 3e-05,
      "loss": 10.3787,
      "step": 3
    },
    {
      "epoch": 0.011461318051575931,
      "grad_norm": 0.17221978306770325,
      "learning_rate": 4e-05,
      "loss": 10.3589,
      "step": 4
    },
    {
      "epoch": 0.014326647564469915,
      "grad_norm": 0.19953270256519318,
      "learning_rate": 5e-05,
      "loss": 10.3518,
      "step": 5
    },
    {
      "epoch": 0.017191977077363897,
      "grad_norm": 0.2154458910226822,
      "learning_rate": 6e-05,
      "loss": 10.3715,
      "step": 6
    },
    {
      "epoch": 0.02005730659025788,
      "grad_norm": 0.18448343873023987,
      "learning_rate": 7e-05,
      "loss": 10.3615,
      "step": 7
    },
    {
      "epoch": 0.022922636103151862,
      "grad_norm": 0.17267638444900513,
      "learning_rate": 8e-05,
      "loss": 10.3744,
      "step": 8
    },
    {
      "epoch": 0.025787965616045846,
      "grad_norm": 0.21264782547950745,
      "learning_rate": 9e-05,
      "loss": 10.3635,
      "step": 9
    },
    {
      "epoch": 0.025787965616045846,
      "eval_loss": 10.357918739318848,
      "eval_runtime": 1.0313,
      "eval_samples_per_second": 285.087,
      "eval_steps_per_second": 35.878,
      "step": 9
    },
    {
      "epoch": 0.02865329512893983,
      "grad_norm": 0.20090581476688385,
      "learning_rate": 0.0001,
      "loss": 10.3682,
      "step": 10
    },
    {
      "epoch": 0.03151862464183381,
      "grad_norm": 0.2009212225675583,
      "learning_rate": 9.99695413509548e-05,
      "loss": 10.3545,
      "step": 11
    },
    {
      "epoch": 0.034383954154727794,
      "grad_norm": 0.17764975130558014,
      "learning_rate": 9.987820251299122e-05,
      "loss": 10.3667,
      "step": 12
    },
    {
      "epoch": 0.03724928366762178,
      "grad_norm": 0.19756364822387695,
      "learning_rate": 9.972609476841367e-05,
      "loss": 10.3524,
      "step": 13
    },
    {
      "epoch": 0.04011461318051576,
      "grad_norm": 0.21011871099472046,
      "learning_rate": 9.951340343707852e-05,
      "loss": 10.3613,
      "step": 14
    },
    {
      "epoch": 0.04297994269340974,
      "grad_norm": 0.20613764226436615,
      "learning_rate": 9.924038765061042e-05,
      "loss": 10.3708,
      "step": 15
    },
    {
      "epoch": 0.045845272206303724,
      "grad_norm": 0.27395445108413696,
      "learning_rate": 9.890738003669029e-05,
      "loss": 10.347,
      "step": 16
    },
    {
      "epoch": 0.04871060171919771,
      "grad_norm": 0.20352178812026978,
      "learning_rate": 9.851478631379982e-05,
      "loss": 10.3532,
      "step": 17
    },
    {
      "epoch": 0.05157593123209169,
      "grad_norm": 0.2129252851009369,
      "learning_rate": 9.806308479691595e-05,
      "loss": 10.354,
      "step": 18
    },
    {
      "epoch": 0.05157593123209169,
      "eval_loss": 10.35179328918457,
      "eval_runtime": 1.0103,
      "eval_samples_per_second": 290.994,
      "eval_steps_per_second": 36.622,
      "step": 18
    },
    {
      "epoch": 0.054441260744985676,
      "grad_norm": 0.1703406721353531,
      "learning_rate": 9.755282581475769e-05,
      "loss": 10.3596,
      "step": 19
    },
    {
      "epoch": 0.05730659025787966,
      "grad_norm": 0.2214614599943161,
      "learning_rate": 9.698463103929542e-05,
      "loss": 10.3457,
      "step": 20
    },
    {
      "epoch": 0.06017191977077364,
      "grad_norm": 0.2195059061050415,
      "learning_rate": 9.635919272833938e-05,
      "loss": 10.3563,
      "step": 21
    },
    {
      "epoch": 0.06303724928366762,
      "grad_norm": 0.21687962114810944,
      "learning_rate": 9.567727288213005e-05,
      "loss": 10.3525,
      "step": 22
    },
    {
      "epoch": 0.0659025787965616,
      "grad_norm": 0.20921839773654938,
      "learning_rate": 9.493970231495835e-05,
      "loss": 10.3405,
      "step": 23
    },
    {
      "epoch": 0.06876790830945559,
      "grad_norm": 0.21130795776844025,
      "learning_rate": 9.414737964294636e-05,
      "loss": 10.3536,
      "step": 24
    },
    {
      "epoch": 0.07163323782234957,
      "grad_norm": 0.22695602476596832,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.3385,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1307561164800.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}