{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.006502479070145493,
  "eval_steps": 5,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003251239535072746,
      "grad_norm": 0.021640103310346603,
      "learning_rate": 1e-05,
      "loss": 11.7645,
      "step": 1
    },
    {
      "epoch": 0.0003251239535072746,
      "eval_loss": 11.76447582244873,
      "eval_runtime": 249.3031,
      "eval_samples_per_second": 41.56,
      "eval_steps_per_second": 20.782,
      "step": 1
    },
    {
      "epoch": 0.0006502479070145493,
      "grad_norm": 0.024706723168492317,
      "learning_rate": 2e-05,
      "loss": 11.7658,
      "step": 2
    },
    {
      "epoch": 0.000975371860521824,
      "grad_norm": 0.022929754108190536,
      "learning_rate": 3e-05,
      "loss": 11.7649,
      "step": 3
    },
    {
      "epoch": 0.0013004958140290985,
      "grad_norm": 0.022171195596456528,
      "learning_rate": 4e-05,
      "loss": 11.7648,
      "step": 4
    },
    {
      "epoch": 0.0016256197675363732,
      "grad_norm": 0.02208411693572998,
      "learning_rate": 5e-05,
      "loss": 11.7649,
      "step": 5
    },
    {
      "epoch": 0.0016256197675363732,
      "eval_loss": 11.764402389526367,
      "eval_runtime": 249.1579,
      "eval_samples_per_second": 41.584,
      "eval_steps_per_second": 20.794,
      "step": 5
    },
    {
      "epoch": 0.001950743721043648,
      "grad_norm": 0.02443164400756359,
      "learning_rate": 6e-05,
      "loss": 11.7634,
      "step": 6
    },
    {
      "epoch": 0.0022758676745509225,
      "grad_norm": 0.023470092564821243,
      "learning_rate": 7e-05,
      "loss": 11.7654,
      "step": 7
    },
    {
      "epoch": 0.002600991628058197,
      "grad_norm": 0.023716744035482407,
      "learning_rate": 8e-05,
      "loss": 11.7644,
      "step": 8
    },
    {
      "epoch": 0.002926115581565472,
      "grad_norm": 0.022970277816057205,
      "learning_rate": 9e-05,
      "loss": 11.7658,
      "step": 9
    },
    {
      "epoch": 0.0032512395350727465,
      "grad_norm": 0.022244542837142944,
      "learning_rate": 0.0001,
      "loss": 11.7641,
      "step": 10
    },
    {
      "epoch": 0.0032512395350727465,
      "eval_loss": 11.764139175415039,
      "eval_runtime": 248.9949,
      "eval_samples_per_second": 41.611,
      "eval_steps_per_second": 20.808,
      "step": 10
    },
    {
      "epoch": 0.003576363488580021,
      "grad_norm": 0.024796368554234505,
      "learning_rate": 9.755282581475769e-05,
      "loss": 11.7653,
      "step": 11
    },
    {
      "epoch": 0.003901487442087296,
      "grad_norm": 0.024691808968782425,
      "learning_rate": 9.045084971874738e-05,
      "loss": 11.7642,
      "step": 12
    },
    {
      "epoch": 0.00422661139559457,
      "grad_norm": 0.026635609567165375,
      "learning_rate": 7.938926261462366e-05,
      "loss": 11.7643,
      "step": 13
    },
    {
      "epoch": 0.004551735349101845,
      "grad_norm": 0.024080852046608925,
      "learning_rate": 6.545084971874738e-05,
      "loss": 11.7639,
      "step": 14
    },
    {
      "epoch": 0.00487685930260912,
      "grad_norm": 0.021976996213197708,
      "learning_rate": 5e-05,
      "loss": 11.763,
      "step": 15
    },
    {
      "epoch": 0.00487685930260912,
      "eval_loss": 11.7637939453125,
      "eval_runtime": 248.9817,
      "eval_samples_per_second": 41.613,
      "eval_steps_per_second": 20.809,
      "step": 15
    },
    {
      "epoch": 0.005201983256116394,
      "grad_norm": 0.021976066753268242,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 11.7639,
      "step": 16
    },
    {
      "epoch": 0.005527107209623669,
      "grad_norm": 0.0250637736171484,
      "learning_rate": 2.061073738537635e-05,
      "loss": 11.7633,
      "step": 17
    },
    {
      "epoch": 0.005852231163130944,
      "grad_norm": 0.024703800678253174,
      "learning_rate": 9.549150281252633e-06,
      "loss": 11.7632,
      "step": 18
    },
    {
      "epoch": 0.006177355116638218,
      "grad_norm": 0.02371840551495552,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 11.7637,
      "step": 19
    },
    {
      "epoch": 0.006502479070145493,
      "grad_norm": 0.027609504759311676,
      "learning_rate": 0.0,
      "loss": 11.764,
      "step": 20
    },
    {
      "epoch": 0.006502479070145493,
      "eval_loss": 11.763700485229492,
      "eval_runtime": 248.9974,
      "eval_samples_per_second": 41.611,
      "eval_steps_per_second": 20.807,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 65184014008320.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}