{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.011927836588638736,
  "eval_steps": 2,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005963918294319368,
      "grad_norm": 1.5163764953613281,
      "learning_rate": 2e-05,
      "loss": 0.9264,
      "step": 1
    },
    {
      "epoch": 0.0005963918294319368,
      "eval_loss": 0.988461434841156,
      "eval_runtime": 77.0662,
      "eval_samples_per_second": 9.161,
      "eval_steps_per_second": 4.58,
      "step": 1
    },
    {
      "epoch": 0.0011927836588638736,
      "grad_norm": 1.2068827152252197,
      "learning_rate": 4e-05,
      "loss": 0.8253,
      "step": 2
    },
    {
      "epoch": 0.0011927836588638736,
      "eval_loss": 0.9857558012008667,
      "eval_runtime": 76.5737,
      "eval_samples_per_second": 9.22,
      "eval_steps_per_second": 4.61,
      "step": 2
    },
    {
      "epoch": 0.0017891754882958103,
      "grad_norm": 1.454573631286621,
      "learning_rate": 6e-05,
      "loss": 0.901,
      "step": 3
    },
    {
      "epoch": 0.0023855673177277472,
      "grad_norm": 1.681971788406372,
      "learning_rate": 8e-05,
      "loss": 1.0454,
      "step": 4
    },
    {
      "epoch": 0.0023855673177277472,
      "eval_loss": 0.9374808073043823,
      "eval_runtime": 76.5425,
      "eval_samples_per_second": 9.224,
      "eval_steps_per_second": 4.612,
      "step": 4
    },
    {
      "epoch": 0.002981959147159684,
      "grad_norm": 1.7881622314453125,
      "learning_rate": 0.0001,
      "loss": 0.8865,
      "step": 5
    },
    {
      "epoch": 0.0035783509765916206,
      "grad_norm": 1.8039438724517822,
      "learning_rate": 0.00012,
      "loss": 0.8906,
      "step": 6
    },
    {
      "epoch": 0.0035783509765916206,
      "eval_loss": 0.7199013829231262,
      "eval_runtime": 76.5601,
      "eval_samples_per_second": 9.222,
      "eval_steps_per_second": 4.611,
      "step": 6
    },
    {
      "epoch": 0.004174742806023557,
      "grad_norm": 1.78489351272583,
      "learning_rate": 0.00014,
      "loss": 0.8658,
      "step": 7
    },
    {
      "epoch": 0.0047711346354554944,
      "grad_norm": 1.3335764408111572,
      "learning_rate": 0.00016,
      "loss": 0.5989,
      "step": 8
    },
    {
      "epoch": 0.0047711346354554944,
      "eval_loss": 0.45117324590682983,
      "eval_runtime": 76.584,
      "eval_samples_per_second": 9.219,
      "eval_steps_per_second": 4.609,
      "step": 8
    },
    {
      "epoch": 0.005367526464887431,
      "grad_norm": 1.000693440437317,
      "learning_rate": 0.00018,
      "loss": 0.4353,
      "step": 9
    },
    {
      "epoch": 0.005963918294319368,
      "grad_norm": 0.9320959448814392,
      "learning_rate": 0.0002,
      "loss": 0.4002,
      "step": 10
    },
    {
      "epoch": 0.005963918294319368,
      "eval_loss": 0.33094078302383423,
      "eval_runtime": 76.5684,
      "eval_samples_per_second": 9.221,
      "eval_steps_per_second": 4.61,
      "step": 10
    },
    {
      "epoch": 0.006560310123751305,
      "grad_norm": 1.0968209505081177,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.5539,
      "step": 11
    },
    {
      "epoch": 0.007156701953183241,
      "grad_norm": 0.8019706010818481,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.2537,
      "step": 12
    },
    {
      "epoch": 0.007156701953183241,
      "eval_loss": 0.24521681666374207,
      "eval_runtime": 76.5378,
      "eval_samples_per_second": 9.224,
      "eval_steps_per_second": 4.612,
      "step": 12
    },
    {
      "epoch": 0.007753093782615178,
      "grad_norm": 0.8972131609916687,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.188,
      "step": 13
    },
    {
      "epoch": 0.008349485612047115,
      "grad_norm": 1.388610601425171,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.2539,
      "step": 14
    },
    {
      "epoch": 0.008349485612047115,
      "eval_loss": 0.20003721117973328,
      "eval_runtime": 76.5695,
      "eval_samples_per_second": 9.22,
      "eval_steps_per_second": 4.61,
      "step": 14
    },
    {
      "epoch": 0.008945877441479051,
      "grad_norm": 1.2546236515045166,
      "learning_rate": 0.0001,
      "loss": 0.2859,
      "step": 15
    },
    {
      "epoch": 0.009542269270910989,
      "grad_norm": 1.2586054801940918,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.2893,
      "step": 16
    },
    {
      "epoch": 0.009542269270910989,
      "eval_loss": 0.17982345819473267,
      "eval_runtime": 76.5392,
      "eval_samples_per_second": 9.224,
      "eval_steps_per_second": 4.612,
      "step": 16
    },
    {
      "epoch": 0.010138661100342925,
      "grad_norm": 1.4489102363586426,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.1875,
      "step": 17
    },
    {
      "epoch": 0.010735052929774861,
      "grad_norm": 1.0019066333770752,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.0675,
      "step": 18
    },
    {
      "epoch": 0.010735052929774861,
      "eval_loss": 0.15851947665214539,
      "eval_runtime": 76.5776,
      "eval_samples_per_second": 9.219,
      "eval_steps_per_second": 4.61,
      "step": 18
    },
    {
      "epoch": 0.0113314447592068,
      "grad_norm": 0.7860324382781982,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.1514,
      "step": 19
    },
    {
      "epoch": 0.011927836588638736,
      "grad_norm": 0.9038301110267639,
      "learning_rate": 0.0,
      "loss": 0.178,
      "step": 20
    },
    {
      "epoch": 0.011927836588638736,
      "eval_loss": 0.1537449210882187,
      "eval_runtime": 76.5787,
      "eval_samples_per_second": 9.219,
      "eval_steps_per_second": 4.61,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.31825833869312e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}