{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0396746677246578,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00039674667724657806,
      "eval_loss": 3.170827627182007,
      "eval_runtime": 115.3771,
      "eval_samples_per_second": 36.792,
      "eval_steps_per_second": 4.602,
      "step": 1
    },
    {
      "epoch": 0.0011902400317397343,
      "grad_norm": 0.17367960512638092,
      "learning_rate": 3e-05,
      "loss": 3.1442,
      "step": 3
    },
    {
      "epoch": 0.0023804800634794686,
      "grad_norm": 0.18824371695518494,
      "learning_rate": 6e-05,
      "loss": 3.1126,
      "step": 6
    },
    {
      "epoch": 0.0035707200952192026,
      "grad_norm": 0.24131116271018982,
      "learning_rate": 9e-05,
      "loss": 3.054,
      "step": 9
    },
    {
      "epoch": 0.0035707200952192026,
      "eval_loss": 3.1313693523406982,
      "eval_runtime": 116.5092,
      "eval_samples_per_second": 36.435,
      "eval_steps_per_second": 4.558,
      "step": 9
    },
    {
      "epoch": 0.004760960126958937,
      "grad_norm": 0.2026357352733612,
      "learning_rate": 9.987820251299122e-05,
      "loss": 3.131,
      "step": 12
    },
    {
      "epoch": 0.005951200158698671,
      "grad_norm": 0.20948725938796997,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.9786,
      "step": 15
    },
    {
      "epoch": 0.007141440190438405,
      "grad_norm": 0.27164265513420105,
      "learning_rate": 9.806308479691595e-05,
      "loss": 3.058,
      "step": 18
    },
    {
      "epoch": 0.007141440190438405,
      "eval_loss": 3.020143985748291,
      "eval_runtime": 116.708,
      "eval_samples_per_second": 36.373,
      "eval_steps_per_second": 4.55,
      "step": 18
    },
    {
      "epoch": 0.00833168022217814,
      "grad_norm": 0.262277752161026,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.9406,
      "step": 21
    },
    {
      "epoch": 0.009521920253917874,
      "grad_norm": 0.2554677426815033,
      "learning_rate": 9.414737964294636e-05,
      "loss": 3.0048,
      "step": 24
    },
    {
      "epoch": 0.010712160285657607,
      "grad_norm": 0.25378328561782837,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.9543,
      "step": 27
    },
    {
      "epoch": 0.010712160285657607,
      "eval_loss": 2.9842841625213623,
      "eval_runtime": 116.7335,
      "eval_samples_per_second": 36.365,
      "eval_steps_per_second": 4.549,
      "step": 27
    },
    {
      "epoch": 0.011902400317397342,
      "grad_norm": 0.24904771149158478,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.918,
      "step": 30
    },
    {
      "epoch": 0.013092640349137076,
      "grad_norm": 0.2184978574514389,
      "learning_rate": 8.473291852294987e-05,
      "loss": 2.9945,
      "step": 33
    },
    {
      "epoch": 0.01428288038087681,
      "grad_norm": 0.2095755785703659,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.9425,
      "step": 36
    },
    {
      "epoch": 0.01428288038087681,
      "eval_loss": 2.969701051712036,
      "eval_runtime": 116.6143,
      "eval_samples_per_second": 36.402,
      "eval_steps_per_second": 4.553,
      "step": 36
    },
    {
      "epoch": 0.015473120412616545,
      "grad_norm": 0.24327190220355988,
      "learning_rate": 7.649596321166024e-05,
      "loss": 2.9867,
      "step": 39
    },
    {
      "epoch": 0.01666336044435628,
      "grad_norm": 0.22029909491539001,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.9565,
      "step": 42
    },
    {
      "epoch": 0.017853600476096014,
      "grad_norm": 0.23045001924037933,
      "learning_rate": 6.710100716628344e-05,
      "loss": 3.0209,
      "step": 45
    },
    {
      "epoch": 0.017853600476096014,
      "eval_loss": 2.9599406719207764,
      "eval_runtime": 116.7143,
      "eval_samples_per_second": 36.371,
      "eval_steps_per_second": 4.55,
      "step": 45
    },
    {
      "epoch": 0.01904384050783575,
      "grad_norm": 0.23049961030483246,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.9193,
      "step": 48
    },
    {
      "epoch": 0.02023408053957548,
      "grad_norm": 0.23761886358261108,
      "learning_rate": 5.695865504800327e-05,
      "loss": 2.9561,
      "step": 51
    },
    {
      "epoch": 0.021424320571315214,
      "grad_norm": 0.24705606698989868,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.8906,
      "step": 54
    },
    {
      "epoch": 0.021424320571315214,
      "eval_loss": 2.9537951946258545,
      "eval_runtime": 116.681,
      "eval_samples_per_second": 36.381,
      "eval_steps_per_second": 4.551,
      "step": 54
    },
    {
      "epoch": 0.02261456060305495,
      "grad_norm": 0.24519965052604675,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 2.9874,
      "step": 57
    },
    {
      "epoch": 0.023804800634794683,
      "grad_norm": 0.24707931280136108,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.9106,
      "step": 60
    },
    {
      "epoch": 0.024995040666534418,
      "grad_norm": 0.2522743046283722,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 3.021,
      "step": 63
    },
    {
      "epoch": 0.024995040666534418,
      "eval_loss": 2.949239492416382,
      "eval_runtime": 116.7413,
      "eval_samples_per_second": 36.362,
      "eval_steps_per_second": 4.549,
      "step": 63
    },
    {
      "epoch": 0.026185280698274152,
      "grad_norm": 0.25425225496292114,
      "learning_rate": 3.12696703292044e-05,
      "loss": 2.9996,
      "step": 66
    },
    {
      "epoch": 0.027375520730013887,
      "grad_norm": 0.24507567286491394,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 2.9529,
      "step": 69
    },
    {
      "epoch": 0.02856576076175362,
      "grad_norm": 0.2544834613800049,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.9321,
      "step": 72
    },
    {
      "epoch": 0.02856576076175362,
      "eval_loss": 2.946359157562256,
      "eval_runtime": 116.6581,
      "eval_samples_per_second": 36.388,
      "eval_steps_per_second": 4.552,
      "step": 72
    },
    {
      "epoch": 0.029756000793493356,
      "grad_norm": 0.2728879749774933,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 3.015,
      "step": 75
    },
    {
      "epoch": 0.03094624082523309,
      "grad_norm": 0.286771684885025,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 2.9656,
      "step": 78
    },
    {
      "epoch": 0.03213648085697282,
      "grad_norm": 0.2664996087551117,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 2.9185,
      "step": 81
    },
    {
      "epoch": 0.03213648085697282,
      "eval_loss": 2.9444425106048584,
      "eval_runtime": 116.7288,
      "eval_samples_per_second": 36.366,
      "eval_steps_per_second": 4.549,
      "step": 81
    },
    {
      "epoch": 0.03332672088871256,
      "grad_norm": 0.2648850083351135,
      "learning_rate": 7.597595192178702e-06,
      "loss": 2.9719,
      "step": 84
    },
    {
      "epoch": 0.03451696092045229,
      "grad_norm": 0.264315128326416,
      "learning_rate": 5.060297685041659e-06,
      "loss": 2.9859,
      "step": 87
    },
    {
      "epoch": 0.03570720095219203,
      "grad_norm": 0.2442656308412552,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 2.9352,
      "step": 90
    },
    {
      "epoch": 0.03570720095219203,
      "eval_loss": 2.9436957836151123,
      "eval_runtime": 116.5739,
      "eval_samples_per_second": 36.415,
      "eval_steps_per_second": 4.555,
      "step": 90
    },
    {
      "epoch": 0.03689744098393176,
      "grad_norm": 0.2788495719432831,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 2.9576,
      "step": 93
    },
    {
      "epoch": 0.0380876810156715,
      "grad_norm": 0.2962850332260132,
      "learning_rate": 4.865965629214819e-07,
      "loss": 2.9279,
      "step": 96
    },
    {
      "epoch": 0.03927792104741123,
      "grad_norm": 0.28805339336395264,
      "learning_rate": 3.04586490452119e-08,
      "loss": 2.9436,
      "step": 99
    },
    {
      "epoch": 0.03927792104741123,
      "eval_loss": 2.9434895515441895,
      "eval_runtime": 116.5071,
      "eval_samples_per_second": 36.436,
      "eval_steps_per_second": 4.558,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.61010665504768e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}