{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07692307692307693,
      "grad_norm": 2.703125,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 2.7432,
      "step": 1
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 1.46875,
      "learning_rate": 7.692307692307693e-05,
      "loss": 2.7967,
      "step": 5
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 1.5078125,
      "learning_rate": 0.00015384615384615385,
      "loss": 2.6184,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.7931067943573,
      "eval_runtime": 0.5382,
      "eval_samples_per_second": 18.579,
      "eval_steps_per_second": 1.858,
      "step": 13
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 1.6640625,
      "learning_rate": 0.00019985583705641418,
      "loss": 2.2437,
      "step": 15
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 2.046875,
      "learning_rate": 0.00019823877374156647,
      "loss": 1.9974,
      "step": 20
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 1.2734375,
      "learning_rate": 0.00019485364419471454,
      "loss": 1.8166,
      "step": 25
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.5962724685668945,
      "eval_runtime": 0.5549,
      "eval_samples_per_second": 18.021,
      "eval_steps_per_second": 1.802,
      "step": 26
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 0.65625,
      "learning_rate": 0.0001897613727639014,
      "loss": 1.6601,
      "step": 30
    },
    {
      "epoch": 2.6923076923076925,
      "grad_norm": 0.61328125,
      "learning_rate": 0.00018305360832480117,
      "loss": 1.5365,
      "step": 35
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.561077356338501,
      "eval_runtime": 0.5395,
      "eval_samples_per_second": 18.537,
      "eval_steps_per_second": 1.854,
      "step": 39
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 0.326171875,
      "learning_rate": 0.00017485107481711012,
      "loss": 1.4277,
      "step": 40
    },
    {
      "epoch": 3.4615384615384617,
      "grad_norm": 0.34765625,
      "learning_rate": 0.0001653013984983585,
      "loss": 1.3807,
      "step": 45
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.337890625,
      "learning_rate": 0.00015457645101945046,
      "loss": 1.3191,
      "step": 50
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.5480408668518066,
      "eval_runtime": 0.5396,
      "eval_samples_per_second": 18.531,
      "eval_steps_per_second": 1.853,
      "step": 52
    },
    {
      "epoch": 4.230769230769231,
      "grad_norm": 0.32421875,
      "learning_rate": 0.00014286925614030542,
      "loss": 1.2714,
      "step": 55
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 0.328125,
      "learning_rate": 0.0001303905157574247,
      "loss": 1.2521,
      "step": 60
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.328125,
      "learning_rate": 0.00011736481776669306,
      "loss": 1.2353,
      "step": 65
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.534567356109619,
      "eval_runtime": 0.5331,
      "eval_samples_per_second": 18.76,
      "eval_steps_per_second": 1.876,
      "step": 65
    },
    {
      "epoch": 5.384615384615385,
      "grad_norm": 0.29296875,
      "learning_rate": 0.00010402659401094152,
      "loss": 1.1973,
      "step": 70
    },
    {
      "epoch": 5.769230769230769,
      "grad_norm": 0.349609375,
      "learning_rate": 9.061590105968208e-05,
      "loss": 1.1927,
      "step": 75
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.5317368507385254,
      "eval_runtime": 0.5351,
      "eval_samples_per_second": 18.69,
      "eval_steps_per_second": 1.869,
      "step": 78
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 0.265625,
      "learning_rate": 7.73740997570278e-05,
      "loss": 1.1796,
      "step": 80
    },
    {
      "epoch": 6.538461538461538,
      "grad_norm": 0.326171875,
      "learning_rate": 6.453951129574644e-05,
      "loss": 1.1608,
      "step": 85
    },
    {
      "epoch": 6.923076923076923,
      "grad_norm": 0.33203125,
      "learning_rate": 5.234312799786921e-05,
      "loss": 1.1527,
      "step": 90
    },
    {
      "epoch": 7.0,
      "eval_loss": 2.5331029891967773,
      "eval_runtime": 0.536,
      "eval_samples_per_second": 18.658,
      "eval_steps_per_second": 1.866,
      "step": 91
    },
    {
      "epoch": 7.3076923076923075,
      "grad_norm": 0.294921875,
      "learning_rate": 4.100445599768774e-05,
      "loss": 1.1448,
      "step": 95
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 0.28515625,
      "learning_rate": 3.072756464904006e-05,
      "loss": 1.1355,
      "step": 100
    },
    {
      "epoch": 8.0,
      "eval_loss": 2.535613536834717,
      "eval_runtime": 0.5382,
      "eval_samples_per_second": 18.582,
      "eval_steps_per_second": 1.858,
      "step": 104
    },
    {
      "epoch": 8.076923076923077,
      "grad_norm": 0.296875,
      "learning_rate": 2.1697413758237784e-05,
      "loss": 1.1328,
      "step": 105
    },
    {
      "epoch": 8.461538461538462,
      "grad_norm": 0.3125,
      "learning_rate": 1.4076524743778319e-05,
      "loss": 1.1259,
      "step": 110
    },
    {
      "epoch": 8.846153846153847,
      "grad_norm": 0.2734375,
      "learning_rate": 8.002055634117578e-06,
      "loss": 1.1336,
      "step": 115
    },
    {
      "epoch": 9.0,
      "eval_loss": 2.535912275314331,
      "eval_runtime": 0.5424,
      "eval_samples_per_second": 18.437,
      "eval_steps_per_second": 1.844,
      "step": 117
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 0.333984375,
      "learning_rate": 3.5833325466437694e-06,
      "loss": 1.139,
      "step": 120
    },
    {
      "epoch": 9.615384615384615,
      "grad_norm": 0.27734375,
      "learning_rate": 8.998820754091531e-07,
      "loss": 1.1132,
      "step": 125
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.302734375,
      "learning_rate": 0.0,
      "loss": 1.1358,
      "step": 130
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.536184310913086,
      "eval_runtime": 0.5396,
      "eval_samples_per_second": 18.533,
      "eval_steps_per_second": 1.853,
      "step": 130
    },
    {
      "epoch": 10.0,
      "step": 130,
      "total_flos": 7.63585340899328e+16,
      "train_loss": 1.4418765141413763,
      "train_runtime": 452.7766,
      "train_samples_per_second": 13.583,
      "train_steps_per_second": 0.287
    }
  ],
  "logging_steps": 5,
  "max_steps": 130,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "total_flos": 7.63585340899328e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}