{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.31358073359972766,
  "global_step": 17500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 4.985067584114299e-05,
      "loss": 2.6076,
      "step": 500
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.970135168228597e-05,
      "loss": 2.5834,
      "step": 1000
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.955202752342896e-05,
      "loss": 2.5955,
      "step": 1500
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.940270336457195e-05,
      "loss": 2.5908,
      "step": 2000
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.925337920571494e-05,
      "loss": 2.5974,
      "step": 2500
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.9104055046857924e-05,
      "loss": 3.0537,
      "step": 3000
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.895473088800091e-05,
      "loss": 3.0324,
      "step": 3500
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.88054067291439e-05,
      "loss": 3.0167,
      "step": 4000
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.865608257028688e-05,
      "loss": 3.0234,
      "step": 4500
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.850675841142987e-05,
      "loss": 3.0113,
      "step": 5000
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.835743425257286e-05,
      "loss": 3.0151,
      "step": 5500
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.8208110093715845e-05,
      "loss": 2.9967,
      "step": 6000
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.805878593485883e-05,
      "loss": 2.998,
      "step": 6500
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.790946177600182e-05,
      "loss": 2.9781,
      "step": 7000
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.776013761714481e-05,
      "loss": 2.9997,
      "step": 7500
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.761081345828779e-05,
      "loss": 2.9812,
      "step": 8000
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.746148929943078e-05,
      "loss": 2.97,
      "step": 8500
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.7312165140573766e-05,
      "loss": 2.9958,
      "step": 9000
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.7162840981716754e-05,
      "loss": 2.9818,
      "step": 9500
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.7013516822859735e-05,
      "loss": 2.973,
      "step": 10000
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.686419266400272e-05,
      "loss": 2.9652,
      "step": 10500
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.671486850514572e-05,
      "loss": 2.9503,
      "step": 11000
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.65655443462887e-05,
      "loss": 2.9483,
      "step": 11500
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.641622018743169e-05,
      "loss": 2.9592,
      "step": 12000
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.6266896028574675e-05,
      "loss": 2.9615,
      "step": 12500
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.611757186971766e-05,
      "loss": 2.9652,
      "step": 13000
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.5968247710860644e-05,
      "loss": 2.9406,
      "step": 13500
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.581892355200363e-05,
      "loss": 2.9328,
      "step": 14000
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.566959939314662e-05,
      "loss": 2.939,
      "step": 14500
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.552027523428961e-05,
      "loss": 2.9252,
      "step": 15000
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.5370951075432596e-05,
      "loss": 2.9352,
      "step": 15500
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.5221626916575584e-05,
      "loss": 2.9564,
      "step": 16000
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.5072302757718565e-05,
      "loss": 2.943,
      "step": 16500
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.492297859886155e-05,
      "loss": 2.9285,
      "step": 17000
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.477365444000454e-05,
      "loss": 2.929,
      "step": 17500
    },
    {
      "epoch": 0.31,
      "step": 17500,
      "total_flos": 3.250452430848e+16,
      "train_loss": 0.0,
      "train_runtime": 72.3486,
      "train_samples_per_second": 505.635,
      "train_steps_per_second": 63.235
    }
  ],
  "max_steps": 4575,
  "num_train_epochs": 3,
  "total_flos": 3.250452430848e+16,
  "trial_name": null,
  "trial_params": null
}