{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "global_step": 18281,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.9452983972430393e-05,
      "loss": 1.3461,
      "step": 500
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.8905967944860785e-05,
      "loss": 1.3888,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.835895191729118e-05,
      "loss": 1.3245,
      "step": 1500
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.781193588972157e-05,
      "loss": 1.3348,
      "step": 2000
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.7264919862151964e-05,
      "loss": 1.3438,
      "step": 2500
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.6717903834582355e-05,
      "loss": 1.3493,
      "step": 3000
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.6170887807012747e-05,
      "loss": 1.3611,
      "step": 3500
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.562387177944314e-05,
      "loss": 1.3614,
      "step": 4000
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.5076855751873532e-05,
      "loss": 1.3719,
      "step": 4500
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.4529839724303922e-05,
      "loss": 1.3835,
      "step": 5000
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.3982823696734315e-05,
      "loss": 1.3944,
      "step": 5500
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.3435807669164707e-05,
      "loss": 1.3957,
      "step": 6000
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.28887916415951e-05,
      "loss": 1.4024,
      "step": 6500
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.2341775614025492e-05,
      "loss": 1.4204,
      "step": 7000
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.1794759586455884e-05,
      "loss": 1.4221,
      "step": 7500
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.1247743558886277e-05,
      "loss": 1.4281,
      "step": 8000
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.0700727531316667e-05,
      "loss": 1.4418,
      "step": 8500
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.015371150374706e-05,
      "loss": 1.442,
      "step": 9000
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.606695476177453e-06,
      "loss": 1.4512,
      "step": 9500
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.059679448607846e-06,
      "loss": 1.4572,
      "step": 10000
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.512663421038238e-06,
      "loss": 1.4636,
      "step": 10500
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.96564739346863e-06,
      "loss": 1.4739,
      "step": 11000
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.418631365899022e-06,
      "loss": 1.4867,
      "step": 11500
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.871615338329414e-06,
      "loss": 1.488,
      "step": 12000
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.324599310759806e-06,
      "loss": 1.5029,
      "step": 12500
    },
    {
      "epoch": 0.71,
      "learning_rate": 5.777583283190198e-06,
      "loss": 1.5143,
      "step": 13000
    },
    {
      "epoch": 0.74,
      "learning_rate": 5.23056725562059e-06,
      "loss": 1.5163,
      "step": 13500
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.683551228050982e-06,
      "loss": 1.5327,
      "step": 14000
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.136535200481375e-06,
      "loss": 1.5273,
      "step": 14500
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.589519172911767e-06,
      "loss": 1.5378,
      "step": 15000
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.042503145342159e-06,
      "loss": 1.5542,
      "step": 15500
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.495487117772551e-06,
      "loss": 1.5605,
      "step": 16000
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.948471090202943e-06,
      "loss": 1.5624,
      "step": 16500
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.4014550626333353e-06,
      "loss": 1.5738,
      "step": 17000
    },
    {
      "epoch": 0.96,
      "learning_rate": 8.544390350637274e-07,
      "loss": 1.587,
      "step": 17500
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.0742300749411957e-07,
      "loss": 1.5977,
      "step": 18000
    },
    {
      "epoch": 1.0,
      "step": 18281,
      "total_flos": 2326813943992320.0,
      "train_runtime": 29351.0857,
      "train_samples_per_second": 0.623
    }
  ],
  "max_steps": 18281,
  "num_train_epochs": 1,
  "total_flos": 2326813943992320.0,
  "trial_name": null,
  "trial_params": null
}