{
  "best_metric": 6.0799336433410645,
  "best_model_checkpoint": "./output_dir/checkpoint-629",
  "epoch": 0.9996027016289233,
  "eval_steps": 50.0,
  "global_step": 629,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "grad_norm": 0.9626450538635254,
      "learning_rate": 9.205087440381559e-05,
      "loss": 0.8035,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.889542281627655,
      "learning_rate": 8.410174880763116e-05,
      "loss": 0.3005,
      "step": 100
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.5076414942741394,
      "learning_rate": 7.615262321144675e-05,
      "loss": 0.2731,
      "step": 150
    },
    {
      "epoch": 0.32,
      "grad_norm": 1.0295907258987427,
      "learning_rate": 6.820349761526232e-05,
      "loss": 0.2261,
      "step": 200
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.3473876416683197,
      "learning_rate": 6.0254372019077906e-05,
      "loss": 0.1866,
      "step": 250
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.5202713012695312,
      "learning_rate": 5.2305246422893485e-05,
      "loss": 0.1937,
      "step": 300
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.25799375772476196,
      "learning_rate": 4.4356120826709064e-05,
      "loss": 0.1769,
      "step": 350
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.2543068826198578,
      "learning_rate": 3.640699523052464e-05,
      "loss": 0.1693,
      "step": 400
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.5472003221511841,
      "learning_rate": 2.8457869634340222e-05,
      "loss": 0.1611,
      "step": 450
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.19463592767715454,
      "learning_rate": 2.05087440381558e-05,
      "loss": 0.1487,
      "step": 500
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.7376086711883545,
      "learning_rate": 1.2559618441971382e-05,
      "loss": 0.1111,
      "step": 550
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.5419193506240845,
      "learning_rate": 4.610492845786963e-06,
      "loss": 0.122,
      "step": 600
    },
    {
      "epoch": 1.0,
      "eval_bleu-1": 78.7702,
      "eval_bleu-2": 75.9001,
      "eval_bleu-3": 75.2623,
      "eval_bleu-4": 74.9418,
      "eval_gen_len": 128.0,
      "eval_loss": 6.0799336433410645,
      "eval_runtime": 5.1216,
      "eval_samples_per_second": 2.343,
      "eval_steps_per_second": 0.586,
      "step": 629
    },
    {
      "epoch": 1.0,
      "step": 629,
      "total_flos": 8.181451760192717e+16,
      "train_loss": 0.23391734473466494,
      "train_runtime": 1027.8255,
      "train_samples_per_second": 9.795,
      "train_steps_per_second": 0.612
    }
  ],
  "logging_steps": 50,
  "max_steps": 629,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 8.181451760192717e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}