redpj7B-lora-cnn-dailymail_6000_samples / redpj7B-lora-cnn-dailymail-results_6000_samples / checkpoint-600 / trainer_state.json
{
  "best_metric": 1.9635850191116333,
  "best_model_checkpoint": "./results/redpj7B-lora-cnn-dailymail-results_fine_tune_test/checkpoint-600",
  "epoch": 0.19900497512437812,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.00027082228116710874,
      "loss": 1.8654,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0002681697612732095,
      "loss": 1.8124,
      "step": 40
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002655172413793103,
      "loss": 1.8231,
      "step": 60
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002628647214854111,
      "loss": 1.7818,
      "step": 80
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00026021220159151194,
      "loss": 1.8446,
      "step": 100
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0002575596816976127,
      "loss": 1.9001,
      "step": 120
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002549071618037135,
      "loss": 1.8073,
      "step": 140
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002522546419098143,
      "loss": 1.8506,
      "step": 160
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002496021220159151,
      "loss": 1.8471,
      "step": 180
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002469496021220159,
      "loss": 1.8647,
      "step": 200
    },
    {
      "epoch": 0.07,
      "eval_loss": 1.966022253036499,
      "eval_runtime": 12002.3952,
      "eval_samples_per_second": 1.114,
      "eval_steps_per_second": 0.139,
      "step": 200
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00024429708222811666,
      "loss": 1.8578,
      "step": 220
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0002416445623342175,
      "loss": 1.8329,
      "step": 240
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002389920424403183,
      "loss": 1.8119,
      "step": 260
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002363395225464191,
      "loss": 1.8884,
      "step": 280
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00023368700265251986,
      "loss": 1.9077,
      "step": 300
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00023103448275862065,
      "loss": 1.8092,
      "step": 320
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00022838196286472146,
      "loss": 1.9237,
      "step": 340
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00022572944297082225,
      "loss": 1.8701,
      "step": 360
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00022307692307692306,
      "loss": 1.8933,
      "step": 380
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00022042440318302385,
      "loss": 1.8453,
      "step": 400
    },
    {
      "epoch": 0.13,
      "eval_loss": 1.9667036533355713,
      "eval_runtime": 11991.8036,
      "eval_samples_per_second": 1.115,
      "eval_steps_per_second": 0.139,
      "step": 400
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00021777188328912466,
      "loss": 1.8272,
      "step": 420
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00021511936339522545,
      "loss": 1.8149,
      "step": 440
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00021246684350132626,
      "loss": 1.8565,
      "step": 460
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00020981432360742705,
      "loss": 1.8139,
      "step": 480
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0002071618037135278,
      "loss": 1.802,
      "step": 500
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00020450928381962862,
      "loss": 1.8795,
      "step": 520
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0002018567639257294,
      "loss": 1.8621,
      "step": 540
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019920424403183022,
      "loss": 1.8363,
      "step": 560
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.000196551724137931,
      "loss": 1.8746,
      "step": 580
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019389920424403182,
      "loss": 1.7763,
      "step": 600
    },
    {
      "epoch": 0.2,
      "eval_loss": 1.9635850191116333,
      "eval_runtime": 11976.5234,
      "eval_samples_per_second": 1.116,
      "eval_steps_per_second": 0.14,
      "step": 600
    }
  ],
  "max_steps": 9045,
  "num_train_epochs": 3,
  "total_flos": 3.74219846516736e+16,
  "trial_name": null,
  "trial_params": null
}
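
The state above is plain JSON, so it can be read back with the Python standard library. The following is a minimal sketch for inspecting the logged losses; the relative path is an assumption based on the directory names at the top of this file, not something defined by the checkpoint itself.

# Minimal sketch (not part of the original checkpoint) for inspecting the
# log_history of this trainer_state.json with the standard library.
import json

# Assumed location, inferred from the directory listing above.
STATE_PATH = "redpj7B-lora-cnn-dailymail-results_6000_samples/checkpoint-600/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training-loss entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"logged steps: {state['global_step']} / {state['max_steps']}")
print(f"best eval_loss: {state['best_metric']}")

for e in eval_log:
    print(f"step {e['step']:>4}: eval_loss={e['eval_loss']:.4f}")

# Quick sanity check: training loss stays around 1.8 while eval_loss sits
# near 1.96, with the best value (1.9636) reached at step 600.
last = train_log[-1]
print(f"last logged train loss at step {last['step']}: {last['loss']}")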