|
{
  "best_metric": 0.8517706990242004,
  "best_model_checkpoint": "./output/training_results/C017_random_sample_llama3-8b-base_instruct_20240504_182259/checkpoint-40",
  "epoch": 4.0,
  "eval_steps": 20,
  "global_step": 192,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020833333333333332,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 0.9877,
      "step": 1
    },
    {
      "epoch": 0.10416666666666667,
      "grad_norm": 13.88322004479745,
      "learning_rate": 1.5e-06,
      "loss": 0.9606,
      "step": 5
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 5.819857619989807,
      "learning_rate": 5.25e-06,
      "loss": 0.9201,
      "step": 10
    },
    {
      "epoch": 0.3125,
      "grad_norm": 5.213327490718728,
      "learning_rate": 9e-06,
      "loss": 0.8431,
      "step": 15
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 4.886645368516583,
      "learning_rate": 1.275e-05,
      "loss": 0.8222,
      "step": 20
    },
    {
      "epoch": 0.4166666666666667,
      "eval_loss": 0.8593380451202393,
      "eval_runtime": 1.9967,
      "eval_samples_per_second": 170.28,
      "eval_steps_per_second": 1.502,
      "step": 20
    },
    {
      "epoch": 0.5208333333333334,
      "grad_norm": 4.291987293679674,
      "learning_rate": 1.3195176200175283e-05,
      "loss": 0.8289,
      "step": 25
    },
    {
      "epoch": 0.625,
      "grad_norm": 4.031254003708488,
      "learning_rate": 9.515676612044427e-06,
      "loss": 0.8983,
      "step": 30
    },
    {
      "epoch": 0.7291666666666666,
      "grad_norm": 3.73543629054737,
      "learning_rate": 6.797580677308734e-06,
      "loss": 0.8188,
      "step": 35
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 4.212558198275198,
      "learning_rate": 4.808575415542887e-06,
      "loss": 0.8014,
      "step": 40
    },
    {
      "epoch": 0.8333333333333334,
      "eval_loss": 0.8517706990242004,
      "eval_runtime": 1.9645,
      "eval_samples_per_second": 173.07,
      "eval_steps_per_second": 1.527,
      "step": 40
    },
    {
      "epoch": 0.9375,
      "grad_norm": 3.963009491354867,
      "learning_rate": 3.3676619069852654e-06,
      "loss": 0.8782,
      "step": 45
    },
    {
      "epoch": 1.0416666666666667,
      "grad_norm": 3.785430208940067,
      "learning_rate": 2.334947896124909e-06,
      "loss": 0.7259,
      "step": 50
    },
    {
      "epoch": 1.1458333333333333,
      "grad_norm": 2.9958758287554184,
      "learning_rate": 1.603233215095547e-06,
      "loss": 0.4621,
      "step": 55
    },
    {
      "epoch": 1.25,
      "grad_norm": 3.5920078433753297,
      "learning_rate": 1.0911174606561334e-06,
      "loss": 0.4422,
      "step": 60
    },
    {
      "epoch": 1.25,
      "eval_loss": 0.8722255825996399,
      "eval_runtime": 1.9622,
      "eval_samples_per_second": 173.278,
      "eval_steps_per_second": 1.529,
      "step": 60
    },
    {
      "epoch": 1.3541666666666667,
      "grad_norm": 3.9286205240212144,
      "learning_rate": 7.373930741131784e-07,
      "loss": 0.4117,
      "step": 65
    },
    {
      "epoch": 1.4583333333333333,
      "grad_norm": 4.373192522719682,
      "learning_rate": 4.965174334325768e-07,
      "loss": 0.4422,
      "step": 70
    },
    {
      "epoch": 1.5625,
      "grad_norm": 5.66550951351165,
      "learning_rate": 3.349849877937343e-07,
      "loss": 0.4432,
      "step": 75
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 3.7845006550086153,
      "learning_rate": 2.2844505627726646e-07,
      "loss": 0.4551,
      "step": 80
    },
    {
      "epoch": 1.6666666666666665,
      "eval_loss": 0.8555447459220886,
      "eval_runtime": 1.9656,
      "eval_samples_per_second": 172.972,
      "eval_steps_per_second": 1.526,
      "step": 80
    },
    {
      "epoch": 1.7708333333333335,
      "grad_norm": 3.572304742292393,
      "learning_rate": 1.594328760942437e-07,
      "loss": 0.4222,
      "step": 85
    },
    {
      "epoch": 1.875,
      "grad_norm": 3.488459009987249,
      "learning_rate": 1.156010161291434e-07,
      "loss": 0.4309,
      "step": 90
    },
    {
      "epoch": 1.9791666666666665,
      "grad_norm": 3.900536398698168,
      "learning_rate": 8.835555547373544e-08,
      "loss": 0.4793,
      "step": 95
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 3.413641170228889,
      "learning_rate": 7.181664349277562e-08,
      "loss": 0.3806,
      "step": 100
    },
    {
      "epoch": 2.0833333333333335,
      "eval_loss": 0.8529651165008545,
      "eval_runtime": 1.9631,
      "eval_samples_per_second": 173.199,
      "eval_steps_per_second": 1.528,
      "step": 100
    },
    {
      "epoch": 2.1875,
      "grad_norm": 3.3820950755420904,
      "learning_rate": 6.203637972657601e-08,
      "loss": 0.3745,
      "step": 105
    },
    {
      "epoch": 2.2916666666666665,
      "grad_norm": 3.247185633907683,
      "learning_rate": 5.6418543066491835e-08,
      "loss": 0.409,
      "step": 110
    },
    {
      "epoch": 2.3958333333333335,
      "grad_norm": 3.3611611000807198,
      "learning_rate": 5.329471712759216e-08,
      "loss": 0.3605,
      "step": 115
    },
    {
      "epoch": 2.5,
      "grad_norm": 4.406858841387693,
      "learning_rate": 5.161995210302015e-08,
      "loss": 0.4011,
      "step": 120
    },
    {
      "epoch": 2.5,
      "eval_loss": 0.8576686978340149,
      "eval_runtime": 1.9674,
      "eval_samples_per_second": 172.821,
      "eval_steps_per_second": 1.525,
      "step": 120
    },
    {
      "epoch": 2.6041666666666665,
      "grad_norm": 3.4544518361326575,
      "learning_rate": 5.075841465580837e-08,
      "loss": 0.3872,
      "step": 125
    },
    {
      "epoch": 2.7083333333333335,
      "grad_norm": 3.733645772230528,
      "learning_rate": 5.033564114946932e-08,
      "loss": 0.3853,
      "step": 130
    },
    {
      "epoch": 2.8125,
      "grad_norm": 3.40895522907687,
      "learning_rate": 5.013915282607116e-08,
      "loss": 0.3832,
      "step": 135
    },
    {
      "epoch": 2.9166666666666665,
      "grad_norm": 3.2871773154531923,
      "learning_rate": 5.005343402153039e-08,
      "loss": 0.37,
      "step": 140
    },
    {
      "epoch": 2.9166666666666665,
      "eval_loss": 0.8621854782104492,
      "eval_runtime": 1.9654,
      "eval_samples_per_second": 172.991,
      "eval_steps_per_second": 1.526,
      "step": 140
    },
    {
      "epoch": 3.0208333333333335,
      "grad_norm": 3.105947874400041,
      "learning_rate": 5.001872829857116e-08,
      "loss": 0.3799,
      "step": 145
    },
    {
      "epoch": 3.125,
      "grad_norm": 3.27679756840579,
      "learning_rate": 5.000587713853837e-08,
      "loss": 0.3821,
      "step": 150
    },
    {
      "epoch": 3.2291666666666665,
      "grad_norm": 3.2917595668052524,
      "learning_rate": 5.0001608748597456e-08,
      "loss": 0.3799,
      "step": 155
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 4.375036973754038,
      "learning_rate": 5.0000370319656156e-08,
      "loss": 0.3626,
      "step": 160
    },
    {
      "epoch": 3.3333333333333335,
      "eval_loss": 0.8658773303031921,
      "eval_runtime": 1.9678,
      "eval_samples_per_second": 172.779,
      "eval_steps_per_second": 1.525,
      "step": 160
    },
    {
      "epoch": 3.4375,
      "grad_norm": 3.315514407544336,
      "learning_rate": 5.0000067945715855e-08,
      "loss": 0.3646,
      "step": 165
    },
    {
      "epoch": 3.5416666666666665,
      "grad_norm": 3.562684382929414,
      "learning_rate": 5.0000009144677036e-08,
      "loss": 0.347,
      "step": 170
    },
    {
      "epoch": 3.6458333333333335,
      "grad_norm": 3.3664300561373466,
      "learning_rate": 5.0000000785521776e-08,
      "loss": 0.3734,
      "step": 175
    },
    {
      "epoch": 3.75,
      "grad_norm": 3.499144173589757,
      "learning_rate": 5.000000003317662e-08,
      "loss": 0.3708,
      "step": 180
    },
    {
      "epoch": 3.75,
      "eval_loss": 0.8686714768409729,
      "eval_runtime": 1.965,
      "eval_samples_per_second": 173.025,
      "eval_steps_per_second": 1.527,
      "step": 180
    },
    {
      "epoch": 3.8541666666666665,
      "grad_norm": 3.7255205262291726,
      "learning_rate": 5.000000000038355e-08,
      "loss": 0.3746,
      "step": 185
    },
    {
      "epoch": 3.9583333333333335,
      "grad_norm": 3.1641205572409237,
      "learning_rate": 5.000000000000018e-08,
      "loss": 0.3734,
      "step": 190
    },
    {
      "epoch": 4.0,
      "step": 192,
      "total_flos": 5360344104960.0,
      "train_loss": 0.5152150879924496,
      "train_runtime": 1052.2097,
      "train_samples_per_second": 11.61,
      "train_steps_per_second": 0.182
    }
  ],
  "logging_steps": 5,
  "max_steps": 192,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 20,
  "total_flos": 5360344104960.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|