{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9948186528497409,
  "eval_steps": 500,
  "global_step": 96,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 0.407313588435244,
      "learning_rate": 2e-05,
      "loss": 1.4393,
      "step": 1
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.44021568832651153,
      "learning_rate": 0.0001,
      "loss": 1.4151,
      "step": 5
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.4536750312789675,
      "learning_rate": 0.0002,
      "loss": 1.3352,
      "step": 10
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.18522669739061845,
      "learning_rate": 0.00019833656768294662,
      "loss": 1.1436,
      "step": 15
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.11940000702730413,
      "learning_rate": 0.0001934016108732548,
      "loss": 1.0861,
      "step": 20
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.11067706553060182,
      "learning_rate": 0.00018535930890373466,
      "loss": 1.0546,
      "step": 25
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.08326696985890841,
      "learning_rate": 0.0001744772182743782,
      "loss": 1.0452,
      "step": 30
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.07551662936007832,
      "learning_rate": 0.00016111737140978494,
      "loss": 1.0374,
      "step": 35
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.06940303607219772,
      "learning_rate": 0.00014572423233046386,
      "loss": 1.0203,
      "step": 40
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.0646380507229549,
      "learning_rate": 0.00012880990993652377,
      "loss": 1.0025,
      "step": 45
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.06370005100951007,
      "learning_rate": 0.00011093712083778746,
      "loss": 1.0186,
      "step": 50
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.06034880535670624,
      "learning_rate": 9.270046853390925e-05,
      "loss": 1.0299,
      "step": 55
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.06311665876872108,
      "learning_rate": 7.470666176083192e-05,
      "loss": 1.0156,
      "step": 60
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.06131237795687744,
      "learning_rate": 5.755433011241851e-05,
      "loss": 1.0074,
      "step": 65
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.06522472527006118,
      "learning_rate": 4.181410844420474e-05,
      "loss": 1.0452,
      "step": 70
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.06426335241789467,
      "learning_rate": 2.800965262420043e-05,
      "loss": 1.0238,
      "step": 75
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.06843282189243624,
      "learning_rate": 1.660021821101222e-05,
      "loss": 1.0167,
      "step": 80
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.06475830798907048,
      "learning_rate": 7.96538164308407e-06,
      "loss": 1.0045,
      "step": 85
    },
    {
      "epoch": 0.93,
      "grad_norm": 0.06274448627075681,
      "learning_rate": 2.392412244407294e-06,
      "loss": 1.0133,
      "step": 90
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.06343293325060437,
      "learning_rate": 6.671516297606095e-08,
      "loss": 1.0275,
      "step": 95
    },
    {
      "epoch": 0.99,
      "eval_loss": NaN,
      "eval_runtime": 238.4177,
      "eval_samples_per_second": 9.689,
      "eval_steps_per_second": 2.424,
      "step": 96
    },
    {
      "epoch": 0.99,
      "step": 96,
      "total_flos": 3308484214915072.0,
      "train_loss": 1.0698106847703457,
      "train_runtime": 2647.5008,
      "train_samples_per_second": 8.156,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 5,
  "max_steps": 96,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 3308484214915072.0,
  "train_batch_size": 14,
  "trial_name": null,
  "trial_params": null
}