{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.01531745423910546,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006126981695642184,
      "grad_norm": 16.523408889770508,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 6.7324,
      "step": 1
    },
    {
      "epoch": 0.0006126981695642184,
      "eval_loss": 7.338907718658447,
      "eval_runtime": 69.8065,
      "eval_samples_per_second": 19.697,
      "eval_steps_per_second": 9.856,
      "step": 1
    },
    {
      "epoch": 0.0012253963391284368,
      "grad_norm": 18.644798278808594,
      "learning_rate": 6.666666666666667e-05,
      "loss": 6.6114,
      "step": 2
    },
    {
      "epoch": 0.0018380945086926552,
      "grad_norm": 15.051867485046387,
      "learning_rate": 0.0001,
      "loss": 5.7936,
      "step": 3
    },
    {
      "epoch": 0.0024507926782568737,
      "grad_norm": 15.758862495422363,
      "learning_rate": 9.99524110790929e-05,
      "loss": 5.8646,
      "step": 4
    },
    {
      "epoch": 0.0030634908478210923,
      "grad_norm": 12.907825469970703,
      "learning_rate": 9.980973490458728e-05,
      "loss": 4.3165,
      "step": 5
    },
    {
      "epoch": 0.0036761890173853105,
      "grad_norm": 13.11896800994873,
      "learning_rate": 9.957224306869053e-05,
      "loss": 3.7056,
      "step": 6
    },
    {
      "epoch": 0.004288887186949529,
      "grad_norm": 11.859108924865723,
      "learning_rate": 9.924038765061042e-05,
      "loss": 3.0861,
      "step": 7
    },
    {
      "epoch": 0.004901585356513747,
      "grad_norm": 11.189018249511719,
      "learning_rate": 9.881480035599667e-05,
      "loss": 3.0677,
      "step": 8
    },
    {
      "epoch": 0.005514283526077966,
      "grad_norm": 8.765135765075684,
      "learning_rate": 9.829629131445342e-05,
      "loss": 2.8499,
      "step": 9
    },
    {
      "epoch": 0.006126981695642185,
      "grad_norm": 7.158143520355225,
      "learning_rate": 9.768584753741134e-05,
      "loss": 2.1377,
      "step": 10
    },
    {
      "epoch": 0.006739679865206402,
      "grad_norm": 7.217545509338379,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.4496,
      "step": 11
    },
    {
      "epoch": 0.007352378034770621,
      "grad_norm": 7.291643142700195,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.249,
      "step": 12
    },
    {
      "epoch": 0.007965076204334839,
      "grad_norm": 8.006402015686035,
      "learning_rate": 9.53153893518325e-05,
      "loss": 2.7193,
      "step": 13
    },
    {
      "epoch": 0.008577774373899057,
      "grad_norm": 6.830563545227051,
      "learning_rate": 9.435054165891109e-05,
      "loss": 2.7104,
      "step": 14
    },
    {
      "epoch": 0.009190472543463276,
      "grad_norm": 5.633250713348389,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.0096,
      "step": 15
    },
    {
      "epoch": 0.009803170713027495,
      "grad_norm": 4.747769832611084,
      "learning_rate": 9.21695722906443e-05,
      "loss": 1.9659,
      "step": 16
    },
    {
      "epoch": 0.010415868882591713,
      "grad_norm": 4.103663921356201,
      "learning_rate": 9.09576022144496e-05,
      "loss": 1.8876,
      "step": 17
    },
    {
      "epoch": 0.011028567052155932,
      "grad_norm": 5.507636547088623,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.6484,
      "step": 18
    },
    {
      "epoch": 0.01164126522172015,
      "grad_norm": 4.795409679412842,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.8157,
      "step": 19
    },
    {
      "epoch": 0.01225396339128437,
      "grad_norm": 4.361155033111572,
      "learning_rate": 8.68638668405062e-05,
      "loss": 1.8387,
      "step": 20
    },
    {
      "epoch": 0.012866661560848588,
      "grad_norm": 4.868300914764404,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.2786,
      "step": 21
    },
    {
      "epoch": 0.013479359730412805,
      "grad_norm": 6.176185131072998,
      "learning_rate": 8.377951038078302e-05,
      "loss": 2.3026,
      "step": 22
    },
    {
      "epoch": 0.014092057899977023,
      "grad_norm": 4.714465141296387,
      "learning_rate": 8.213938048432697e-05,
      "loss": 2.1449,
      "step": 23
    },
    {
      "epoch": 0.014704756069541242,
      "grad_norm": 4.232319355010986,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.418,
      "step": 24
    },
    {
      "epoch": 0.01531745423910546,
      "grad_norm": 4.79272985458374,
      "learning_rate": 7.86788218175523e-05,
      "loss": 2.043,
      "step": 25
    },
    {
      "epoch": 0.01531745423910546,
      "eval_loss": 1.8542509078979492,
      "eval_runtime": 69.8889,
      "eval_samples_per_second": 19.674,
      "eval_steps_per_second": 9.844,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9837753453772800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}