{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.06571741511500548,
  "eval_steps": 5,
  "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004381161007667032,
      "grad_norm": 0.28300485014915466,
      "learning_rate": 2e-05,
      "loss": 0.1534,
      "step": 1
    },
    {
      "epoch": 0.004381161007667032,
      "eval_loss": 0.178242489695549,
      "eval_runtime": 10.3376,
      "eval_samples_per_second": 9.383,
      "eval_steps_per_second": 4.74,
      "step": 1
    },
    {
      "epoch": 0.008762322015334063,
      "grad_norm": 0.33447790145874023,
      "learning_rate": 4e-05,
      "loss": 0.1934,
      "step": 2
    },
    {
      "epoch": 0.013143483023001095,
      "grad_norm": 0.27208104729652405,
      "learning_rate": 6e-05,
      "loss": 0.1603,
      "step": 3
    },
    {
      "epoch": 0.017524644030668127,
      "grad_norm": 0.34873825311660767,
      "learning_rate": 8e-05,
      "loss": 0.1852,
      "step": 4
    },
    {
      "epoch": 0.02190580503833516,
      "grad_norm": 0.4304073750972748,
      "learning_rate": 0.0001,
      "loss": 0.1692,
      "step": 5
    },
    {
      "epoch": 0.02190580503833516,
      "eval_loss": 0.12932729721069336,
      "eval_runtime": 10.3651,
      "eval_samples_per_second": 9.358,
      "eval_steps_per_second": 4.727,
      "step": 5
    },
    {
      "epoch": 0.02628696604600219,
      "grad_norm": 0.40466296672821045,
      "learning_rate": 0.00012,
      "loss": 0.114,
      "step": 6
    },
    {
      "epoch": 0.03066812705366922,
      "grad_norm": 0.3059878349304199,
      "learning_rate": 0.00014,
      "loss": 0.1037,
      "step": 7
    },
    {
      "epoch": 0.03504928806133625,
      "grad_norm": 0.20368129014968872,
      "learning_rate": 0.00016,
      "loss": 0.055,
      "step": 8
    },
    {
      "epoch": 0.03943044906900329,
      "grad_norm": 0.31183746457099915,
      "learning_rate": 0.00018,
      "loss": 0.0435,
      "step": 9
    },
    {
      "epoch": 0.04381161007667032,
      "grad_norm": 0.2398935854434967,
      "learning_rate": 0.0002,
      "loss": 0.0406,
      "step": 10
    },
    {
      "epoch": 0.04381161007667032,
      "eval_loss": 0.029431650415062904,
      "eval_runtime": 10.3988,
      "eval_samples_per_second": 9.328,
      "eval_steps_per_second": 4.712,
      "step": 10
    },
    {
      "epoch": 0.04819277108433735,
      "grad_norm": 0.28297099471092224,
      "learning_rate": 0.0001996917333733128,
      "loss": 0.0256,
      "step": 11
    },
    {
      "epoch": 0.05257393209200438,
      "grad_norm": 0.13293632864952087,
      "learning_rate": 0.00019876883405951377,
      "loss": 0.0161,
      "step": 12
    },
    {
      "epoch": 0.056955093099671415,
      "grad_norm": 0.238490030169487,
      "learning_rate": 0.00019723699203976766,
      "loss": 0.0233,
      "step": 13
    },
    {
      "epoch": 0.06133625410733844,
      "grad_norm": 0.1766541600227356,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.0222,
      "step": 14
    },
    {
      "epoch": 0.06571741511500548,
      "grad_norm": 0.08793684095144272,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.0064,
      "step": 15
    },
    {
      "epoch": 0.06571741511500548,
      "eval_loss": 0.018376410007476807,
      "eval_runtime": 10.3128,
      "eval_samples_per_second": 9.406,
      "eval_steps_per_second": 4.751,
      "step": 15
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.109738548887552e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}