{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.42328042328042326,
  "eval_steps": 5,
  "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02821869488536155,
      "grad_norm": 2.359462261199951,
      "learning_rate": 1e-05,
      "loss": 6.2205,
      "step": 1
    },
    {
      "epoch": 0.02821869488536155,
      "eval_loss": 6.2788615226745605,
      "eval_runtime": 36.398,
      "eval_samples_per_second": 1.648,
      "eval_steps_per_second": 0.824,
      "step": 1
    },
    {
      "epoch": 0.0564373897707231,
      "grad_norm": 2.0334458351135254,
      "learning_rate": 2e-05,
      "loss": 6.4757,
      "step": 2
    },
    {
      "epoch": 0.08465608465608465,
      "grad_norm": 2.242079973220825,
      "learning_rate": 3e-05,
      "loss": 6.3527,
      "step": 3
    },
    {
      "epoch": 0.1128747795414462,
      "grad_norm": 2.256972312927246,
      "learning_rate": 4e-05,
      "loss": 6.5204,
      "step": 4
    },
    {
      "epoch": 0.14109347442680775,
      "grad_norm": 2.1701977252960205,
      "learning_rate": 5e-05,
      "loss": 6.0037,
      "step": 5
    },
    {
      "epoch": 0.14109347442680775,
      "eval_loss": 6.136302471160889,
      "eval_runtime": 0.4876,
      "eval_samples_per_second": 123.04,
      "eval_steps_per_second": 61.52,
      "step": 5
    },
    {
      "epoch": 0.1693121693121693,
      "grad_norm": 1.918559193611145,
      "learning_rate": 6e-05,
      "loss": 6.5218,
      "step": 6
    },
    {
      "epoch": 0.19753086419753085,
      "grad_norm": 2.5789589881896973,
      "learning_rate": 7e-05,
      "loss": 6.2126,
      "step": 7
    },
    {
      "epoch": 0.2257495590828924,
      "grad_norm": 2.5044870376586914,
      "learning_rate": 8e-05,
      "loss": 6.2001,
      "step": 8
    },
    {
      "epoch": 0.25396825396825395,
      "grad_norm": 2.7968616485595703,
      "learning_rate": 9e-05,
      "loss": 6.5004,
      "step": 9
    },
    {
      "epoch": 0.2821869488536155,
      "grad_norm": 2.528766393661499,
      "learning_rate": 0.0001,
      "loss": 6.0157,
      "step": 10
    },
    {
      "epoch": 0.2821869488536155,
      "eval_loss": 5.689663887023926,
      "eval_runtime": 0.4947,
      "eval_samples_per_second": 121.289,
      "eval_steps_per_second": 60.645,
      "step": 10
    },
    {
      "epoch": 0.31040564373897706,
      "grad_norm": 3.115682601928711,
      "learning_rate": 9.755282581475769e-05,
      "loss": 6.0316,
      "step": 11
    },
    {
      "epoch": 0.3386243386243386,
      "grad_norm": 3.406926155090332,
      "learning_rate": 9.045084971874738e-05,
      "loss": 6.203,
      "step": 12
    },
    {
      "epoch": 0.36684303350970016,
      "grad_norm": 3.4907941818237305,
      "learning_rate": 7.938926261462366e-05,
      "loss": 5.9364,
      "step": 13
    },
    {
      "epoch": 0.3950617283950617,
      "grad_norm": 3.92707896232605,
      "learning_rate": 6.545084971874738e-05,
      "loss": 5.4231,
      "step": 14
    },
    {
      "epoch": 0.42328042328042326,
      "grad_norm": 3.5260536670684814,
      "learning_rate": 5e-05,
      "loss": 5.4519,
      "step": 15
    },
    {
      "epoch": 0.42328042328042326,
      "eval_loss": 4.813125133514404,
      "eval_runtime": 0.5202,
      "eval_samples_per_second": 115.351,
      "eval_steps_per_second": 57.676,
      "step": 15
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1534751869501440.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}