{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.021727322107550243,
  "eval_steps": 3,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0021727322107550242,
      "grad_norm": 37.16297912597656,
      "learning_rate": 2e-05,
      "loss": 26.2925,
      "step": 1
    },
    {
      "epoch": 0.0021727322107550242,
      "eval_loss": 6.362123489379883,
      "eval_runtime": 69.8308,
      "eval_samples_per_second": 2.778,
      "eval_steps_per_second": 1.389,
      "step": 1
    },
    {
      "epoch": 0.0043454644215100485,
      "grad_norm": 32.820220947265625,
      "learning_rate": 4e-05,
      "loss": 23.3629,
      "step": 2
    },
    {
      "epoch": 0.006518196632265073,
      "grad_norm": 45.58674621582031,
      "learning_rate": 6e-05,
      "loss": 31.4505,
      "step": 3
    },
    {
      "epoch": 0.006518196632265073,
      "eval_loss": 6.055120944976807,
      "eval_runtime": 68.8054,
      "eval_samples_per_second": 2.82,
      "eval_steps_per_second": 1.41,
      "step": 3
    },
    {
      "epoch": 0.008690928843020097,
      "grad_norm": 33.71705627441406,
      "learning_rate": 8e-05,
      "loss": 22.7458,
      "step": 4
    },
    {
      "epoch": 0.010863661053775122,
      "grad_norm": 26.417665481567383,
      "learning_rate": 0.0001,
      "loss": 19.5417,
      "step": 5
    },
    {
      "epoch": 0.013036393264530146,
      "grad_norm": 31.988718032836914,
      "learning_rate": 0.00012,
      "loss": 24.3794,
      "step": 6
    },
    {
      "epoch": 0.013036393264530146,
      "eval_loss": 4.7808332443237305,
      "eval_runtime": 67.3901,
      "eval_samples_per_second": 2.879,
      "eval_steps_per_second": 1.439,
      "step": 6
    },
    {
      "epoch": 0.015209125475285171,
      "grad_norm": 29.322267532348633,
      "learning_rate": 0.00014,
      "loss": 19.7093,
      "step": 7
    },
    {
      "epoch": 0.017381857686040194,
      "grad_norm": 27.37926483154297,
      "learning_rate": 0.00016,
      "loss": 18.7525,
      "step": 8
    },
    {
      "epoch": 0.01955458989679522,
      "grad_norm": 28.797134399414062,
      "learning_rate": 0.00018,
      "loss": 17.7134,
      "step": 9
    },
    {
      "epoch": 0.01955458989679522,
      "eval_loss": 3.6513190269470215,
      "eval_runtime": 68.1618,
      "eval_samples_per_second": 2.846,
      "eval_steps_per_second": 1.423,
      "step": 9
    },
    {
      "epoch": 0.021727322107550243,
      "grad_norm": 34.8036003112793,
      "learning_rate": 0.0002,
      "loss": 16.0472,
      "step": 10
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1803185647779840.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}