{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10989010989010989,
  "eval_steps": 5,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005494505494505495,
      "grad_norm": 0.027151547372341156,
      "learning_rate": 1e-05,
      "loss": 11.9328,
      "step": 1
    },
    {
      "epoch": 0.005494505494505495,
      "eval_loss": 11.931960105895996,
      "eval_runtime": 18.0294,
      "eval_samples_per_second": 34.055,
      "eval_steps_per_second": 17.028,
      "step": 1
    },
    {
      "epoch": 0.01098901098901099,
      "grad_norm": 0.027643978595733643,
      "learning_rate": 2e-05,
      "loss": 11.9319,
      "step": 2
    },
    {
      "epoch": 0.016483516483516484,
      "grad_norm": 0.028413433581590652,
      "learning_rate": 3e-05,
      "loss": 11.9315,
      "step": 3
    },
    {
      "epoch": 0.02197802197802198,
      "grad_norm": 0.03067416325211525,
      "learning_rate": 4e-05,
      "loss": 11.9323,
      "step": 4
    },
    {
      "epoch": 0.027472527472527472,
      "grad_norm": 0.02727838046848774,
      "learning_rate": 5e-05,
      "loss": 11.932,
      "step": 5
    },
    {
      "epoch": 0.027472527472527472,
      "eval_loss": 11.931900024414062,
      "eval_runtime": 18.2244,
      "eval_samples_per_second": 33.691,
      "eval_steps_per_second": 16.846,
      "step": 5
    },
    {
      "epoch": 0.03296703296703297,
      "grad_norm": 0.02454882673919201,
      "learning_rate": 6e-05,
      "loss": 11.932,
      "step": 6
    },
    {
      "epoch": 0.038461538461538464,
      "grad_norm": 0.02835436351597309,
      "learning_rate": 7e-05,
      "loss": 11.9315,
      "step": 7
    },
    {
      "epoch": 0.04395604395604396,
      "grad_norm": 0.026226777583360672,
      "learning_rate": 8e-05,
      "loss": 11.9322,
      "step": 8
    },
    {
      "epoch": 0.04945054945054945,
      "grad_norm": 0.027679918333888054,
      "learning_rate": 9e-05,
      "loss": 11.9327,
      "step": 9
    },
    {
      "epoch": 0.054945054945054944,
      "grad_norm": 0.025617027655243874,
      "learning_rate": 0.0001,
      "loss": 11.9329,
      "step": 10
    },
    {
      "epoch": 0.054945054945054944,
      "eval_loss": 11.931732177734375,
      "eval_runtime": 18.2319,
      "eval_samples_per_second": 33.677,
      "eval_steps_per_second": 16.839,
      "step": 10
    },
    {
      "epoch": 0.06043956043956044,
      "grad_norm": 0.02739313431084156,
      "learning_rate": 9.755282581475769e-05,
      "loss": 11.9323,
      "step": 11
    },
    {
      "epoch": 0.06593406593406594,
      "grad_norm": 0.024653950706124306,
      "learning_rate": 9.045084971874738e-05,
      "loss": 11.9318,
      "step": 12
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 0.027017774060368538,
      "learning_rate": 7.938926261462366e-05,
      "loss": 11.931,
      "step": 13
    },
    {
      "epoch": 0.07692307692307693,
      "grad_norm": 0.028646156191825867,
      "learning_rate": 6.545084971874738e-05,
      "loss": 11.9311,
      "step": 14
    },
    {
      "epoch": 0.08241758241758242,
      "grad_norm": 0.028420699760317802,
      "learning_rate": 5e-05,
      "loss": 11.9325,
      "step": 15
    },
    {
      "epoch": 0.08241758241758242,
      "eval_loss": 11.931500434875488,
      "eval_runtime": 18.4245,
      "eval_samples_per_second": 33.325,
      "eval_steps_per_second": 16.663,
      "step": 15
    },
    {
      "epoch": 0.08791208791208792,
      "grad_norm": 0.028067054226994514,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 11.9316,
      "step": 16
    },
    {
      "epoch": 0.09340659340659341,
      "grad_norm": 0.029327698051929474,
      "learning_rate": 2.061073738537635e-05,
      "loss": 11.9319,
      "step": 17
    },
    {
      "epoch": 0.0989010989010989,
      "grad_norm": 0.02734130434691906,
      "learning_rate": 9.549150281252633e-06,
      "loss": 11.9326,
      "step": 18
    },
    {
      "epoch": 0.1043956043956044,
      "grad_norm": 0.030583491548895836,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 11.933,
      "step": 19
    },
    {
      "epoch": 0.10989010989010989,
      "grad_norm": 0.023537058383226395,
      "learning_rate": 0.0,
      "loss": 11.9314,
      "step": 20
    },
    {
      "epoch": 0.10989010989010989,
      "eval_loss": 11.931438446044922,
      "eval_runtime": 18.4207,
      "eval_samples_per_second": 33.332,
      "eval_steps_per_second": 16.666,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 239830302720.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}