{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.015247776365946633,
  "eval_steps": 3,
  "global_step": 6,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025412960609911056,
      "grad_norm": 0.2694489061832428,
      "learning_rate": 2e-05,
      "loss": 5.4593,
      "step": 1
    },
    {
      "epoch": 0.0025412960609911056,
      "eval_loss": 5.5072832107543945,
      "eval_runtime": 5.1676,
      "eval_samples_per_second": 32.123,
      "eval_steps_per_second": 16.062,
      "step": 1
    },
    {
      "epoch": 0.005082592121982211,
      "grad_norm": 0.37336480617523193,
      "learning_rate": 4e-05,
      "loss": 5.4793,
      "step": 2
    },
    {
      "epoch": 0.007623888182973317,
      "grad_norm": 0.29353266954421997,
      "learning_rate": 6e-05,
      "loss": 5.4156,
      "step": 3
    },
    {
      "epoch": 0.007623888182973317,
      "eval_loss": 5.5053815841674805,
      "eval_runtime": 4.0708,
      "eval_samples_per_second": 40.778,
      "eval_steps_per_second": 20.389,
      "step": 3
    },
    {
      "epoch": 0.010165184243964422,
      "grad_norm": 0.4207238256931305,
      "learning_rate": 8e-05,
      "loss": 5.4398,
      "step": 4
    },
    {
      "epoch": 0.012706480304955527,
      "grad_norm": 0.40501707792282104,
      "learning_rate": 0.0001,
      "loss": 5.4969,
      "step": 5
    },
    {
      "epoch": 0.015247776365946633,
      "grad_norm": 0.26661601662635803,
      "learning_rate": 0.00012,
      "loss": 5.4325,
      "step": 6
    },
    {
      "epoch": 0.015247776365946633,
      "eval_loss": 5.472946643829346,
      "eval_runtime": 4.1557,
      "eval_samples_per_second": 39.945,
      "eval_steps_per_second": 19.972,
      "step": 6
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 445644313460736.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
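
For reference, a minimal Python sketch of how a trainer state file like the one above can be inspected with only the standard library. The path "checkpoint-6/trainer_state.json" is an assumption about where the Trainer saved this checkpoint, not something confirmed by the file itself; note also that the per-step epoch increment here (about 0.00254) implies roughly 394 optimizer steps per epoch under this run's batching, if no gradient accumulation is used.

import json

# Assumed location: Trainer writes trainer_state.json inside each
# checkpoint-<step> directory it saves.
STATE_PATH = "checkpoint-6/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history interleaves training records (keyed by "loss") and
# evaluation records (keyed by "eval_loss"); split them by key.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']} / max_steps={state['max_steps']}")
for e in train_log:
    print(f"step {e['step']}: loss={e['loss']:.4f} lr={e['learning_rate']:.0e}")
for e in eval_log:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}")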