Training in progress, step 9, checkpoint
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.001270513499205929,
  "eval_steps": 3,
  "global_step": 9,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00014116816657843657,
      "grad_norm": 4.341556632425636e-06,
      "learning_rate": 2e-05,
      "loss": 46.0,
      "step": 1
    },
    {
      "epoch": 0.00014116816657843657,
      "eval_loss": 11.5,
      "eval_runtime": 17.9603,
      "eval_samples_per_second": 166.089,
      "eval_steps_per_second": 83.072,
      "step": 1
    },
    {
      "epoch": 0.00028233633315687313,
      "grad_norm": 5.458827217807993e-06,
      "learning_rate": 4e-05,
      "loss": 46.0,
      "step": 2
    },
    {
      "epoch": 0.00042350449973530967,
      "grad_norm": 1.301592965319287e-05,
      "learning_rate": 6e-05,
      "loss": 46.0,
      "step": 3
    },
    {
      "epoch": 0.00042350449973530967,
      "eval_loss": 11.5,
      "eval_runtime": 17.9586,
      "eval_samples_per_second": 166.104,
      "eval_steps_per_second": 83.08,
      "step": 3
    },
    {
      "epoch": 0.0005646726663137463,
      "grad_norm": 5.268444056127919e-06,
      "learning_rate": 8e-05,
      "loss": 46.0,
      "step": 4
    },
    {
      "epoch": 0.0007058408328921829,
      "grad_norm": 6.410667538148118e-06,
      "learning_rate": 0.0001,
      "loss": 46.0,
      "step": 5
    },
    {
      "epoch": 0.0008470089994706193,
      "grad_norm": 1.0726905202318449e-05,
      "learning_rate": 0.00012,
      "loss": 46.0,
      "step": 6
    },
    {
      "epoch": 0.0008470089994706193,
      "eval_loss": 11.5,
      "eval_runtime": 17.9538,
      "eval_samples_per_second": 166.148,
      "eval_steps_per_second": 83.102,
      "step": 6
    },
    {
      "epoch": 0.000988177166049056,
      "grad_norm": 5.360675459087361e-06,
      "learning_rate": 0.00014,
      "loss": 46.0,
      "step": 7
    },
    {
      "epoch": 0.0011293453326274925,
      "grad_norm": 3.938770987588214e-06,
      "learning_rate": 0.00016,
      "loss": 46.0,
      "step": 8
    },
    {
      "epoch": 0.001270513499205929,
      "grad_norm": 2.511269940441707e-06,
      "learning_rate": 0.00018,
      "loss": 46.0,
      "step": 9
    },
    {
      "epoch": 0.001270513499205929,
      "eval_loss": 11.5,
      "eval_runtime": 18.0409,
      "eval_samples_per_second": 165.347,
      "eval_steps_per_second": 82.701,
      "step": 9
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 353631092736.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
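
For reference, this is the trainer state that the Hugging Face Transformers Trainer saves alongside each checkpoint. Below is a minimal sketch of inspecting it with Python's standard json module; the file path and variable names are illustrative, not taken from this repository.

import json

# Illustrative path: adjust to wherever this checkpoint's trainer_state.json lives.
STATE_PATH = "checkpoint-9/trainer_state.json"

with open(STATE_PATH, "r", encoding="utf-8") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}, epoch={state['epoch']:.6f}")

# log_history mixes training records (with "loss") and evaluation records (with "eval_loss").
for record in state["log_history"]:
    if "loss" in record:
        print(f"step {record['step']:>2}: train loss {record['loss']}, "
              f"lr {record['learning_rate']}, grad_norm {record['grad_norm']:.3e}")
    elif "eval_loss" in record:
        print(f"step {record['step']:>2}: eval loss {record['eval_loss']}")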