{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0006168841183183739,
  "eval_steps": 2,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.0844205915918693e-05,
      "grad_norm": NaN,
      "learning_rate": 2e-05,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 3.0844205915918693e-05,
      "eval_loss": NaN,
      "eval_runtime": 468.8432,
      "eval_samples_per_second": 29.116,
      "eval_steps_per_second": 14.559,
      "step": 1
    },
    {
      "epoch": 6.168841183183739e-05,
      "grad_norm": NaN,
      "learning_rate": 4e-05,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 6.168841183183739e-05,
      "eval_loss": NaN,
      "eval_runtime": 467.4001,
      "eval_samples_per_second": 29.206,
      "eval_steps_per_second": 14.604,
      "step": 2
    },
    {
      "epoch": 9.253261774775608e-05,
      "grad_norm": NaN,
      "learning_rate": 6e-05,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.00012337682366367477,
      "grad_norm": NaN,
      "learning_rate": 8e-05,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.00012337682366367477,
      "eval_loss": NaN,
      "eval_runtime": 467.7941,
      "eval_samples_per_second": 29.182,
      "eval_steps_per_second": 14.592,
      "step": 4
    },
    {
      "epoch": 0.00015422102957959348,
      "grad_norm": NaN,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.00018506523549551216,
      "grad_norm": NaN,
      "learning_rate": 0.00012,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.00018506523549551216,
      "eval_loss": NaN,
      "eval_runtime": 467.5617,
      "eval_samples_per_second": 29.196,
      "eval_steps_per_second": 14.599,
      "step": 6
    },
    {
      "epoch": 0.00021590944141143087,
      "grad_norm": NaN,
      "learning_rate": 0.00014,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.00024675364732734955,
      "grad_norm": NaN,
      "learning_rate": 0.00016,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.00024675364732734955,
      "eval_loss": NaN,
      "eval_runtime": 467.3563,
      "eval_samples_per_second": 29.209,
      "eval_steps_per_second": 14.606,
      "step": 8
    },
    {
      "epoch": 0.0002775978532432682,
      "grad_norm": NaN,
      "learning_rate": 0.00018,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.00030844205915918696,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.00030844205915918696,
      "eval_loss": NaN,
      "eval_runtime": 467.3596,
      "eval_samples_per_second": 29.209,
      "eval_steps_per_second": 14.605,
      "step": 10
    },
    {
      "epoch": 0.00033928626507510564,
      "grad_norm": NaN,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.0003701304709910243,
      "grad_norm": NaN,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.0003701304709910243,
      "eval_loss": NaN,
      "eval_runtime": 467.4243,
      "eval_samples_per_second": 29.205,
      "eval_steps_per_second": 14.603,
      "step": 12
    },
    {
      "epoch": 0.00040097467690694305,
      "grad_norm": NaN,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.00043181888282286173,
      "grad_norm": NaN,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.00043181888282286173,
      "eval_loss": NaN,
      "eval_runtime": 467.5965,
      "eval_samples_per_second": 29.194,
      "eval_steps_per_second": 14.598,
      "step": 14
    },
    {
      "epoch": 0.0004626630887387804,
      "grad_norm": NaN,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.0004935072946546991,
      "grad_norm": NaN,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.0004935072946546991,
      "eval_loss": NaN,
      "eval_runtime": 467.5922,
      "eval_samples_per_second": 29.194,
      "eval_steps_per_second": 14.598,
      "step": 16
    },
    {
      "epoch": 0.0005243515005706178,
      "grad_norm": NaN,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.0005551957064865365,
      "grad_norm": NaN,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.0005551957064865365,
      "eval_loss": NaN,
      "eval_runtime": 467.1377,
      "eval_samples_per_second": 29.223,
      "eval_steps_per_second": 14.612,
      "step": 18
    },
    {
      "epoch": 0.0005860399124024552,
      "grad_norm": NaN,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.0006168841183183739,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.0006168841183183739,
      "eval_loss": NaN,
      "eval_runtime": 467.1363,
      "eval_samples_per_second": 29.223,
      "eval_steps_per_second": 14.612,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2046335826001920.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}