{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0153846153846153,
"eval_steps": 50,
"global_step": 49,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06153846153846154,
"grad_norm": 2.5551578998565674,
"learning_rate": 5e-06,
"loss": 1.5015,
"step": 1
},
{
"epoch": 0.06153846153846154,
"eval_loss": 1.9502390623092651,
"eval_runtime": 3.1313,
"eval_samples_per_second": 8.623,
"eval_steps_per_second": 4.471,
"step": 1
},
{
"epoch": 0.12307692307692308,
"grad_norm": 3.457622528076172,
"learning_rate": 1e-05,
"loss": 1.6852,
"step": 2
},
{
"epoch": 0.18461538461538463,
"grad_norm": 3.9540300369262695,
"learning_rate": 1.5e-05,
"loss": 1.9428,
"step": 3
},
{
"epoch": 0.24615384615384617,
"grad_norm": 4.253664016723633,
"learning_rate": 2e-05,
"loss": 2.1933,
"step": 4
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.051302909851074,
"learning_rate": 2.5e-05,
"loss": 1.3646,
"step": 5
},
{
"epoch": 0.36923076923076925,
"grad_norm": 2.010624885559082,
"learning_rate": 3e-05,
"loss": 1.2732,
"step": 6
},
{
"epoch": 0.4307692307692308,
"grad_norm": 2.0207128524780273,
"learning_rate": 3.5e-05,
"loss": 1.3799,
"step": 7
},
{
"epoch": 0.49230769230769234,
"grad_norm": 2.6285526752471924,
"learning_rate": 4e-05,
"loss": 1.5559,
"step": 8
},
{
"epoch": 0.5538461538461539,
"grad_norm": 1.5692611932754517,
"learning_rate": 4.5e-05,
"loss": 1.1225,
"step": 9
},
{
"epoch": 0.6153846153846154,
"grad_norm": 1.5529125928878784,
"learning_rate": 5e-05,
"loss": 0.9641,
"step": 10
},
{
"epoch": 0.676923076923077,
"grad_norm": 2.1200125217437744,
"learning_rate": 5.500000000000001e-05,
"loss": 1.1675,
"step": 11
},
{
"epoch": 0.7384615384615385,
"grad_norm": 2.0532970428466797,
"learning_rate": 6e-05,
"loss": 1.2506,
"step": 12
},
{
"epoch": 0.8,
"grad_norm": 1.283276915550232,
"learning_rate": 6.500000000000001e-05,
"loss": 0.9155,
"step": 13
},
{
"epoch": 0.8615384615384616,
"grad_norm": 1.2680752277374268,
"learning_rate": 7e-05,
"loss": 0.9374,
"step": 14
},
{
"epoch": 0.9230769230769231,
"grad_norm": 1.449583888053894,
"learning_rate": 7.500000000000001e-05,
"loss": 1.0173,
"step": 15
},
{
"epoch": 0.9846153846153847,
"grad_norm": 1.8077707290649414,
"learning_rate": 8e-05,
"loss": 1.239,
"step": 16
},
{
"epoch": 1.0461538461538462,
"grad_norm": 6.680090427398682,
"learning_rate": 8.5e-05,
"loss": 1.5489,
"step": 17
},
{
"epoch": 1.1076923076923078,
"grad_norm": 0.9765293002128601,
"learning_rate": 9e-05,
"loss": 0.8236,
"step": 18
},
{
"epoch": 1.1692307692307693,
"grad_norm": 0.8778647780418396,
"learning_rate": 9.5e-05,
"loss": 0.8098,
"step": 19
},
{
"epoch": 1.2307692307692308,
"grad_norm": 1.2021186351776123,
"learning_rate": 0.0001,
"loss": 0.945,
"step": 20
},
{
"epoch": 1.2923076923076924,
"grad_norm": 0.8680479526519775,
"learning_rate": 9.970689785771798e-05,
"loss": 0.8755,
"step": 21
},
{
"epoch": 1.353846153846154,
"grad_norm": 0.8105959892272949,
"learning_rate": 9.883102778550434e-05,
"loss": 0.7208,
"step": 22
},
{
"epoch": 1.4153846153846155,
"grad_norm": 0.9586474895477295,
"learning_rate": 9.738265855914013e-05,
"loss": 0.7929,
"step": 23
},
{
"epoch": 1.476923076923077,
"grad_norm": 1.152060627937317,
"learning_rate": 9.537877098354786e-05,
"loss": 0.8189,
"step": 24
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.9032147526741028,
"learning_rate": 9.284285880837946e-05,
"loss": 0.8127,
"step": 25
},
{
"epoch": 1.6,
"grad_norm": 0.8633611798286438,
"learning_rate": 8.980465328528219e-05,
"loss": 0.6254,
"step": 26
},
{
"epoch": 1.6615384615384614,
"grad_norm": 1.0637147426605225,
"learning_rate": 8.629977459615655e-05,
"loss": 0.8617,
"step": 27
},
{
"epoch": 1.7230769230769232,
"grad_norm": 1.1698484420776367,
"learning_rate": 8.236931423909138e-05,
"loss": 0.962,
"step": 28
},
{
"epoch": 1.7846153846153845,
"grad_norm": 1.2579647302627563,
"learning_rate": 7.805935326811912e-05,
"loss": 0.7148,
"step": 29
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.8797979354858398,
"learning_rate": 7.342042203498951e-05,
"loss": 0.6799,
"step": 30
},
{
"epoch": 1.9076923076923076,
"grad_norm": 0.9369248151779175,
"learning_rate": 6.850690776699573e-05,
"loss": 0.7911,
"step": 31
},
{
"epoch": 1.9692307692307693,
"grad_norm": 1.1235183477401733,
"learning_rate": 6.337641692646106e-05,
"loss": 0.861,
"step": 32
},
{
"epoch": 2.0307692307692307,
"grad_norm": 3.2580151557922363,
"learning_rate": 5.808909982763825e-05,
"loss": 1.0444,
"step": 33
},
{
"epoch": 2.0923076923076924,
"grad_norm": 0.6802517771720886,
"learning_rate": 5.270694542927088e-05,
"loss": 0.4869,
"step": 34
},
{
"epoch": 2.1538461538461537,
"grad_norm": 0.7931990027427673,
"learning_rate": 4.729305457072913e-05,
"loss": 0.5766,
"step": 35
},
{
"epoch": 2.2153846153846155,
"grad_norm": 0.9201847910881042,
"learning_rate": 4.1910900172361764e-05,
"loss": 0.6061,
"step": 36
},
{
"epoch": 2.276923076923077,
"grad_norm": 0.7804703712463379,
"learning_rate": 3.6623583073538966e-05,
"loss": 0.5625,
"step": 37
},
{
"epoch": 2.3384615384615386,
"grad_norm": 0.7250446081161499,
"learning_rate": 3.149309223300428e-05,
"loss": 0.5647,
"step": 38
},
{
"epoch": 2.4,
"grad_norm": 0.7047279477119446,
"learning_rate": 2.65795779650105e-05,
"loss": 0.4682,
"step": 39
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.9160324335098267,
"learning_rate": 2.194064673188089e-05,
"loss": 0.6123,
"step": 40
},
{
"epoch": 2.523076923076923,
"grad_norm": 0.8389458656311035,
"learning_rate": 1.7630685760908622e-05,
"loss": 0.5937,
"step": 41
},
{
"epoch": 2.5846153846153848,
"grad_norm": 0.7738927602767944,
"learning_rate": 1.3700225403843469e-05,
"loss": 0.4901,
"step": 42
},
{
"epoch": 2.646153846153846,
"grad_norm": 0.8202934861183167,
"learning_rate": 1.0195346714717813e-05,
"loss": 0.5782,
"step": 43
},
{
"epoch": 2.707692307692308,
"grad_norm": 0.9444193243980408,
"learning_rate": 7.157141191620548e-06,
"loss": 0.5208,
"step": 44
},
{
"epoch": 2.769230769230769,
"grad_norm": 0.8349220752716064,
"learning_rate": 4.621229016452156e-06,
"loss": 0.5911,
"step": 45
},
{
"epoch": 2.830769230769231,
"grad_norm": 0.75601726770401,
"learning_rate": 2.6173414408598827e-06,
"loss": 0.5208,
"step": 46
},
{
"epoch": 2.8923076923076922,
"grad_norm": 0.8191360831260681,
"learning_rate": 1.1689722144956671e-06,
"loss": 0.5153,
"step": 47
},
{
"epoch": 2.953846153846154,
"grad_norm": 1.0536468029022217,
"learning_rate": 2.9310214228202013e-07,
"loss": 0.6799,
"step": 48
},
{
"epoch": 3.0153846153846153,
"grad_norm": 8.771407127380371,
"learning_rate": 0.0,
"loss": 1.3893,
"step": 49
}
],
"logging_steps": 1,
"max_steps": 49,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.117332469927117e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}