{
  "best_metric": 0.06447368860244751,
  "best_model_checkpoint": "runs/beans_outputs/checkpoint-520",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 650,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07692307692307693,
      "grad_norm": 2.198932409286499,
      "learning_rate": 1.9692307692307696e-05,
      "loss": 1.0245,
      "step": 10
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 1.8804973363876343,
      "learning_rate": 1.9384615384615386e-05,
      "loss": 0.9455,
      "step": 20
    },
    {
      "epoch": 0.23076923076923078,
      "grad_norm": 2.0275747776031494,
      "learning_rate": 1.907692307692308e-05,
      "loss": 0.8402,
      "step": 30
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 2.6169326305389404,
      "learning_rate": 1.876923076923077e-05,
      "loss": 0.6958,
      "step": 40
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 4.97913932800293,
      "learning_rate": 1.8461538461538465e-05,
      "loss": 0.6621,
      "step": 50
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": 2.330277681350708,
      "learning_rate": 1.8153846153846155e-05,
      "loss": 0.5817,
      "step": 60
    },
    {
      "epoch": 0.5384615384615384,
      "grad_norm": 1.8199760913848877,
      "learning_rate": 1.784615384615385e-05,
      "loss": 0.5207,
      "step": 70
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 1.2205573320388794,
      "learning_rate": 1.753846153846154e-05,
      "loss": 0.3956,
      "step": 80
    },
    {
      "epoch": 0.6923076923076923,
      "grad_norm": 2.3881430625915527,
      "learning_rate": 1.7230769230769234e-05,
      "loss": 0.354,
      "step": 90
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 3.0776097774505615,
      "learning_rate": 1.6923076923076924e-05,
      "loss": 0.3698,
      "step": 100
    },
    {
      "epoch": 0.8461538461538461,
      "grad_norm": 2.2470011711120605,
      "learning_rate": 1.6615384615384618e-05,
      "loss": 0.3316,
      "step": 110
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 2.575230836868286,
      "learning_rate": 1.630769230769231e-05,
      "loss": 0.247,
      "step": 120
    },
    {
      "epoch": 1.0,
      "grad_norm": 7.0244927406311035,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.2821,
      "step": 130
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9624060150375939,
      "eval_loss": 0.21702182292938232,
      "eval_runtime": 4.8414,
      "eval_samples_per_second": 27.471,
      "eval_steps_per_second": 3.511,
      "step": 130
    },
    {
      "epoch": 1.0769230769230769,
      "grad_norm": 1.5459403991699219,
      "learning_rate": 1.5692307692307693e-05,
      "loss": 0.1998,
      "step": 140
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 3.0010740756988525,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.2238,
      "step": 150
    },
    {
      "epoch": 1.2307692307692308,
      "grad_norm": 1.4257588386535645,
      "learning_rate": 1.5076923076923078e-05,
      "loss": 0.2054,
      "step": 160
    },
    {
      "epoch": 1.3076923076923077,
      "grad_norm": 0.5266915559768677,
      "learning_rate": 1.4769230769230772e-05,
      "loss": 0.2185,
      "step": 170
    },
    {
      "epoch": 1.3846153846153846,
      "grad_norm": 0.6089905500411987,
      "learning_rate": 1.4461538461538462e-05,
      "loss": 0.2053,
      "step": 180
    },
    {
      "epoch": 1.4615384615384617,
      "grad_norm": 2.8735594749450684,
      "learning_rate": 1.4153846153846156e-05,
      "loss": 0.1624,
      "step": 190
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.4562249481678009,
      "learning_rate": 1.3846153846153847e-05,
      "loss": 0.2551,
      "step": 200
    },
    {
      "epoch": 1.6153846153846154,
      "grad_norm": 0.5948198437690735,
      "learning_rate": 1.353846153846154e-05,
      "loss": 0.1809,
      "step": 210
    },
    {
      "epoch": 1.6923076923076923,
      "grad_norm": 3.139431953430176,
      "learning_rate": 1.3230769230769231e-05,
      "loss": 0.2144,
      "step": 220
    },
    {
      "epoch": 1.7692307692307692,
      "grad_norm": 1.9466521739959717,
      "learning_rate": 1.2923076923076925e-05,
      "loss": 0.1757,
      "step": 230
    },
    {
      "epoch": 1.8461538461538463,
      "grad_norm": 5.938401699066162,
      "learning_rate": 1.2615384615384616e-05,
      "loss": 0.2557,
      "step": 240
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 0.3760318160057068,
      "learning_rate": 1.230769230769231e-05,
      "loss": 0.1325,
      "step": 250
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.34790655970573425,
      "learning_rate": 1.2e-05,
      "loss": 0.1291,
      "step": 260
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9699248120300752,
      "eval_loss": 0.12987138330936432,
      "eval_runtime": 1.598,
      "eval_samples_per_second": 83.231,
      "eval_steps_per_second": 10.639,
      "step": 260
    },
    {
      "epoch": 2.076923076923077,
      "grad_norm": 0.5149113535881042,
      "learning_rate": 1.1692307692307694e-05,
      "loss": 0.094,
      "step": 270
    },
    {
      "epoch": 2.1538461538461537,
      "grad_norm": 0.33900701999664307,
      "learning_rate": 1.1384615384615385e-05,
      "loss": 0.1408,
      "step": 280
    },
    {
      "epoch": 2.230769230769231,
      "grad_norm": 9.22519588470459,
      "learning_rate": 1.1076923076923079e-05,
      "loss": 0.1303,
      "step": 290
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 14.370689392089844,
      "learning_rate": 1.076923076923077e-05,
      "loss": 0.1368,
      "step": 300
    },
    {
      "epoch": 2.3846153846153846,
      "grad_norm": 5.706496238708496,
      "learning_rate": 1.0461538461538463e-05,
      "loss": 0.176,
      "step": 310
    },
    {
      "epoch": 2.4615384615384617,
      "grad_norm": 0.3110077381134033,
      "learning_rate": 1.0153846153846154e-05,
      "loss": 0.1567,
      "step": 320
    },
    {
      "epoch": 2.5384615384615383,
      "grad_norm": 5.352679252624512,
      "learning_rate": 9.846153846153848e-06,
      "loss": 0.1918,
      "step": 330
    },
    {
      "epoch": 2.6153846153846154,
      "grad_norm": 0.9024627804756165,
      "learning_rate": 9.53846153846154e-06,
      "loss": 0.0693,
      "step": 340
    },
    {
      "epoch": 2.6923076923076925,
      "grad_norm": 0.35386863350868225,
      "learning_rate": 9.230769230769232e-06,
      "loss": 0.1379,
      "step": 350
    },
    {
      "epoch": 2.769230769230769,
      "grad_norm": 11.4584379196167,
      "learning_rate": 8.923076923076925e-06,
      "loss": 0.1184,
      "step": 360
    },
    {
      "epoch": 2.8461538461538463,
      "grad_norm": 0.31197383999824524,
      "learning_rate": 8.615384615384617e-06,
      "loss": 0.1491,
      "step": 370
    },
    {
      "epoch": 2.9230769230769234,
      "grad_norm": 1.1729108095169067,
      "learning_rate": 8.307692307692309e-06,
      "loss": 0.1374,
      "step": 380
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.29289594292640686,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.1379,
      "step": 390
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.09719473123550415,
      "eval_runtime": 1.6898,
      "eval_samples_per_second": 78.709,
      "eval_steps_per_second": 10.061,
      "step": 390
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 2.651108741760254,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.1398,
      "step": 400
    },
    {
      "epoch": 3.1538461538461537,
      "grad_norm": 7.282815933227539,
      "learning_rate": 7.384615384615386e-06,
      "loss": 0.1577,
      "step": 410
    },
    {
      "epoch": 3.230769230769231,
      "grad_norm": 9.063953399658203,
      "learning_rate": 7.076923076923078e-06,
      "loss": 0.1402,
      "step": 420
    },
    {
      "epoch": 3.3076923076923075,
      "grad_norm": 0.23233899474143982,
      "learning_rate": 6.76923076923077e-06,
      "loss": 0.0767,
      "step": 430
    },
    {
      "epoch": 3.3846153846153846,
      "grad_norm": 6.015790939331055,
      "learning_rate": 6.461538461538463e-06,
      "loss": 0.1157,
      "step": 440
    },
    {
      "epoch": 3.4615384615384617,
      "grad_norm": 12.260323524475098,
      "learning_rate": 6.153846153846155e-06,
      "loss": 0.0839,
      "step": 450
    },
    {
      "epoch": 3.5384615384615383,
      "grad_norm": 4.703426837921143,
      "learning_rate": 5.846153846153847e-06,
      "loss": 0.0988,
      "step": 460
    },
    {
      "epoch": 3.6153846153846154,
      "grad_norm": 9.774993896484375,
      "learning_rate": 5.538461538461539e-06,
      "loss": 0.0759,
      "step": 470
    },
    {
      "epoch": 3.6923076923076925,
      "grad_norm": 0.22184737026691437,
      "learning_rate": 5.230769230769232e-06,
      "loss": 0.0985,
      "step": 480
    },
    {
      "epoch": 3.769230769230769,
      "grad_norm": 0.21117015182971954,
      "learning_rate": 4.923076923076924e-06,
      "loss": 0.0741,
      "step": 490
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.9010984897613525,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.1356,
      "step": 500
    },
    {
      "epoch": 3.9230769230769234,
      "grad_norm": 0.20677432417869568,
      "learning_rate": 4.307692307692308e-06,
      "loss": 0.1337,
      "step": 510
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.3007451593875885,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0803,
      "step": 520
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9849624060150376,
      "eval_loss": 0.06447368860244751,
      "eval_runtime": 1.7388,
      "eval_samples_per_second": 76.491,
      "eval_steps_per_second": 9.777,
      "step": 520
    },
    {
      "epoch": 4.076923076923077,
      "grad_norm": 6.4211835861206055,
      "learning_rate": 3.692307692307693e-06,
      "loss": 0.0977,
      "step": 530
    },
    {
      "epoch": 4.153846153846154,
      "grad_norm": 1.6094409227371216,
      "learning_rate": 3.384615384615385e-06,
      "loss": 0.1672,
      "step": 540
    },
    {
      "epoch": 4.230769230769231,
      "grad_norm": 0.3714386522769928,
      "learning_rate": 3.0769230769230774e-06,
      "loss": 0.1514,
      "step": 550
    },
    {
      "epoch": 4.3076923076923075,
      "grad_norm": 1.195460557937622,
      "learning_rate": 2.7692307692307697e-06,
      "loss": 0.0497,
      "step": 560
    },
    {
      "epoch": 4.384615384615385,
      "grad_norm": 0.2668437659740448,
      "learning_rate": 2.461538461538462e-06,
      "loss": 0.1348,
      "step": 570
    },
    {
      "epoch": 4.461538461538462,
      "grad_norm": 0.26586711406707764,
      "learning_rate": 2.153846153846154e-06,
      "loss": 0.0843,
      "step": 580
    },
    {
      "epoch": 4.538461538461538,
      "grad_norm": 8.662138938903809,
      "learning_rate": 1.8461538461538465e-06,
      "loss": 0.0777,
      "step": 590
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 1.8624422550201416,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 0.0838,
      "step": 600
    },
    {
      "epoch": 4.6923076923076925,
      "grad_norm": 5.394564628601074,
      "learning_rate": 1.230769230769231e-06,
      "loss": 0.0872,
      "step": 610
    },
    {
      "epoch": 4.769230769230769,
      "grad_norm": 0.24372971057891846,
      "learning_rate": 9.230769230769232e-07,
      "loss": 0.0838,
      "step": 620
    },
    {
      "epoch": 4.846153846153846,
      "grad_norm": 0.2118876874446869,
      "learning_rate": 6.153846153846155e-07,
      "loss": 0.0674,
      "step": 630
    },
    {
      "epoch": 4.923076923076923,
      "grad_norm": 0.2302178144454956,
      "learning_rate": 3.0769230769230774e-07,
      "loss": 0.0807,
      "step": 640
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.5646482110023499,
      "learning_rate": 0.0,
      "loss": 0.1123,
      "step": 650
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.07908037304878235,
      "eval_runtime": 1.654,
      "eval_samples_per_second": 80.413,
      "eval_steps_per_second": 10.278,
      "step": 650
    },
    {
      "epoch": 5.0,
      "step": 650,
      "total_flos": 4.006371770595533e+17,
      "train_loss": 0.21960490552278666,
      "train_runtime": 192.2421,
      "train_samples_per_second": 26.893,
      "train_steps_per_second": 3.381
    }
  ],
  "logging_steps": 10,
  "max_steps": 650,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.006371770595533e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}