{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.015194489465153971,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002025931928687196,
"grad_norm": NaN,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0,
"step": 1
},
{
"epoch": 0.0002025931928687196,
"eval_loss": NaN,
"eval_runtime": 71.975,
"eval_samples_per_second": 28.885,
"eval_steps_per_second": 14.449,
"step": 1
},
{
"epoch": 0.0004051863857374392,
"grad_norm": NaN,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.0,
"step": 2
},
{
"epoch": 0.0006077795786061589,
"grad_norm": NaN,
"learning_rate": 5e-05,
"loss": 0.0,
"step": 3
},
{
"epoch": 0.0008103727714748784,
"grad_norm": NaN,
"learning_rate": 4.997620553954645e-05,
"loss": 0.0,
"step": 4
},
{
"epoch": 0.001012965964343598,
"grad_norm": NaN,
"learning_rate": 4.990486745229364e-05,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.0012155591572123178,
"grad_norm": NaN,
"learning_rate": 4.9786121534345265e-05,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.0014181523500810373,
"grad_norm": NaN,
"learning_rate": 4.962019382530521e-05,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.0016207455429497568,
"grad_norm": NaN,
"learning_rate": 4.940740017799833e-05,
"loss": 0.0,
"step": 8
},
{
"epoch": 0.0018233387358184765,
"grad_norm": NaN,
"learning_rate": 4.914814565722671e-05,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.002025931928687196,
"grad_norm": NaN,
"learning_rate": 4.884292376870567e-05,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.002228525121555916,
"grad_norm": NaN,
"learning_rate": 4.849231551964771e-05,
"loss": 0.0,
"step": 11
},
{
"epoch": 0.0024311183144246355,
"grad_norm": NaN,
"learning_rate": 4.8096988312782174e-05,
"loss": 0.0,
"step": 12
},
{
"epoch": 0.002633711507293355,
"grad_norm": NaN,
"learning_rate": 4.765769467591625e-05,
"loss": 0.0,
"step": 13
},
{
"epoch": 0.0028363047001620746,
"grad_norm": NaN,
"learning_rate": 4.717527082945554e-05,
"loss": 0.0,
"step": 14
},
{
"epoch": 0.0030388978930307943,
"grad_norm": NaN,
"learning_rate": 4.665063509461097e-05,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.0032414910858995136,
"grad_norm": NaN,
"learning_rate": 4.608478614532215e-05,
"loss": 0.0,
"step": 16
},
{
"epoch": 0.0034440842787682334,
"grad_norm": NaN,
"learning_rate": 4.54788011072248e-05,
"loss": 0.0,
"step": 17
},
{
"epoch": 0.003646677471636953,
"grad_norm": NaN,
"learning_rate": 4.4833833507280884e-05,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.0038492706645056724,
"grad_norm": NaN,
"learning_rate": 4.415111107797445e-05,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.004051863857374392,
"grad_norm": NaN,
"learning_rate": 4.34319334202531e-05,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.0042544570502431114,
"grad_norm": NaN,
"learning_rate": 4.267766952966369e-05,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.004457050243111832,
"grad_norm": NaN,
"learning_rate": 4.188975519039151e-05,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.004659643435980551,
"grad_norm": NaN,
"learning_rate": 4.1069690242163484e-05,
"loss": 0.0,
"step": 23
},
{
"epoch": 0.004862236628849271,
"grad_norm": NaN,
"learning_rate": 4.021903572521802e-05,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.00506482982171799,
"grad_norm": NaN,
"learning_rate": 3.933941090877615e-05,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.00506482982171799,
"eval_loss": NaN,
"eval_runtime": 71.6128,
"eval_samples_per_second": 29.031,
"eval_steps_per_second": 14.523,
"step": 25
},
{
"epoch": 0.00526742301458671,
"grad_norm": NaN,
"learning_rate": 3.84324902086706e-05,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.00547001620745543,
"grad_norm": NaN,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.005672609400324149,
"grad_norm": NaN,
"learning_rate": 3.654371533087586e-05,
"loss": 0.0,
"step": 28
},
{
"epoch": 0.0058752025931928685,
"grad_norm": NaN,
"learning_rate": 3.556545654351749e-05,
"loss": 0.0,
"step": 29
},
{
"epoch": 0.006077795786061589,
"grad_norm": NaN,
"learning_rate": 3.456708580912725e-05,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.006280388978930308,
"grad_norm": NaN,
"learning_rate": 3.355050358314172e-05,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.006482982171799027,
"grad_norm": NaN,
"learning_rate": 3.251764498760683e-05,
"loss": 0.0,
"step": 32
},
{
"epoch": 0.006685575364667747,
"grad_norm": NaN,
"learning_rate": 3.147047612756302e-05,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.006888168557536467,
"grad_norm": NaN,
"learning_rate": 3.0410990348452573e-05,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.007090761750405186,
"grad_norm": NaN,
"learning_rate": 2.9341204441673266e-05,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.007293354943273906,
"grad_norm": NaN,
"learning_rate": 2.8263154805501297e-05,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.0074959481361426255,
"grad_norm": NaN,
"learning_rate": 2.717889356869146e-05,
"loss": 0.0,
"step": 37
},
{
"epoch": 0.007698541329011345,
"grad_norm": NaN,
"learning_rate": 2.6090484684133404e-05,
"loss": 0.0,
"step": 38
},
{
"epoch": 0.007901134521880064,
"grad_norm": NaN,
"learning_rate": 2.5e-05,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.008103727714748784,
"grad_norm": NaN,
"learning_rate": 2.3909515315866605e-05,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.008306320907617504,
"grad_norm": NaN,
"learning_rate": 2.2821106431308544e-05,
"loss": 0.0,
"step": 41
},
{
"epoch": 0.008508914100486223,
"grad_norm": NaN,
"learning_rate": 2.173684519449872e-05,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.008711507293354943,
"grad_norm": NaN,
"learning_rate": 2.0658795558326743e-05,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.008914100486223663,
"grad_norm": NaN,
"learning_rate": 1.958900965154743e-05,
"loss": 0.0,
"step": 44
},
{
"epoch": 0.009116693679092382,
"grad_norm": NaN,
"learning_rate": 1.852952387243698e-05,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.009319286871961102,
"grad_norm": NaN,
"learning_rate": 1.7482355012393177e-05,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.009521880064829822,
"grad_norm": NaN,
"learning_rate": 1.6449496416858284e-05,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.009724473257698542,
"grad_norm": NaN,
"learning_rate": 1.5432914190872757e-05,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.00992706645056726,
"grad_norm": NaN,
"learning_rate": 1.443454345648252e-05,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.01012965964343598,
"grad_norm": NaN,
"learning_rate": 1.3456284669124158e-05,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.01012965964343598,
"eval_loss": NaN,
"eval_runtime": 71.5185,
"eval_samples_per_second": 29.069,
"eval_steps_per_second": 14.542,
"step": 50
},
{
"epoch": 0.010332252836304701,
"grad_norm": NaN,
"learning_rate": 1.2500000000000006e-05,
"loss": 0.0,
"step": 51
},
{
"epoch": 0.01053484602917342,
"grad_norm": NaN,
"learning_rate": 1.1567509791329401e-05,
"loss": 0.0,
"step": 52
},
{
"epoch": 0.01073743922204214,
"grad_norm": NaN,
"learning_rate": 1.0660589091223855e-05,
"loss": 0.0,
"step": 53
},
{
"epoch": 0.01094003241491086,
"grad_norm": NaN,
"learning_rate": 9.780964274781984e-06,
"loss": 0.0,
"step": 54
},
{
"epoch": 0.011142625607779578,
"grad_norm": NaN,
"learning_rate": 8.930309757836517e-06,
"loss": 0.0,
"step": 55
},
{
"epoch": 0.011345218800648298,
"grad_norm": NaN,
"learning_rate": 8.110244809608495e-06,
"loss": 0.0,
"step": 56
},
{
"epoch": 0.011547811993517018,
"grad_norm": NaN,
"learning_rate": 7.3223304703363135e-06,
"loss": 0.0,
"step": 57
},
{
"epoch": 0.011750405186385737,
"grad_norm": NaN,
"learning_rate": 6.568066579746901e-06,
"loss": 0.0,
"step": 58
},
{
"epoch": 0.011952998379254457,
"grad_norm": NaN,
"learning_rate": 5.848888922025553e-06,
"loss": 0.0,
"step": 59
},
{
"epoch": 0.012155591572123177,
"grad_norm": NaN,
"learning_rate": 5.166166492719124e-06,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.012358184764991896,
"grad_norm": NaN,
"learning_rate": 4.521198892775203e-06,
"loss": 0.0,
"step": 61
},
{
"epoch": 0.012560777957860616,
"grad_norm": NaN,
"learning_rate": 3.9152138546778625e-06,
"loss": 0.0,
"step": 62
},
{
"epoch": 0.012763371150729336,
"grad_norm": NaN,
"learning_rate": 3.3493649053890326e-06,
"loss": 0.0,
"step": 63
},
{
"epoch": 0.012965964343598054,
"grad_norm": NaN,
"learning_rate": 2.8247291705444575e-06,
"loss": 0.0,
"step": 64
},
{
"epoch": 0.013168557536466775,
"grad_norm": NaN,
"learning_rate": 2.3423053240837515e-06,
"loss": 0.0,
"step": 65
},
{
"epoch": 0.013371150729335495,
"grad_norm": NaN,
"learning_rate": 1.9030116872178316e-06,
"loss": 0.0,
"step": 66
},
{
"epoch": 0.013573743922204213,
"grad_norm": NaN,
"learning_rate": 1.5076844803522922e-06,
"loss": 0.0,
"step": 67
},
{
"epoch": 0.013776337115072933,
"grad_norm": NaN,
"learning_rate": 1.1570762312943295e-06,
"loss": 0.0,
"step": 68
},
{
"epoch": 0.013978930307941654,
"grad_norm": NaN,
"learning_rate": 8.51854342773295e-07,
"loss": 0.0,
"step": 69
},
{
"epoch": 0.014181523500810372,
"grad_norm": NaN,
"learning_rate": 5.925998220016659e-07,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.014384116693679092,
"grad_norm": NaN,
"learning_rate": 3.7980617469479953e-07,
"loss": 0.0,
"step": 71
},
{
"epoch": 0.014586709886547812,
"grad_norm": NaN,
"learning_rate": 2.1387846565474045e-07,
"loss": 0.0,
"step": 72
},
{
"epoch": 0.01478930307941653,
"grad_norm": NaN,
"learning_rate": 9.513254770636137e-08,
"loss": 0.0,
"step": 73
},
{
"epoch": 0.014991896272285251,
"grad_norm": NaN,
"learning_rate": 2.3794460453555047e-08,
"loss": 0.0,
"step": 74
},
{
"epoch": 0.015194489465153971,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.015194489465153971,
"eval_loss": NaN,
"eval_runtime": 71.4978,
"eval_samples_per_second": 29.078,
"eval_steps_per_second": 14.546,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7865367200268288.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}