{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2782931354359926,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0037105751391465678,
"grad_norm": NaN,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0,
"step": 1
},
{
"epoch": 0.0037105751391465678,
"eval_loss": NaN,
"eval_runtime": 11.9596,
"eval_samples_per_second": 9.532,
"eval_steps_per_second": 4.766,
"step": 1
},
{
"epoch": 0.0074211502782931356,
"grad_norm": NaN,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.0,
"step": 2
},
{
"epoch": 0.011131725417439703,
"grad_norm": NaN,
"learning_rate": 5e-05,
"loss": 0.0,
"step": 3
},
{
"epoch": 0.014842300556586271,
"grad_norm": NaN,
"learning_rate": 4.997620553954645e-05,
"loss": 0.0,
"step": 4
},
{
"epoch": 0.01855287569573284,
"grad_norm": NaN,
"learning_rate": 4.990486745229364e-05,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.022263450834879406,
"grad_norm": NaN,
"learning_rate": 4.9786121534345265e-05,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.025974025974025976,
"grad_norm": NaN,
"learning_rate": 4.962019382530521e-05,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.029684601113172542,
"grad_norm": NaN,
"learning_rate": 4.940740017799833e-05,
"loss": 0.0,
"step": 8
},
{
"epoch": 0.03339517625231911,
"grad_norm": NaN,
"learning_rate": 4.914814565722671e-05,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.03710575139146568,
"grad_norm": NaN,
"learning_rate": 4.884292376870567e-05,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.04081632653061224,
"grad_norm": NaN,
"learning_rate": 4.849231551964771e-05,
"loss": 0.0,
"step": 11
},
{
"epoch": 0.04452690166975881,
"grad_norm": NaN,
"learning_rate": 4.8096988312782174e-05,
"loss": 0.0,
"step": 12
},
{
"epoch": 0.04823747680890538,
"grad_norm": NaN,
"learning_rate": 4.765769467591625e-05,
"loss": 0.0,
"step": 13
},
{
"epoch": 0.05194805194805195,
"grad_norm": NaN,
"learning_rate": 4.717527082945554e-05,
"loss": 0.0,
"step": 14
},
{
"epoch": 0.055658627087198514,
"grad_norm": NaN,
"learning_rate": 4.665063509461097e-05,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.059369202226345084,
"grad_norm": NaN,
"learning_rate": 4.608478614532215e-05,
"loss": 0.0,
"step": 16
},
{
"epoch": 0.06307977736549165,
"grad_norm": NaN,
"learning_rate": 4.54788011072248e-05,
"loss": 0.0,
"step": 17
},
{
"epoch": 0.06679035250463822,
"grad_norm": NaN,
"learning_rate": 4.4833833507280884e-05,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.07050092764378478,
"grad_norm": NaN,
"learning_rate": 4.415111107797445e-05,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.07421150278293136,
"grad_norm": NaN,
"learning_rate": 4.34319334202531e-05,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.07792207792207792,
"grad_norm": NaN,
"learning_rate": 4.267766952966369e-05,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.08163265306122448,
"grad_norm": NaN,
"learning_rate": 4.188975519039151e-05,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.08534322820037106,
"grad_norm": NaN,
"learning_rate": 4.1069690242163484e-05,
"loss": 0.0,
"step": 23
},
{
"epoch": 0.08905380333951762,
"grad_norm": NaN,
"learning_rate": 4.021903572521802e-05,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.09276437847866419,
"grad_norm": NaN,
"learning_rate": 3.933941090877615e-05,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.09276437847866419,
"eval_loss": NaN,
"eval_runtime": 11.9895,
"eval_samples_per_second": 9.508,
"eval_steps_per_second": 4.754,
"step": 25
},
{
"epoch": 0.09647495361781076,
"grad_norm": NaN,
"learning_rate": 3.84324902086706e-05,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.10018552875695733,
"grad_norm": NaN,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.1038961038961039,
"grad_norm": NaN,
"learning_rate": 3.654371533087586e-05,
"loss": 0.0,
"step": 28
},
{
"epoch": 0.10760667903525047,
"grad_norm": NaN,
"learning_rate": 3.556545654351749e-05,
"loss": 0.0,
"step": 29
},
{
"epoch": 0.11131725417439703,
"grad_norm": NaN,
"learning_rate": 3.456708580912725e-05,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.1150278293135436,
"grad_norm": NaN,
"learning_rate": 3.355050358314172e-05,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.11873840445269017,
"grad_norm": NaN,
"learning_rate": 3.251764498760683e-05,
"loss": 0.0,
"step": 32
},
{
"epoch": 0.12244897959183673,
"grad_norm": NaN,
"learning_rate": 3.147047612756302e-05,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.1261595547309833,
"grad_norm": NaN,
"learning_rate": 3.0410990348452573e-05,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.12987012987012986,
"grad_norm": NaN,
"learning_rate": 2.9341204441673266e-05,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.13358070500927643,
"grad_norm": NaN,
"learning_rate": 2.8263154805501297e-05,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.137291280148423,
"grad_norm": NaN,
"learning_rate": 2.717889356869146e-05,
"loss": 0.0,
"step": 37
},
{
"epoch": 0.14100185528756956,
"grad_norm": NaN,
"learning_rate": 2.6090484684133404e-05,
"loss": 0.0,
"step": 38
},
{
"epoch": 0.14471243042671614,
"grad_norm": NaN,
"learning_rate": 2.5e-05,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.14842300556586271,
"grad_norm": NaN,
"learning_rate": 2.3909515315866605e-05,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.15213358070500926,
"grad_norm": NaN,
"learning_rate": 2.2821106431308544e-05,
"loss": 0.0,
"step": 41
},
{
"epoch": 0.15584415584415584,
"grad_norm": NaN,
"learning_rate": 2.173684519449872e-05,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.15955473098330242,
"grad_norm": NaN,
"learning_rate": 2.0658795558326743e-05,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.16326530612244897,
"grad_norm": NaN,
"learning_rate": 1.958900965154743e-05,
"loss": 0.0,
"step": 44
},
{
"epoch": 0.16697588126159554,
"grad_norm": NaN,
"learning_rate": 1.852952387243698e-05,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.17068645640074212,
"grad_norm": NaN,
"learning_rate": 1.7482355012393177e-05,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.17439703153988867,
"grad_norm": NaN,
"learning_rate": 1.6449496416858284e-05,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.17810760667903525,
"grad_norm": NaN,
"learning_rate": 1.5432914190872757e-05,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.18181818181818182,
"grad_norm": NaN,
"learning_rate": 1.443454345648252e-05,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.18552875695732837,
"grad_norm": NaN,
"learning_rate": 1.3456284669124158e-05,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.18552875695732837,
"eval_loss": NaN,
"eval_runtime": 11.9855,
"eval_samples_per_second": 9.512,
"eval_steps_per_second": 4.756,
"step": 50
},
{
"epoch": 0.18923933209647495,
"grad_norm": NaN,
"learning_rate": 1.2500000000000006e-05,
"loss": 0.0,
"step": 51
},
{
"epoch": 0.19294990723562153,
"grad_norm": NaN,
"learning_rate": 1.1567509791329401e-05,
"loss": 0.0,
"step": 52
},
{
"epoch": 0.19666048237476808,
"grad_norm": NaN,
"learning_rate": 1.0660589091223855e-05,
"loss": 0.0,
"step": 53
},
{
"epoch": 0.20037105751391465,
"grad_norm": NaN,
"learning_rate": 9.780964274781984e-06,
"loss": 0.0,
"step": 54
},
{
"epoch": 0.20408163265306123,
"grad_norm": NaN,
"learning_rate": 8.930309757836517e-06,
"loss": 0.0,
"step": 55
},
{
"epoch": 0.2077922077922078,
"grad_norm": NaN,
"learning_rate": 8.110244809608495e-06,
"loss": 0.0,
"step": 56
},
{
"epoch": 0.21150278293135436,
"grad_norm": NaN,
"learning_rate": 7.3223304703363135e-06,
"loss": 0.0,
"step": 57
},
{
"epoch": 0.21521335807050093,
"grad_norm": NaN,
"learning_rate": 6.568066579746901e-06,
"loss": 0.0,
"step": 58
},
{
"epoch": 0.2189239332096475,
"grad_norm": NaN,
"learning_rate": 5.848888922025553e-06,
"loss": 0.0,
"step": 59
},
{
"epoch": 0.22263450834879406,
"grad_norm": NaN,
"learning_rate": 5.166166492719124e-06,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.22634508348794063,
"grad_norm": NaN,
"learning_rate": 4.521198892775203e-06,
"loss": 0.0,
"step": 61
},
{
"epoch": 0.2300556586270872,
"grad_norm": NaN,
"learning_rate": 3.9152138546778625e-06,
"loss": 0.0,
"step": 62
},
{
"epoch": 0.23376623376623376,
"grad_norm": NaN,
"learning_rate": 3.3493649053890326e-06,
"loss": 0.0,
"step": 63
},
{
"epoch": 0.23747680890538034,
"grad_norm": NaN,
"learning_rate": 2.8247291705444575e-06,
"loss": 0.0,
"step": 64
},
{
"epoch": 0.24118738404452691,
"grad_norm": NaN,
"learning_rate": 2.3423053240837515e-06,
"loss": 0.0,
"step": 65
},
{
"epoch": 0.24489795918367346,
"grad_norm": NaN,
"learning_rate": 1.9030116872178316e-06,
"loss": 0.0,
"step": 66
},
{
"epoch": 0.24860853432282004,
"grad_norm": NaN,
"learning_rate": 1.5076844803522922e-06,
"loss": 0.0,
"step": 67
},
{
"epoch": 0.2523191094619666,
"grad_norm": NaN,
"learning_rate": 1.1570762312943295e-06,
"loss": 0.0,
"step": 68
},
{
"epoch": 0.2560296846011132,
"grad_norm": NaN,
"learning_rate": 8.51854342773295e-07,
"loss": 0.0,
"step": 69
},
{
"epoch": 0.2597402597402597,
"grad_norm": NaN,
"learning_rate": 5.925998220016659e-07,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.2634508348794063,
"grad_norm": NaN,
"learning_rate": 3.7980617469479953e-07,
"loss": 0.0,
"step": 71
},
{
"epoch": 0.26716141001855287,
"grad_norm": NaN,
"learning_rate": 2.1387846565474045e-07,
"loss": 0.0,
"step": 72
},
{
"epoch": 0.27087198515769945,
"grad_norm": NaN,
"learning_rate": 9.513254770636137e-08,
"loss": 0.0,
"step": 73
},
{
"epoch": 0.274582560296846,
"grad_norm": NaN,
"learning_rate": 2.3794460453555047e-08,
"loss": 0.0,
"step": 74
},
{
"epoch": 0.2782931354359926,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.2782931354359926,
"eval_loss": NaN,
"eval_runtime": 11.9772,
"eval_samples_per_second": 9.518,
"eval_steps_per_second": 4.759,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.30671428698112e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}