{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.02384642899725766,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0004769285799451532,
"grad_norm": 0.425629585981369,
"learning_rate": 2.5e-05,
"loss": 1.5045,
"step": 1
},
{
"epoch": 0.0009538571598903064,
"grad_norm": 0.5127414464950562,
"learning_rate": 5e-05,
"loss": 1.4964,
"step": 2
},
{
"epoch": 0.0014307857398354596,
"grad_norm": 0.8226185441017151,
"learning_rate": 4.994647308096509e-05,
"loss": 1.6284,
"step": 3
},
{
"epoch": 0.0019077143197806128,
"grad_norm": 0.7249281406402588,
"learning_rate": 4.9786121534345265e-05,
"loss": 1.5155,
"step": 4
},
{
"epoch": 0.002384642899725766,
"grad_norm": 0.6889981627464294,
"learning_rate": 4.951963201008076e-05,
"loss": 1.3207,
"step": 5
},
{
"epoch": 0.002861571479670919,
"grad_norm": 0.7409605383872986,
"learning_rate": 4.914814565722671e-05,
"loss": 1.5334,
"step": 6
},
{
"epoch": 0.0033385000596160724,
"grad_norm": 0.8829396367073059,
"learning_rate": 4.867325323737765e-05,
"loss": 1.6189,
"step": 7
},
{
"epoch": 0.0038154286395612256,
"grad_norm": 0.9240784049034119,
"learning_rate": 4.8096988312782174e-05,
"loss": 1.2868,
"step": 8
},
{
"epoch": 0.004292357219506379,
"grad_norm": 1.1313817501068115,
"learning_rate": 4.742181853831721e-05,
"loss": 1.7027,
"step": 9
},
{
"epoch": 0.004769285799451532,
"grad_norm": 0.9213244318962097,
"learning_rate": 4.665063509461097e-05,
"loss": 1.2187,
"step": 10
},
{
"epoch": 0.005246214379396685,
"grad_norm": 1.2456570863723755,
"learning_rate": 4.5786740307563636e-05,
"loss": 1.5233,
"step": 11
},
{
"epoch": 0.005723142959341838,
"grad_norm": 1.2289069890975952,
"learning_rate": 4.4833833507280884e-05,
"loss": 1.4216,
"step": 12
},
{
"epoch": 0.0062000715392869915,
"grad_norm": 1.204435110092163,
"learning_rate": 4.379599518697444e-05,
"loss": 1.4082,
"step": 13
},
{
"epoch": 0.006677000119232145,
"grad_norm": 1.4659150838851929,
"learning_rate": 4.267766952966369e-05,
"loss": 1.3687,
"step": 14
},
{
"epoch": 0.007153928699177298,
"grad_norm": 1.2851946353912354,
"learning_rate": 4.148364537750172e-05,
"loss": 1.0532,
"step": 15
},
{
"epoch": 0.007630857279122451,
"grad_norm": 1.2946377992630005,
"learning_rate": 4.021903572521802e-05,
"loss": 1.8505,
"step": 16
},
{
"epoch": 0.008107785859067605,
"grad_norm": 1.4644392728805542,
"learning_rate": 3.888925582549006e-05,
"loss": 1.5249,
"step": 17
},
{
"epoch": 0.008584714439012759,
"grad_norm": 1.303274393081665,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.2641,
"step": 18
},
{
"epoch": 0.009061643018957911,
"grad_norm": 1.5627962350845337,
"learning_rate": 3.6057217255475034e-05,
"loss": 1.4673,
"step": 19
},
{
"epoch": 0.009538571598903063,
"grad_norm": 2.11969256401062,
"learning_rate": 3.456708580912725e-05,
"loss": 1.7425,
"step": 20
},
{
"epoch": 0.010015500178848218,
"grad_norm": 1.8735617399215698,
"learning_rate": 3.303598663257904e-05,
"loss": 1.6553,
"step": 21
},
{
"epoch": 0.01049242875879337,
"grad_norm": 2.941176176071167,
"learning_rate": 3.147047612756302e-05,
"loss": 2.1402,
"step": 22
},
{
"epoch": 0.010969357338738524,
"grad_norm": 2.47955322265625,
"learning_rate": 2.9877258050403212e-05,
"loss": 1.5304,
"step": 23
},
{
"epoch": 0.011446285918683676,
"grad_norm": 2.28279185295105,
"learning_rate": 2.8263154805501297e-05,
"loss": 1.7279,
"step": 24
},
{
"epoch": 0.01192321449862883,
"grad_norm": 2.429161548614502,
"learning_rate": 2.663507823075358e-05,
"loss": 1.3317,
"step": 25
},
{
"epoch": 0.012400143078573983,
"grad_norm": 2.561760663986206,
"learning_rate": 2.5e-05,
"loss": 2.078,
"step": 26
},
{
"epoch": 0.012877071658519137,
"grad_norm": 2.975694179534912,
"learning_rate": 2.3364921769246423e-05,
"loss": 1.5784,
"step": 27
},
{
"epoch": 0.01335400023846429,
"grad_norm": 3.220482349395752,
"learning_rate": 2.173684519449872e-05,
"loss": 1.7665,
"step": 28
},
{
"epoch": 0.013830928818409444,
"grad_norm": 2.4195404052734375,
"learning_rate": 2.0122741949596797e-05,
"loss": 1.2464,
"step": 29
},
{
"epoch": 0.014307857398354596,
"grad_norm": 3.0891129970550537,
"learning_rate": 1.852952387243698e-05,
"loss": 1.4323,
"step": 30
},
{
"epoch": 0.01478478597829975,
"grad_norm": 2.1057982444763184,
"learning_rate": 1.6964013367420966e-05,
"loss": 1.0373,
"step": 31
},
{
"epoch": 0.015261714558244903,
"grad_norm": 2.6382322311401367,
"learning_rate": 1.5432914190872757e-05,
"loss": 1.2615,
"step": 32
},
{
"epoch": 0.015738643138190057,
"grad_norm": 2.989168405532837,
"learning_rate": 1.3942782744524973e-05,
"loss": 1.8745,
"step": 33
},
{
"epoch": 0.01621557171813521,
"grad_norm": 2.9760780334472656,
"learning_rate": 1.2500000000000006e-05,
"loss": 1.4645,
"step": 34
},
{
"epoch": 0.01669250029808036,
"grad_norm": 3.531989336013794,
"learning_rate": 1.1110744174509952e-05,
"loss": 1.5544,
"step": 35
},
{
"epoch": 0.017169428878025517,
"grad_norm": 3.602961778640747,
"learning_rate": 9.780964274781984e-06,
"loss": 1.4617,
"step": 36
},
{
"epoch": 0.01764635745797067,
"grad_norm": 2.5102806091308594,
"learning_rate": 8.51635462249828e-06,
"loss": 1.4198,
"step": 37
},
{
"epoch": 0.018123286037915822,
"grad_norm": 2.7395095825195312,
"learning_rate": 7.3223304703363135e-06,
"loss": 0.8911,
"step": 38
},
{
"epoch": 0.018600214617860975,
"grad_norm": 2.7568724155426025,
"learning_rate": 6.204004813025568e-06,
"loss": 1.0436,
"step": 39
},
{
"epoch": 0.019077143197806127,
"grad_norm": 5.96933650970459,
"learning_rate": 5.166166492719124e-06,
"loss": 1.5069,
"step": 40
},
{
"epoch": 0.019554071777751283,
"grad_norm": 5.542440414428711,
"learning_rate": 4.213259692436367e-06,
"loss": 1.3813,
"step": 41
},
{
"epoch": 0.020031000357696435,
"grad_norm": 3.1913931369781494,
"learning_rate": 3.3493649053890326e-06,
"loss": 1.0923,
"step": 42
},
{
"epoch": 0.020507928937641588,
"grad_norm": 3.866119384765625,
"learning_rate": 2.578181461682794e-06,
"loss": 1.2939,
"step": 43
},
{
"epoch": 0.02098485751758674,
"grad_norm": 5.512697696685791,
"learning_rate": 1.9030116872178316e-06,
"loss": 1.559,
"step": 44
},
{
"epoch": 0.021461786097531896,
"grad_norm": 6.1839680671691895,
"learning_rate": 1.3267467626223606e-06,
"loss": 1.8948,
"step": 45
},
{
"epoch": 0.021938714677477048,
"grad_norm": 5.496179580688477,
"learning_rate": 8.51854342773295e-07,
"loss": 1.1269,
"step": 46
},
{
"epoch": 0.0224156432574222,
"grad_norm": 5.928053379058838,
"learning_rate": 4.803679899192392e-07,
"loss": 1.4756,
"step": 47
},
{
"epoch": 0.022892571837367353,
"grad_norm": 5.825471878051758,
"learning_rate": 2.1387846565474045e-07,
"loss": 2.1599,
"step": 48
},
{
"epoch": 0.02336950041731251,
"grad_norm": 8.19292163848877,
"learning_rate": 5.352691903491303e-08,
"loss": 1.5091,
"step": 49
},
{
"epoch": 0.02384642899725766,
"grad_norm": 24.69010353088379,
"learning_rate": 0.0,
"loss": 2.3403,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.76735278343127e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}