{
"best_metric": 1.7320468425750732,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.9673518742442564,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0048367593712212815,
"grad_norm": 0.9632701277732849,
"learning_rate": 6e-06,
"loss": 2.4015,
"step": 1
},
{
"epoch": 0.0048367593712212815,
"eval_loss": 2.2109501361846924,
"eval_runtime": 17.8916,
"eval_samples_per_second": 19.45,
"eval_steps_per_second": 4.863,
"step": 1
},
{
"epoch": 0.009673518742442563,
"grad_norm": 0.9858382940292358,
"learning_rate": 1.2e-05,
"loss": 2.3818,
"step": 2
},
{
"epoch": 0.014510278113663845,
"grad_norm": 0.9527080059051514,
"learning_rate": 1.8e-05,
"loss": 2.2582,
"step": 3
},
{
"epoch": 0.019347037484885126,
"grad_norm": 0.8105273246765137,
"learning_rate": 2.4e-05,
"loss": 2.1767,
"step": 4
},
{
"epoch": 0.02418379685610641,
"grad_norm": 0.7631579041481018,
"learning_rate": 3e-05,
"loss": 2.0332,
"step": 5
},
{
"epoch": 0.02902055622732769,
"grad_norm": 0.8983649015426636,
"learning_rate": 3.6e-05,
"loss": 2.2603,
"step": 6
},
{
"epoch": 0.03385731559854897,
"grad_norm": 0.7237122058868408,
"learning_rate": 4.2e-05,
"loss": 2.1122,
"step": 7
},
{
"epoch": 0.03869407496977025,
"grad_norm": 0.6886872053146362,
"learning_rate": 4.8e-05,
"loss": 2.1978,
"step": 8
},
{
"epoch": 0.04353083434099154,
"grad_norm": 0.6846583485603333,
"learning_rate": 5.4000000000000005e-05,
"loss": 2.2006,
"step": 9
},
{
"epoch": 0.04836759371221282,
"grad_norm": 0.6080830097198486,
"learning_rate": 6e-05,
"loss": 2.0522,
"step": 10
},
{
"epoch": 0.053204353083434096,
"grad_norm": 0.7755049467086792,
"learning_rate": 5.999589914977407e-05,
"loss": 2.123,
"step": 11
},
{
"epoch": 0.05804111245465538,
"grad_norm": 0.7986268401145935,
"learning_rate": 5.998359772022778e-05,
"loss": 2.0668,
"step": 12
},
{
"epoch": 0.06287787182587666,
"grad_norm": 0.5952561497688293,
"learning_rate": 5.996309907444915e-05,
"loss": 1.9486,
"step": 13
},
{
"epoch": 0.06771463119709795,
"grad_norm": 0.7420691251754761,
"learning_rate": 5.9934408816563236e-05,
"loss": 2.0322,
"step": 14
},
{
"epoch": 0.07255139056831923,
"grad_norm": 0.6334537267684937,
"learning_rate": 5.98975347902001e-05,
"loss": 1.8305,
"step": 15
},
{
"epoch": 0.0773881499395405,
"grad_norm": 0.5374141931533813,
"learning_rate": 5.9852487076350345e-05,
"loss": 2.0767,
"step": 16
},
{
"epoch": 0.08222490931076179,
"grad_norm": 0.54233717918396,
"learning_rate": 5.979927799060915e-05,
"loss": 1.9312,
"step": 17
},
{
"epoch": 0.08706166868198308,
"grad_norm": 0.47292783856391907,
"learning_rate": 5.9737922079809257e-05,
"loss": 1.8548,
"step": 18
},
{
"epoch": 0.09189842805320435,
"grad_norm": 0.5466502904891968,
"learning_rate": 5.9668436118044054e-05,
"loss": 2.0625,
"step": 19
},
{
"epoch": 0.09673518742442563,
"grad_norm": 0.5066276788711548,
"learning_rate": 5.959083910208167e-05,
"loss": 1.8547,
"step": 20
},
{
"epoch": 0.10157194679564692,
"grad_norm": 0.49660593271255493,
"learning_rate": 5.9505152246171474e-05,
"loss": 1.9956,
"step": 21
},
{
"epoch": 0.10640870616686819,
"grad_norm": 0.5254217982292175,
"learning_rate": 5.941139897624428e-05,
"loss": 1.85,
"step": 22
},
{
"epoch": 0.11124546553808948,
"grad_norm": 0.4571835994720459,
"learning_rate": 5.9309604923507984e-05,
"loss": 1.8309,
"step": 23
},
{
"epoch": 0.11608222490931076,
"grad_norm": 0.4921605587005615,
"learning_rate": 5.9199797917440176e-05,
"loss": 1.8846,
"step": 24
},
{
"epoch": 0.12091898428053205,
"grad_norm": 0.4969245195388794,
"learning_rate": 5.908200797817991e-05,
"loss": 1.9426,
"step": 25
},
{
"epoch": 0.12575574365175332,
"grad_norm": 0.5094956159591675,
"learning_rate": 5.895626730832046e-05,
"loss": 2.0461,
"step": 26
},
{
"epoch": 0.13059250302297462,
"grad_norm": 0.48882463574409485,
"learning_rate": 5.882261028410545e-05,
"loss": 1.892,
"step": 27
},
{
"epoch": 0.1354292623941959,
"grad_norm": 0.47303664684295654,
"learning_rate": 5.8681073446030734e-05,
"loss": 2.0175,
"step": 28
},
{
"epoch": 0.14026602176541716,
"grad_norm": 0.5476979613304138,
"learning_rate": 5.853169548885461e-05,
"loss": 1.8809,
"step": 29
},
{
"epoch": 0.14510278113663846,
"grad_norm": 0.4879865050315857,
"learning_rate": 5.8374517251019035e-05,
"loss": 1.8718,
"step": 30
},
{
"epoch": 0.14993954050785974,
"grad_norm": 0.465002179145813,
"learning_rate": 5.820958170348484e-05,
"loss": 1.8935,
"step": 31
},
{
"epoch": 0.154776299879081,
"grad_norm": 0.46331265568733215,
"learning_rate": 5.8036933937983825e-05,
"loss": 1.9589,
"step": 32
},
{
"epoch": 0.1596130592503023,
"grad_norm": 0.4595167934894562,
"learning_rate": 5.7856621154691217e-05,
"loss": 1.8369,
"step": 33
},
{
"epoch": 0.16444981862152358,
"grad_norm": 0.4678586721420288,
"learning_rate": 5.766869264932154e-05,
"loss": 1.9281,
"step": 34
},
{
"epoch": 0.16928657799274485,
"grad_norm": 0.46128371357917786,
"learning_rate": 5.747319979965172e-05,
"loss": 1.8761,
"step": 35
},
{
"epoch": 0.17412333736396615,
"grad_norm": 0.5178216695785522,
"learning_rate": 5.727019605147488e-05,
"loss": 1.9501,
"step": 36
},
{
"epoch": 0.17896009673518742,
"grad_norm": 0.4670916497707367,
"learning_rate": 5.7059736903988775e-05,
"loss": 1.9176,
"step": 37
},
{
"epoch": 0.1837968561064087,
"grad_norm": 0.4867013990879059,
"learning_rate": 5.684187989462291e-05,
"loss": 1.8645,
"step": 38
},
{
"epoch": 0.18863361547763,
"grad_norm": 0.43132489919662476,
"learning_rate": 5.661668458330836e-05,
"loss": 1.7651,
"step": 39
},
{
"epoch": 0.19347037484885127,
"grad_norm": 0.4480936527252197,
"learning_rate": 5.638421253619467e-05,
"loss": 2.0129,
"step": 40
},
{
"epoch": 0.19830713422007254,
"grad_norm": 0.4475274384021759,
"learning_rate": 5.614452730881832e-05,
"loss": 1.9084,
"step": 41
},
{
"epoch": 0.20314389359129384,
"grad_norm": 0.4610969126224518,
"learning_rate": 5.589769442872722e-05,
"loss": 1.878,
"step": 42
},
{
"epoch": 0.2079806529625151,
"grad_norm": 0.467438668012619,
"learning_rate": 5.5643781377566175e-05,
"loss": 1.9073,
"step": 43
},
{
"epoch": 0.21281741233373638,
"grad_norm": 0.47766032814979553,
"learning_rate": 5.538285757262806e-05,
"loss": 1.8999,
"step": 44
},
{
"epoch": 0.21765417170495768,
"grad_norm": 0.5315982103347778,
"learning_rate": 5.5114994347875856e-05,
"loss": 1.9372,
"step": 45
},
{
"epoch": 0.22249093107617895,
"grad_norm": 0.6379398703575134,
"learning_rate": 5.48402649344406e-05,
"loss": 1.9203,
"step": 46
},
{
"epoch": 0.22732769044740025,
"grad_norm": 0.6935904026031494,
"learning_rate": 5.455874444060078e-05,
"loss": 1.9296,
"step": 47
},
{
"epoch": 0.23216444981862153,
"grad_norm": 0.7248342633247375,
"learning_rate": 5.427050983124843e-05,
"loss": 1.9293,
"step": 48
},
{
"epoch": 0.2370012091898428,
"grad_norm": 0.8179011940956116,
"learning_rate": 5.397563990684774e-05,
"loss": 2.0154,
"step": 49
},
{
"epoch": 0.2418379685610641,
"grad_norm": 0.9732866883277893,
"learning_rate": 5.367421528189181e-05,
"loss": 2.0389,
"step": 50
},
{
"epoch": 0.2418379685610641,
"eval_loss": 1.8738901615142822,
"eval_runtime": 18.4039,
"eval_samples_per_second": 18.909,
"eval_steps_per_second": 4.727,
"step": 50
},
{
"epoch": 0.24667472793228537,
"grad_norm": 0.47695332765579224,
"learning_rate": 5.336631836286338e-05,
"loss": 2.0234,
"step": 51
},
{
"epoch": 0.25151148730350664,
"grad_norm": 0.4575190842151642,
"learning_rate": 5.3052033325705774e-05,
"loss": 1.9453,
"step": 52
},
{
"epoch": 0.25634824667472794,
"grad_norm": 0.46312448382377625,
"learning_rate": 5.2731446092810044e-05,
"loss": 1.9204,
"step": 53
},
{
"epoch": 0.26118500604594924,
"grad_norm": 0.4436776340007782,
"learning_rate": 5.240464430952462e-05,
"loss": 1.9036,
"step": 54
},
{
"epoch": 0.2660217654171705,
"grad_norm": 0.42219528555870056,
"learning_rate": 5.207171732019395e-05,
"loss": 1.6588,
"step": 55
},
{
"epoch": 0.2708585247883918,
"grad_norm": 0.5274652242660522,
"learning_rate": 5.1732756143732675e-05,
"loss": 1.9097,
"step": 56
},
{
"epoch": 0.2756952841596131,
"grad_norm": 0.5278433561325073,
"learning_rate": 5.1387853448741916e-05,
"loss": 1.9225,
"step": 57
},
{
"epoch": 0.28053204353083433,
"grad_norm": 0.46141645312309265,
"learning_rate": 5.103710352817465e-05,
"loss": 1.7575,
"step": 58
},
{
"epoch": 0.28536880290205563,
"grad_norm": 0.37459704279899597,
"learning_rate": 5.068060227355698e-05,
"loss": 1.8215,
"step": 59
},
{
"epoch": 0.29020556227327693,
"grad_norm": 0.4300343096256256,
"learning_rate": 5.0318447148772234e-05,
"loss": 1.7966,
"step": 60
},
{
"epoch": 0.2950423216444982,
"grad_norm": 0.44032105803489685,
"learning_rate": 4.995073716341545e-05,
"loss": 1.827,
"step": 61
},
{
"epoch": 0.2998790810157195,
"grad_norm": 0.4639372229576111,
"learning_rate": 4.957757284572506e-05,
"loss": 1.8347,
"step": 62
},
{
"epoch": 0.3047158403869408,
"grad_norm": 0.41214707493782043,
"learning_rate": 4.91990562150995e-05,
"loss": 1.7148,
"step": 63
},
{
"epoch": 0.309552599758162,
"grad_norm": 0.3841429054737091,
"learning_rate": 4.881529075420611e-05,
"loss": 1.8549,
"step": 64
},
{
"epoch": 0.3143893591293833,
"grad_norm": 0.4167156517505646,
"learning_rate": 4.8426381380690036e-05,
"loss": 1.8203,
"step": 65
},
{
"epoch": 0.3192261185006046,
"grad_norm": 0.4834750294685364,
"learning_rate": 4.8032434418490753e-05,
"loss": 1.8424,
"step": 66
},
{
"epoch": 0.32406287787182586,
"grad_norm": 0.42098328471183777,
"learning_rate": 4.7633557568774194e-05,
"loss": 1.8602,
"step": 67
},
{
"epoch": 0.32889963724304716,
"grad_norm": 0.43436023592948914,
"learning_rate": 4.722985988048831e-05,
"loss": 1.7259,
"step": 68
},
{
"epoch": 0.33373639661426846,
"grad_norm": 0.4097272753715515,
"learning_rate": 4.6821451720550184e-05,
"loss": 1.8614,
"step": 69
},
{
"epoch": 0.3385731559854897,
"grad_norm": 0.40257489681243896,
"learning_rate": 4.640844474367282e-05,
"loss": 1.7213,
"step": 70
},
{
"epoch": 0.343409915356711,
"grad_norm": 0.39686596393585205,
"learning_rate": 4.5990951861839815e-05,
"loss": 1.7731,
"step": 71
},
{
"epoch": 0.3482466747279323,
"grad_norm": 0.4416908323764801,
"learning_rate": 4.5569087213436455e-05,
"loss": 1.8476,
"step": 72
},
{
"epoch": 0.35308343409915355,
"grad_norm": 0.42743927240371704,
"learning_rate": 4.514296613204532e-05,
"loss": 1.8935,
"step": 73
},
{
"epoch": 0.35792019347037485,
"grad_norm": 0.4807327687740326,
"learning_rate": 4.471270511491525e-05,
"loss": 1.7099,
"step": 74
},
{
"epoch": 0.36275695284159615,
"grad_norm": 0.48261377215385437,
"learning_rate": 4.427842179111221e-05,
"loss": 2.0033,
"step": 75
},
{
"epoch": 0.3675937122128174,
"grad_norm": 0.4463306665420532,
"learning_rate": 4.3840234889360634e-05,
"loss": 1.7878,
"step": 76
},
{
"epoch": 0.3724304715840387,
"grad_norm": 0.41338077187538147,
"learning_rate": 4.33982642055842e-05,
"loss": 1.7056,
"step": 77
},
{
"epoch": 0.37726723095526,
"grad_norm": 0.48970669507980347,
"learning_rate": 4.2952630570154785e-05,
"loss": 1.767,
"step": 78
},
{
"epoch": 0.38210399032648124,
"grad_norm": 0.47082316875457764,
"learning_rate": 4.250345581485871e-05,
"loss": 1.8836,
"step": 79
},
{
"epoch": 0.38694074969770254,
"grad_norm": 0.40101462602615356,
"learning_rate": 4.205086273958909e-05,
"loss": 1.7996,
"step": 80
},
{
"epoch": 0.39177750906892383,
"grad_norm": 0.49976804852485657,
"learning_rate": 4.1594975078773565e-05,
"loss": 1.8354,
"step": 81
},
{
"epoch": 0.3966142684401451,
"grad_norm": 0.43082740902900696,
"learning_rate": 4.113591746754662e-05,
"loss": 1.7727,
"step": 82
},
{
"epoch": 0.4014510278113664,
"grad_norm": 0.430978924036026,
"learning_rate": 4.06738154076755e-05,
"loss": 1.6518,
"step": 83
},
{
"epoch": 0.4062877871825877,
"grad_norm": 0.5494586825370789,
"learning_rate": 4.020879523324929e-05,
"loss": 1.8428,
"step": 84
},
{
"epoch": 0.4111245465538089,
"grad_norm": 0.5254581570625305,
"learning_rate": 3.974098407614051e-05,
"loss": 1.7367,
"step": 85
},
{
"epoch": 0.4159613059250302,
"grad_norm": 0.45177075266838074,
"learning_rate": 3.927050983124842e-05,
"loss": 1.8426,
"step": 86
},
{
"epoch": 0.4207980652962515,
"grad_norm": 0.5090339779853821,
"learning_rate": 3.8797501121533946e-05,
"loss": 1.8293,
"step": 87
},
{
"epoch": 0.42563482466747277,
"grad_norm": 0.4986412525177002,
"learning_rate": 3.832208726285534e-05,
"loss": 1.7828,
"step": 88
},
{
"epoch": 0.43047158403869407,
"grad_norm": 0.42650216817855835,
"learning_rate": 3.784439822861459e-05,
"loss": 1.7109,
"step": 89
},
{
"epoch": 0.43530834340991537,
"grad_norm": 0.4212074279785156,
"learning_rate": 3.7364564614223976e-05,
"loss": 1.8452,
"step": 90
},
{
"epoch": 0.44014510278113667,
"grad_norm": 0.4842492938041687,
"learning_rate": 3.688271760140255e-05,
"loss": 1.8276,
"step": 91
},
{
"epoch": 0.4449818621523579,
"grad_norm": 0.4585839509963989,
"learning_rate": 3.6398988922312406e-05,
"loss": 1.8796,
"step": 92
},
{
"epoch": 0.4498186215235792,
"grad_norm": 0.48161616921424866,
"learning_rate": 3.591351082354441e-05,
"loss": 1.8683,
"step": 93
},
{
"epoch": 0.4546553808948005,
"grad_norm": 0.5221933722496033,
"learning_rate": 3.54264160299633e-05,
"loss": 1.8533,
"step": 94
},
{
"epoch": 0.45949214026602175,
"grad_norm": 0.5291529893875122,
"learning_rate": 3.493783770842202e-05,
"loss": 1.7899,
"step": 95
},
{
"epoch": 0.46432889963724305,
"grad_norm": 0.6196396350860596,
"learning_rate": 3.444790943135526e-05,
"loss": 1.7449,
"step": 96
},
{
"epoch": 0.46916565900846435,
"grad_norm": 0.7176190614700317,
"learning_rate": 3.3956765140262074e-05,
"loss": 1.8663,
"step": 97
},
{
"epoch": 0.4740024183796856,
"grad_norm": 0.6267790198326111,
"learning_rate": 3.346453910908759e-05,
"loss": 1.7183,
"step": 98
},
{
"epoch": 0.4788391777509069,
"grad_norm": 0.9203118085861206,
"learning_rate": 3.297136590751389e-05,
"loss": 1.8954,
"step": 99
},
{
"epoch": 0.4836759371221282,
"grad_norm": 0.8572964072227478,
"learning_rate": 3.247738036416998e-05,
"loss": 1.7169,
"step": 100
},
{
"epoch": 0.4836759371221282,
"eval_loss": 1.7830288410186768,
"eval_runtime": 18.3415,
"eval_samples_per_second": 18.973,
"eval_steps_per_second": 4.743,
"step": 100
},
{
"epoch": 0.48851269649334944,
"grad_norm": 0.4275000989437103,
"learning_rate": 3.1982717529770985e-05,
"loss": 1.9067,
"step": 101
},
{
"epoch": 0.49334945586457074,
"grad_norm": 0.48399701714515686,
"learning_rate": 3.148751264019667e-05,
"loss": 1.9673,
"step": 102
},
{
"epoch": 0.49818621523579204,
"grad_norm": 0.45135805010795593,
"learning_rate": 3.099190107951924e-05,
"loss": 1.8383,
"step": 103
},
{
"epoch": 0.5030229746070133,
"grad_norm": 0.476682186126709,
"learning_rate": 3.049601834299076e-05,
"loss": 1.8095,
"step": 104
},
{
"epoch": 0.5078597339782346,
"grad_norm": 0.46085941791534424,
"learning_rate": 3e-05,
"loss": 1.6057,
"step": 105
},
{
"epoch": 0.5126964933494559,
"grad_norm": 0.5784292221069336,
"learning_rate": 2.9503981657009246e-05,
"loss": 1.7255,
"step": 106
},
{
"epoch": 0.5175332527206772,
"grad_norm": 0.5619342923164368,
"learning_rate": 2.9008098920480752e-05,
"loss": 1.9666,
"step": 107
},
{
"epoch": 0.5223700120918985,
"grad_norm": 0.5032961368560791,
"learning_rate": 2.851248735980333e-05,
"loss": 1.8762,
"step": 108
},
{
"epoch": 0.5272067714631197,
"grad_norm": 0.4964190721511841,
"learning_rate": 2.801728247022902e-05,
"loss": 1.9043,
"step": 109
},
{
"epoch": 0.532043530834341,
"grad_norm": 0.5014073848724365,
"learning_rate": 2.7522619635830034e-05,
"loss": 1.6769,
"step": 110
},
{
"epoch": 0.5368802902055623,
"grad_norm": 0.5301333665847778,
"learning_rate": 2.702863409248612e-05,
"loss": 1.9171,
"step": 111
},
{
"epoch": 0.5417170495767836,
"grad_norm": 0.5182745456695557,
"learning_rate": 2.6535460890912416e-05,
"loss": 1.8716,
"step": 112
},
{
"epoch": 0.5465538089480049,
"grad_norm": 0.46317240595817566,
"learning_rate": 2.604323485973793e-05,
"loss": 1.7914,
"step": 113
},
{
"epoch": 0.5513905683192262,
"grad_norm": 0.43662911653518677,
"learning_rate": 2.555209056864474e-05,
"loss": 1.7344,
"step": 114
},
{
"epoch": 0.5562273276904474,
"grad_norm": 0.48569121956825256,
"learning_rate": 2.5062162291577978e-05,
"loss": 1.7216,
"step": 115
},
{
"epoch": 0.5610640870616687,
"grad_norm": 0.4334430992603302,
"learning_rate": 2.4573583970036712e-05,
"loss": 1.7228,
"step": 116
},
{
"epoch": 0.56590084643289,
"grad_norm": 0.4561009705066681,
"learning_rate": 2.4086489176455595e-05,
"loss": 1.7068,
"step": 117
},
{
"epoch": 0.5707376058041113,
"grad_norm": 0.42410483956336975,
"learning_rate": 2.36010110776876e-05,
"loss": 1.7405,
"step": 118
},
{
"epoch": 0.5755743651753326,
"grad_norm": 0.46949705481529236,
"learning_rate": 2.3117282398597456e-05,
"loss": 1.7154,
"step": 119
},
{
"epoch": 0.5804111245465539,
"grad_norm": 0.4945320785045624,
"learning_rate": 2.263543538577603e-05,
"loss": 1.7313,
"step": 120
},
{
"epoch": 0.585247883917775,
"grad_norm": 0.49296748638153076,
"learning_rate": 2.215560177138541e-05,
"loss": 1.8307,
"step": 121
},
{
"epoch": 0.5900846432889963,
"grad_norm": 0.5368390083312988,
"learning_rate": 2.167791273714467e-05,
"loss": 1.7453,
"step": 122
},
{
"epoch": 0.5949214026602176,
"grad_norm": 0.47453513741493225,
"learning_rate": 2.1202498878466062e-05,
"loss": 1.7756,
"step": 123
},
{
"epoch": 0.599758162031439,
"grad_norm": 0.5000871419906616,
"learning_rate": 2.072949016875158e-05,
"loss": 1.628,
"step": 124
},
{
"epoch": 0.6045949214026602,
"grad_norm": 0.5026735663414001,
"learning_rate": 2.0259015923859498e-05,
"loss": 1.8197,
"step": 125
},
{
"epoch": 0.6094316807738815,
"grad_norm": 0.47887665033340454,
"learning_rate": 1.979120476675071e-05,
"loss": 1.7951,
"step": 126
},
{
"epoch": 0.6142684401451027,
"grad_norm": 0.46891364455223083,
"learning_rate": 1.9326184592324503e-05,
"loss": 1.6736,
"step": 127
},
{
"epoch": 0.619105199516324,
"grad_norm": 0.5452415943145752,
"learning_rate": 1.8864082532453373e-05,
"loss": 1.8137,
"step": 128
},
{
"epoch": 0.6239419588875453,
"grad_norm": 0.5273683667182922,
"learning_rate": 1.840502492122644e-05,
"loss": 1.7407,
"step": 129
},
{
"epoch": 0.6287787182587666,
"grad_norm": 0.47598665952682495,
"learning_rate": 1.7949137260410924e-05,
"loss": 1.7605,
"step": 130
},
{
"epoch": 0.6336154776299879,
"grad_norm": 0.5396198034286499,
"learning_rate": 1.7496544185141295e-05,
"loss": 1.7761,
"step": 131
},
{
"epoch": 0.6384522370012092,
"grad_norm": 0.48490336537361145,
"learning_rate": 1.7047369429845216e-05,
"loss": 1.6718,
"step": 132
},
{
"epoch": 0.6432889963724304,
"grad_norm": 0.4524966776371002,
"learning_rate": 1.6601735794415806e-05,
"loss": 1.6303,
"step": 133
},
{
"epoch": 0.6481257557436517,
"grad_norm": 0.4709789454936981,
"learning_rate": 1.615976511063937e-05,
"loss": 1.7838,
"step": 134
},
{
"epoch": 0.652962515114873,
"grad_norm": 0.46935707330703735,
"learning_rate": 1.5721578208887793e-05,
"loss": 1.6582,
"step": 135
},
{
"epoch": 0.6577992744860943,
"grad_norm": 0.5257735252380371,
"learning_rate": 1.5287294885084766e-05,
"loss": 1.7776,
"step": 136
},
{
"epoch": 0.6626360338573156,
"grad_norm": 0.5525801777839661,
"learning_rate": 1.4857033867954697e-05,
"loss": 1.8144,
"step": 137
},
{
"epoch": 0.6674727932285369,
"grad_norm": 0.4242516756057739,
"learning_rate": 1.4430912786563554e-05,
"loss": 1.7099,
"step": 138
},
{
"epoch": 0.6723095525997581,
"grad_norm": 0.5075966715812683,
"learning_rate": 1.4009048138160195e-05,
"loss": 1.7698,
"step": 139
},
{
"epoch": 0.6771463119709794,
"grad_norm": 0.5571949481964111,
"learning_rate": 1.3591555256327199e-05,
"loss": 1.7397,
"step": 140
},
{
"epoch": 0.6819830713422007,
"grad_norm": 0.5257452726364136,
"learning_rate": 1.3178548279449822e-05,
"loss": 1.7735,
"step": 141
},
{
"epoch": 0.686819830713422,
"grad_norm": 0.4458635747432709,
"learning_rate": 1.2770140119511693e-05,
"loss": 1.8301,
"step": 142
},
{
"epoch": 0.6916565900846433,
"grad_norm": 0.5735492706298828,
"learning_rate": 1.2366442431225809e-05,
"loss": 1.7515,
"step": 143
},
{
"epoch": 0.6964933494558646,
"grad_norm": 0.5404819250106812,
"learning_rate": 1.1967565581509248e-05,
"loss": 1.8213,
"step": 144
},
{
"epoch": 0.7013301088270859,
"grad_norm": 0.6705479621887207,
"learning_rate": 1.1573618619309965e-05,
"loss": 1.7866,
"step": 145
},
{
"epoch": 0.7061668681983071,
"grad_norm": 0.7792295217514038,
"learning_rate": 1.1184709245793889e-05,
"loss": 1.7902,
"step": 146
},
{
"epoch": 0.7110036275695284,
"grad_norm": 0.8956598043441772,
"learning_rate": 1.0800943784900502e-05,
"loss": 1.8193,
"step": 147
},
{
"epoch": 0.7158403869407497,
"grad_norm": 0.7993762493133545,
"learning_rate": 1.042242715427494e-05,
"loss": 1.688,
"step": 148
},
{
"epoch": 0.720677146311971,
"grad_norm": 0.8420385122299194,
"learning_rate": 1.004926283658455e-05,
"loss": 1.8126,
"step": 149
},
{
"epoch": 0.7255139056831923,
"grad_norm": 0.9537421464920044,
"learning_rate": 9.681552851227774e-06,
"loss": 1.4474,
"step": 150
},
{
"epoch": 0.7255139056831923,
"eval_loss": 1.7413969039916992,
"eval_runtime": 18.3528,
"eval_samples_per_second": 18.962,
"eval_steps_per_second": 4.74,
"step": 150
},
{
"epoch": 0.7303506650544136,
"grad_norm": 0.46916308999061584,
"learning_rate": 9.319397726443026e-06,
"loss": 2.028,
"step": 151
},
{
"epoch": 0.7351874244256348,
"grad_norm": 0.43807369470596313,
"learning_rate": 8.962896471825342e-06,
"loss": 1.7917,
"step": 152
},
{
"epoch": 0.7400241837968561,
"grad_norm": 0.530551016330719,
"learning_rate": 8.61214655125809e-06,
"loss": 1.8804,
"step": 153
},
{
"epoch": 0.7448609431680774,
"grad_norm": 0.5489689707756042,
"learning_rate": 8.267243856267331e-06,
"loss": 1.8999,
"step": 154
},
{
"epoch": 0.7496977025392987,
"grad_norm": 0.47269207239151,
"learning_rate": 7.928282679806052e-06,
"loss": 1.6923,
"step": 155
},
{
"epoch": 0.75453446191052,
"grad_norm": 0.5164920687675476,
"learning_rate": 7.595355690475393e-06,
"loss": 1.7004,
"step": 156
},
{
"epoch": 0.7593712212817413,
"grad_norm": 0.4851786494255066,
"learning_rate": 7.268553907189964e-06,
"loss": 1.6288,
"step": 157
},
{
"epoch": 0.7642079806529625,
"grad_norm": 0.5359386801719666,
"learning_rate": 6.947966674294236e-06,
"loss": 1.6777,
"step": 158
},
{
"epoch": 0.7690447400241838,
"grad_norm": 0.5173937678337097,
"learning_rate": 6.6336816371366305e-06,
"loss": 1.6535,
"step": 159
},
{
"epoch": 0.7738814993954051,
"grad_norm": 0.4466940462589264,
"learning_rate": 6.325784718108196e-06,
"loss": 1.8034,
"step": 160
},
{
"epoch": 0.7787182587666264,
"grad_norm": 0.4748440086841583,
"learning_rate": 6.0243600931522595e-06,
"loss": 1.7022,
"step": 161
},
{
"epoch": 0.7835550181378477,
"grad_norm": 0.49979591369628906,
"learning_rate": 5.72949016875158e-06,
"loss": 1.6983,
"step": 162
},
{
"epoch": 0.788391777509069,
"grad_norm": 0.47810453176498413,
"learning_rate": 5.44125555939923e-06,
"loss": 1.7931,
"step": 163
},
{
"epoch": 0.7932285368802902,
"grad_norm": 0.444297194480896,
"learning_rate": 5.159735065559399e-06,
"loss": 1.785,
"step": 164
},
{
"epoch": 0.7980652962515115,
"grad_norm": 0.4740820825099945,
"learning_rate": 4.885005652124144e-06,
"loss": 1.7403,
"step": 165
},
{
"epoch": 0.8029020556227328,
"grad_norm": 0.4951646625995636,
"learning_rate": 4.617142427371934e-06,
"loss": 1.7834,
"step": 166
},
{
"epoch": 0.8077388149939541,
"grad_norm": 0.49473491311073303,
"learning_rate": 4.3562186224338265e-06,
"loss": 1.7891,
"step": 167
},
{
"epoch": 0.8125755743651754,
"grad_norm": 0.5567054152488708,
"learning_rate": 4.102305571272783e-06,
"loss": 1.742,
"step": 168
},
{
"epoch": 0.8174123337363967,
"grad_norm": 0.531502366065979,
"learning_rate": 3.855472691181678e-06,
"loss": 1.7225,
"step": 169
},
{
"epoch": 0.8222490931076178,
"grad_norm": 0.4598694443702698,
"learning_rate": 3.615787463805331e-06,
"loss": 1.6636,
"step": 170
},
{
"epoch": 0.8270858524788391,
"grad_norm": 0.46727102994918823,
"learning_rate": 3.383315416691646e-06,
"loss": 1.6728,
"step": 171
},
{
"epoch": 0.8319226118500604,
"grad_norm": 0.4816998541355133,
"learning_rate": 3.158120105377096e-06,
"loss": 1.7808,
"step": 172
},
{
"epoch": 0.8367593712212817,
"grad_norm": 0.4520528316497803,
"learning_rate": 2.940263096011233e-06,
"loss": 1.573,
"step": 173
},
{
"epoch": 0.841596130592503,
"grad_norm": 0.4842596650123596,
"learning_rate": 2.729803948525125e-06,
"loss": 1.838,
"step": 174
},
{
"epoch": 0.8464328899637243,
"grad_norm": 0.5535080432891846,
"learning_rate": 2.526800200348275e-06,
"loss": 1.7461,
"step": 175
},
{
"epoch": 0.8512696493349455,
"grad_norm": 0.5143581032752991,
"learning_rate": 2.3313073506784575e-06,
"loss": 1.6,
"step": 176
},
{
"epoch": 0.8561064087061668,
"grad_norm": 0.5159276127815247,
"learning_rate": 2.143378845308791e-06,
"loss": 1.6323,
"step": 177
},
{
"epoch": 0.8609431680773881,
"grad_norm": 0.4979780316352844,
"learning_rate": 1.9630660620161777e-06,
"loss": 1.779,
"step": 178
},
{
"epoch": 0.8657799274486094,
"grad_norm": 0.5675414800643921,
"learning_rate": 1.790418296515165e-06,
"loss": 1.7153,
"step": 179
},
{
"epoch": 0.8706166868198307,
"grad_norm": 0.4874797761440277,
"learning_rate": 1.625482748980961e-06,
"loss": 1.7539,
"step": 180
},
{
"epoch": 0.875453446191052,
"grad_norm": 0.5073657631874084,
"learning_rate": 1.4683045111453942e-06,
"loss": 1.6124,
"step": 181
},
{
"epoch": 0.8802902055622733,
"grad_norm": 0.4889651834964752,
"learning_rate": 1.3189265539692707e-06,
"loss": 1.6506,
"step": 182
},
{
"epoch": 0.8851269649334945,
"grad_norm": 0.6806184649467468,
"learning_rate": 1.1773897158945557e-06,
"loss": 1.6727,
"step": 183
},
{
"epoch": 0.8899637243047158,
"grad_norm": 0.5414454936981201,
"learning_rate": 1.0437326916795432e-06,
"loss": 1.7342,
"step": 184
},
{
"epoch": 0.8948004836759371,
"grad_norm": 0.42585650086402893,
"learning_rate": 9.179920218200888e-07,
"loss": 1.6879,
"step": 185
},
{
"epoch": 0.8996372430471584,
"grad_norm": 0.45129069685935974,
"learning_rate": 8.002020825598277e-07,
"loss": 1.6605,
"step": 186
},
{
"epoch": 0.9044740024183797,
"grad_norm": 0.6295913457870483,
"learning_rate": 6.90395076492022e-07,
"loss": 1.7574,
"step": 187
},
{
"epoch": 0.909310761789601,
"grad_norm": 0.4469881057739258,
"learning_rate": 5.886010237557194e-07,
"loss": 1.5844,
"step": 188
},
{
"epoch": 0.9141475211608222,
"grad_norm": 0.5988472700119019,
"learning_rate": 4.94847753828529e-07,
"loss": 1.9071,
"step": 189
},
{
"epoch": 0.9189842805320435,
"grad_norm": 0.4462520182132721,
"learning_rate": 4.091608979183303e-07,
"loss": 1.7049,
"step": 190
},
{
"epoch": 0.9238210399032648,
"grad_norm": 0.5181970596313477,
"learning_rate": 3.315638819559452e-07,
"loss": 1.8397,
"step": 191
},
{
"epoch": 0.9286577992744861,
"grad_norm": 0.5435971617698669,
"learning_rate": 2.6207792019074414e-07,
"loss": 1.6886,
"step": 192
},
{
"epoch": 0.9334945586457074,
"grad_norm": 0.5636361241340637,
"learning_rate": 2.0072200939085573e-07,
"loss": 1.6915,
"step": 193
},
{
"epoch": 0.9383313180169287,
"grad_norm": 0.5563094615936279,
"learning_rate": 1.475129236496575e-07,
"loss": 1.7518,
"step": 194
},
{
"epoch": 0.9431680773881499,
"grad_norm": 0.5630325078964233,
"learning_rate": 1.0246520979990459e-07,
"loss": 1.8014,
"step": 195
},
{
"epoch": 0.9480048367593712,
"grad_norm": 0.6778345108032227,
"learning_rate": 6.559118343676396e-08,
"loss": 1.6761,
"step": 196
},
{
"epoch": 0.9528415961305925,
"grad_norm": 0.8598151803016663,
"learning_rate": 3.690092555085789e-08,
"loss": 1.7755,
"step": 197
},
{
"epoch": 0.9576783555018138,
"grad_norm": 0.8544036149978638,
"learning_rate": 1.640227977221853e-08,
"loss": 1.7744,
"step": 198
},
{
"epoch": 0.9625151148730351,
"grad_norm": 0.988280177116394,
"learning_rate": 4.1008502259298755e-09,
"loss": 1.751,
"step": 199
},
{
"epoch": 0.9673518742442564,
"grad_norm": 1.538628101348877,
"learning_rate": 0.0,
"loss": 1.4039,
"step": 200
},
{
"epoch": 0.9673518742442564,
"eval_loss": 1.7320468425750732,
"eval_runtime": 18.3522,
"eval_samples_per_second": 18.962,
"eval_steps_per_second": 4.741,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.582807765745664e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}