{
"best_metric": 7.1964921951293945,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.05379597875058839,
"eval_steps": 25,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00026897989375294196,
"grad_norm": 14.062777519226074,
"learning_rate": 2.9999999999999997e-05,
"loss": 9.9528,
"step": 1
},
{
"epoch": 0.00026897989375294196,
"eval_loss": 10.383614540100098,
"eval_runtime": 1.103,
"eval_samples_per_second": 45.331,
"eval_steps_per_second": 6.346,
"step": 1
},
{
"epoch": 0.0005379597875058839,
"grad_norm": 9.013472557067871,
"learning_rate": 5.9999999999999995e-05,
"loss": 10.4374,
"step": 2
},
{
"epoch": 0.0008069396812588259,
"grad_norm": 9.113578796386719,
"learning_rate": 8.999999999999999e-05,
"loss": 10.5416,
"step": 3
},
{
"epoch": 0.0010759195750117679,
"grad_norm": 9.272452354431152,
"learning_rate": 0.00011999999999999999,
"loss": 10.5582,
"step": 4
},
{
"epoch": 0.0013448994687647099,
"grad_norm": 9.502043724060059,
"learning_rate": 0.00015,
"loss": 10.4131,
"step": 5
},
{
"epoch": 0.0016138793625176519,
"grad_norm": 12.628140449523926,
"learning_rate": 0.00017999999999999998,
"loss": 9.5481,
"step": 6
},
{
"epoch": 0.0018828592562705937,
"grad_norm": 10.771757125854492,
"learning_rate": 0.00020999999999999998,
"loss": 9.7362,
"step": 7
},
{
"epoch": 0.0021518391500235357,
"grad_norm": 8.7677640914917,
"learning_rate": 0.00023999999999999998,
"loss": 9.394,
"step": 8
},
{
"epoch": 0.0024208190437764775,
"grad_norm": 9.826272010803223,
"learning_rate": 0.00027,
"loss": 8.7301,
"step": 9
},
{
"epoch": 0.0026897989375294197,
"grad_norm": 8.201698303222656,
"learning_rate": 0.0003,
"loss": 8.4353,
"step": 10
},
{
"epoch": 0.0029587788312823615,
"grad_norm": 6.270658493041992,
"learning_rate": 0.0002999794957488703,
"loss": 7.7853,
"step": 11
},
{
"epoch": 0.0032277587250353038,
"grad_norm": 5.651599884033203,
"learning_rate": 0.0002999179886011389,
"loss": 7.7574,
"step": 12
},
{
"epoch": 0.0034967386187882456,
"grad_norm": 3.6305131912231445,
"learning_rate": 0.0002998154953722457,
"loss": 7.9822,
"step": 13
},
{
"epoch": 0.0037657185125411874,
"grad_norm": 3.2104008197784424,
"learning_rate": 0.00029967204408281613,
"loss": 8.3468,
"step": 14
},
{
"epoch": 0.004034698406294129,
"grad_norm": 2.4810068607330322,
"learning_rate": 0.00029948767395100045,
"loss": 8.2438,
"step": 15
},
{
"epoch": 0.004303678300047071,
"grad_norm": 2.427516460418701,
"learning_rate": 0.0002992624353817517,
"loss": 8.4045,
"step": 16
},
{
"epoch": 0.004572658193800014,
"grad_norm": 1.9261265993118286,
"learning_rate": 0.0002989963899530457,
"loss": 7.9577,
"step": 17
},
{
"epoch": 0.004841638087552955,
"grad_norm": 2.021862268447876,
"learning_rate": 0.00029868961039904624,
"loss": 8.1164,
"step": 18
},
{
"epoch": 0.005110617981305897,
"grad_norm": 2.315554141998291,
"learning_rate": 0.00029834218059022024,
"loss": 8.1393,
"step": 19
},
{
"epoch": 0.0053795978750588395,
"grad_norm": 2.343036651611328,
"learning_rate": 0.00029795419551040833,
"loss": 7.8621,
"step": 20
},
{
"epoch": 0.005648577768811782,
"grad_norm": 1.6685760021209717,
"learning_rate": 0.00029752576123085736,
"loss": 7.6249,
"step": 21
},
{
"epoch": 0.005917557662564723,
"grad_norm": 1.617172360420227,
"learning_rate": 0.0002970569948812214,
"loss": 7.8443,
"step": 22
},
{
"epoch": 0.006186537556317665,
"grad_norm": 1.611405611038208,
"learning_rate": 0.0002965480246175399,
"loss": 7.1947,
"step": 23
},
{
"epoch": 0.0064555174500706075,
"grad_norm": 1.3807909488677979,
"learning_rate": 0.0002959989895872009,
"loss": 7.7691,
"step": 24
},
{
"epoch": 0.006724497343823549,
"grad_norm": 1.8649704456329346,
"learning_rate": 0.0002954100398908995,
"loss": 7.6449,
"step": 25
},
{
"epoch": 0.006724497343823549,
"eval_loss": 7.789085865020752,
"eval_runtime": 1.1023,
"eval_samples_per_second": 45.359,
"eval_steps_per_second": 6.35,
"step": 25
},
{
"epoch": 0.006993477237576491,
"grad_norm": 1.826485276222229,
"learning_rate": 0.0002947813365416023,
"loss": 7.775,
"step": 26
},
{
"epoch": 0.007262457131329433,
"grad_norm": 2.0122640132904053,
"learning_rate": 0.0002941130514205272,
"loss": 7.4112,
"step": 27
},
{
"epoch": 0.007531437025082375,
"grad_norm": 1.474570870399475,
"learning_rate": 0.0002934053672301536,
"loss": 7.605,
"step": 28
},
{
"epoch": 0.007800416918835317,
"grad_norm": 1.2456080913543701,
"learning_rate": 0.00029265847744427303,
"loss": 7.615,
"step": 29
},
{
"epoch": 0.008069396812588258,
"grad_norm": 1.3614413738250732,
"learning_rate": 0.00029187258625509513,
"loss": 7.1255,
"step": 30
},
{
"epoch": 0.0083383767063412,
"grad_norm": 1.5178754329681396,
"learning_rate": 0.00029104790851742417,
"loss": 7.3523,
"step": 31
},
{
"epoch": 0.008607356600094143,
"grad_norm": 1.0982545614242554,
"learning_rate": 0.0002901846696899191,
"loss": 7.2886,
"step": 32
},
{
"epoch": 0.008876336493847085,
"grad_norm": 0.9949588775634766,
"learning_rate": 0.00028928310577345606,
"loss": 7.705,
"step": 33
},
{
"epoch": 0.009145316387600027,
"grad_norm": 1.0565680265426636,
"learning_rate": 0.0002883434632466077,
"loss": 7.054,
"step": 34
},
{
"epoch": 0.00941429628135297,
"grad_norm": 1.4660252332687378,
"learning_rate": 0.00028736599899825856,
"loss": 6.5231,
"step": 35
},
{
"epoch": 0.00968327617510591,
"grad_norm": 1.0093355178833008,
"learning_rate": 0.00028635098025737434,
"loss": 7.4915,
"step": 36
},
{
"epoch": 0.009952256068858852,
"grad_norm": 1.0597023963928223,
"learning_rate": 0.00028529868451994384,
"loss": 7.5809,
"step": 37
},
{
"epoch": 0.010221235962611794,
"grad_norm": 1.0300654172897339,
"learning_rate": 0.0002842093994731145,
"loss": 7.4441,
"step": 38
},
{
"epoch": 0.010490215856364737,
"grad_norm": 1.0486762523651123,
"learning_rate": 0.00028308342291654174,
"loss": 6.7324,
"step": 39
},
{
"epoch": 0.010759195750117679,
"grad_norm": 0.9966297149658203,
"learning_rate": 0.00028192106268097334,
"loss": 7.7486,
"step": 40
},
{
"epoch": 0.011028175643870621,
"grad_norm": 0.8546872735023499,
"learning_rate": 0.00028072263654409154,
"loss": 6.9361,
"step": 41
},
{
"epoch": 0.011297155537623563,
"grad_norm": 0.9719182252883911,
"learning_rate": 0.0002794884721436361,
"loss": 7.2164,
"step": 42
},
{
"epoch": 0.011566135431376504,
"grad_norm": 1.2323123216629028,
"learning_rate": 0.00027821890688783083,
"loss": 6.7995,
"step": 43
},
{
"epoch": 0.011835115325129446,
"grad_norm": 1.016748070716858,
"learning_rate": 0.0002769142878631403,
"loss": 7.3677,
"step": 44
},
{
"epoch": 0.012104095218882388,
"grad_norm": 0.9493099451065063,
"learning_rate": 0.00027557497173937923,
"loss": 6.8809,
"step": 45
},
{
"epoch": 0.01237307511263533,
"grad_norm": 0.6532073020935059,
"learning_rate": 0.000274201324672203,
"loss": 6.9965,
"step": 46
},
{
"epoch": 0.012642055006388273,
"grad_norm": 1.0315707921981812,
"learning_rate": 0.00027279372220300385,
"loss": 7.3371,
"step": 47
},
{
"epoch": 0.012911034900141215,
"grad_norm": 0.867978036403656,
"learning_rate": 0.0002713525491562421,
"loss": 7.2436,
"step": 48
},
{
"epoch": 0.013180014793894156,
"grad_norm": 0.8053029775619507,
"learning_rate": 0.00026987819953423867,
"loss": 6.8425,
"step": 49
},
{
"epoch": 0.013448994687647098,
"grad_norm": 1.2040008306503296,
"learning_rate": 0.00026837107640945905,
"loss": 6.9863,
"step": 50
},
{
"epoch": 0.013448994687647098,
"eval_loss": 7.4686126708984375,
"eval_runtime": 1.1025,
"eval_samples_per_second": 45.352,
"eval_steps_per_second": 6.349,
"step": 50
},
{
"epoch": 0.01371797458140004,
"grad_norm": 2.073488235473633,
"learning_rate": 0.0002668315918143169,
"loss": 6.6067,
"step": 51
},
{
"epoch": 0.013986954475152982,
"grad_norm": 1.6959115266799927,
"learning_rate": 0.00026526016662852886,
"loss": 7.4615,
"step": 52
},
{
"epoch": 0.014255934368905925,
"grad_norm": 1.1312484741210938,
"learning_rate": 0.00026365723046405023,
"loss": 7.562,
"step": 53
},
{
"epoch": 0.014524914262658867,
"grad_norm": 0.8906031847000122,
"learning_rate": 0.0002620232215476231,
"loss": 7.8548,
"step": 54
},
{
"epoch": 0.014793894156411809,
"grad_norm": 1.1955941915512085,
"learning_rate": 0.0002603585866009697,
"loss": 7.5388,
"step": 55
},
{
"epoch": 0.01506287405016475,
"grad_norm": 1.6056780815124512,
"learning_rate": 0.00025866378071866334,
"loss": 6.7458,
"step": 56
},
{
"epoch": 0.015331853943917692,
"grad_norm": 0.9653465151786804,
"learning_rate": 0.00025693926724370956,
"loss": 7.2338,
"step": 57
},
{
"epoch": 0.015600833837670634,
"grad_norm": 1.055378794670105,
"learning_rate": 0.00025518551764087326,
"loss": 7.2497,
"step": 58
},
{
"epoch": 0.015869813731423574,
"grad_norm": 0.982029378414154,
"learning_rate": 0.00025340301136778483,
"loss": 6.3055,
"step": 59
},
{
"epoch": 0.016138793625176517,
"grad_norm": 1.4051647186279297,
"learning_rate": 0.00025159223574386114,
"loss": 6.7392,
"step": 60
},
{
"epoch": 0.01640777351892946,
"grad_norm": 1.224535346031189,
"learning_rate": 0.0002497536858170772,
"loss": 6.2815,
"step": 61
},
{
"epoch": 0.0166767534126824,
"grad_norm": 0.9965676069259644,
"learning_rate": 0.00024788786422862526,
"loss": 6.5987,
"step": 62
},
{
"epoch": 0.016945733306435343,
"grad_norm": 0.7063784599304199,
"learning_rate": 0.00024599528107549745,
"loss": 6.5176,
"step": 63
},
{
"epoch": 0.017214713200188286,
"grad_norm": 1.3905584812164307,
"learning_rate": 0.00024407645377103054,
"loss": 7.3527,
"step": 64
},
{
"epoch": 0.017483693093941228,
"grad_norm": 0.6794236898422241,
"learning_rate": 0.00024213190690345018,
"loss": 7.1635,
"step": 65
},
{
"epoch": 0.01775267298769417,
"grad_norm": 0.85134357213974,
"learning_rate": 0.00024016217209245374,
"loss": 6.9162,
"step": 66
},
{
"epoch": 0.018021652881447112,
"grad_norm": 1.1831365823745728,
"learning_rate": 0.00023816778784387094,
"loss": 7.4826,
"step": 67
},
{
"epoch": 0.018290632775200055,
"grad_norm": 0.7540268301963806,
"learning_rate": 0.0002361492994024415,
"loss": 7.5541,
"step": 68
},
{
"epoch": 0.018559612668952997,
"grad_norm": 0.8305040597915649,
"learning_rate": 0.0002341072586027509,
"loss": 7.0236,
"step": 69
},
{
"epoch": 0.01882859256270594,
"grad_norm": 0.8232185244560242,
"learning_rate": 0.00023204222371836405,
"loss": 7.256,
"step": 70
},
{
"epoch": 0.01909757245645888,
"grad_norm": 0.8017435669898987,
"learning_rate": 0.00022995475930919905,
"loss": 7.2468,
"step": 71
},
{
"epoch": 0.01936655235021182,
"grad_norm": 0.7299101948738098,
"learning_rate": 0.00022784543606718227,
"loss": 6.8815,
"step": 72
},
{
"epoch": 0.019635532243964762,
"grad_norm": 0.9603165984153748,
"learning_rate": 0.00022571483066022657,
"loss": 7.1806,
"step": 73
},
{
"epoch": 0.019904512137717705,
"grad_norm": 0.7412318587303162,
"learning_rate": 0.0002235635255745762,
"loss": 6.8687,
"step": 74
},
{
"epoch": 0.020173492031470647,
"grad_norm": 1.5883235931396484,
"learning_rate": 0.00022139210895556104,
"loss": 6.6477,
"step": 75
},
{
"epoch": 0.020173492031470647,
"eval_loss": 7.266612529754639,
"eval_runtime": 1.1026,
"eval_samples_per_second": 45.347,
"eval_steps_per_second": 6.349,
"step": 75
},
{
"epoch": 0.02044247192522359,
"grad_norm": 0.874472975730896,
"learning_rate": 0.00021920117444680317,
"loss": 6.5988,
"step": 76
},
{
"epoch": 0.02071145181897653,
"grad_norm": 1.0724687576293945,
"learning_rate": 0.00021699132102792097,
"loss": 7.0543,
"step": 77
},
{
"epoch": 0.020980431712729473,
"grad_norm": 0.7699567079544067,
"learning_rate": 0.0002147631528507739,
"loss": 7.1625,
"step": 78
},
{
"epoch": 0.021249411606482416,
"grad_norm": 0.622986912727356,
"learning_rate": 0.00021251727907429355,
"loss": 7.4872,
"step": 79
},
{
"epoch": 0.021518391500235358,
"grad_norm": 0.9322075843811035,
"learning_rate": 0.0002102543136979454,
"loss": 7.0894,
"step": 80
},
{
"epoch": 0.0217873713939883,
"grad_norm": 0.6225010752677917,
"learning_rate": 0.0002079748753938678,
"loss": 6.9691,
"step": 81
},
{
"epoch": 0.022056351287741242,
"grad_norm": 0.9002844095230103,
"learning_rate": 0.0002056795873377331,
"loss": 6.5822,
"step": 82
},
{
"epoch": 0.022325331181494185,
"grad_norm": 0.6348511576652527,
"learning_rate": 0.00020336907703837748,
"loss": 6.8699,
"step": 83
},
{
"epoch": 0.022594311075247127,
"grad_norm": 0.5231301188468933,
"learning_rate": 0.00020104397616624645,
"loss": 6.8725,
"step": 84
},
{
"epoch": 0.022863290969000066,
"grad_norm": 0.68709397315979,
"learning_rate": 0.00019870492038070252,
"loss": 6.7312,
"step": 85
},
{
"epoch": 0.023132270862753008,
"grad_norm": 0.47030457854270935,
"learning_rate": 0.0001963525491562421,
"loss": 6.7572,
"step": 86
},
{
"epoch": 0.02340125075650595,
"grad_norm": 0.7477748394012451,
"learning_rate": 0.0001939875056076697,
"loss": 7.014,
"step": 87
},
{
"epoch": 0.023670230650258892,
"grad_norm": 0.9135372638702393,
"learning_rate": 0.00019161043631427666,
"loss": 6.8656,
"step": 88
},
{
"epoch": 0.023939210544011835,
"grad_norm": 0.7729247212409973,
"learning_rate": 0.00018922199114307294,
"loss": 6.8811,
"step": 89
},
{
"epoch": 0.024208190437764777,
"grad_norm": 0.8843768835067749,
"learning_rate": 0.00018682282307111987,
"loss": 6.8623,
"step": 90
},
{
"epoch": 0.02447717033151772,
"grad_norm": 0.5804451107978821,
"learning_rate": 0.00018441358800701273,
"loss": 6.8921,
"step": 91
},
{
"epoch": 0.02474615022527066,
"grad_norm": 0.9439037442207336,
"learning_rate": 0.00018199494461156203,
"loss": 7.4667,
"step": 92
},
{
"epoch": 0.025015130119023603,
"grad_norm": 0.6652296185493469,
"learning_rate": 0.000179567554117722,
"loss": 7.1213,
"step": 93
},
{
"epoch": 0.025284110012776546,
"grad_norm": 0.9313641786575317,
"learning_rate": 0.00017713208014981648,
"loss": 6.9055,
"step": 94
},
{
"epoch": 0.025553089906529488,
"grad_norm": 0.5718141198158264,
"learning_rate": 0.00017468918854211007,
"loss": 7.4156,
"step": 95
},
{
"epoch": 0.02582206980028243,
"grad_norm": 0.90057772397995,
"learning_rate": 0.00017223954715677627,
"loss": 7.0547,
"step": 96
},
{
"epoch": 0.026091049694035372,
"grad_norm": 0.8088359236717224,
"learning_rate": 0.00016978382570131034,
"loss": 6.8638,
"step": 97
},
{
"epoch": 0.02636002958778831,
"grad_norm": 0.7129319310188293,
"learning_rate": 0.00016732269554543794,
"loss": 7.0757,
"step": 98
},
{
"epoch": 0.026629009481541253,
"grad_norm": 0.7212324142456055,
"learning_rate": 0.00016485682953756942,
"loss": 7.3563,
"step": 99
},
{
"epoch": 0.026897989375294196,
"grad_norm": 1.3920371532440186,
"learning_rate": 0.00016238690182084986,
"loss": 7.3419,
"step": 100
},
{
"epoch": 0.026897989375294196,
"eval_loss": 7.306600570678711,
"eval_runtime": 1.1005,
"eval_samples_per_second": 45.435,
"eval_steps_per_second": 6.361,
"step": 100
},
{
"epoch": 0.027166969269047138,
"grad_norm": 1.9963257312774658,
"learning_rate": 0.0001599135876488549,
"loss": 6.673,
"step": 101
},
{
"epoch": 0.02743594916280008,
"grad_norm": 1.888506293296814,
"learning_rate": 0.00015743756320098332,
"loss": 7.1533,
"step": 102
},
{
"epoch": 0.027704929056553022,
"grad_norm": 1.8473235368728638,
"learning_rate": 0.0001549595053975962,
"loss": 7.442,
"step": 103
},
{
"epoch": 0.027973908950305965,
"grad_norm": 1.1786754131317139,
"learning_rate": 0.00015248009171495378,
"loss": 7.4846,
"step": 104
},
{
"epoch": 0.028242888844058907,
"grad_norm": 1.2605340480804443,
"learning_rate": 0.00015,
"loss": 7.1673,
"step": 105
},
{
"epoch": 0.02851186873781185,
"grad_norm": 0.6456354856491089,
"learning_rate": 0.00014751990828504622,
"loss": 7.1138,
"step": 106
},
{
"epoch": 0.02878084863156479,
"grad_norm": 1.4024274349212646,
"learning_rate": 0.00014504049460240375,
"loss": 6.8081,
"step": 107
},
{
"epoch": 0.029049828525317734,
"grad_norm": 1.4252393245697021,
"learning_rate": 0.00014256243679901663,
"loss": 6.7006,
"step": 108
},
{
"epoch": 0.029318808419070676,
"grad_norm": 1.7832438945770264,
"learning_rate": 0.00014008641235114508,
"loss": 6.3993,
"step": 109
},
{
"epoch": 0.029587788312823618,
"grad_norm": 1.6940126419067383,
"learning_rate": 0.00013761309817915014,
"loss": 6.4566,
"step": 110
},
{
"epoch": 0.029856768206576557,
"grad_norm": 1.3766435384750366,
"learning_rate": 0.00013514317046243058,
"loss": 6.479,
"step": 111
},
{
"epoch": 0.0301257481003295,
"grad_norm": 1.0472962856292725,
"learning_rate": 0.00013267730445456208,
"loss": 6.0186,
"step": 112
},
{
"epoch": 0.03039472799408244,
"grad_norm": 1.0262020826339722,
"learning_rate": 0.00013021617429868963,
"loss": 7.0508,
"step": 113
},
{
"epoch": 0.030663707887835383,
"grad_norm": 1.0168136358261108,
"learning_rate": 0.00012776045284322368,
"loss": 6.2651,
"step": 114
},
{
"epoch": 0.030932687781588326,
"grad_norm": 1.360337734222412,
"learning_rate": 0.00012531081145788987,
"loss": 7.0073,
"step": 115
},
{
"epoch": 0.031201667675341268,
"grad_norm": 1.2837754487991333,
"learning_rate": 0.00012286791985018355,
"loss": 7.3239,
"step": 116
},
{
"epoch": 0.031470647569094214,
"grad_norm": 1.3919296264648438,
"learning_rate": 0.00012043244588227796,
"loss": 6.6288,
"step": 117
},
{
"epoch": 0.03173962746284715,
"grad_norm": 1.2884666919708252,
"learning_rate": 0.00011800505538843798,
"loss": 7.1069,
"step": 118
},
{
"epoch": 0.03200860735660009,
"grad_norm": 1.1876237392425537,
"learning_rate": 0.00011558641199298727,
"loss": 6.9297,
"step": 119
},
{
"epoch": 0.03227758725035303,
"grad_norm": 1.10402512550354,
"learning_rate": 0.00011317717692888012,
"loss": 6.6218,
"step": 120
},
{
"epoch": 0.032546567144105976,
"grad_norm": 1.4770866632461548,
"learning_rate": 0.00011077800885692702,
"loss": 7.4542,
"step": 121
},
{
"epoch": 0.03281554703785892,
"grad_norm": 1.2299976348876953,
"learning_rate": 0.00010838956368572334,
"loss": 7.1409,
"step": 122
},
{
"epoch": 0.03308452693161186,
"grad_norm": 0.6715269684791565,
"learning_rate": 0.0001060124943923303,
"loss": 6.9305,
"step": 123
},
{
"epoch": 0.0333535068253648,
"grad_norm": 0.812654435634613,
"learning_rate": 0.0001036474508437579,
"loss": 6.6642,
"step": 124
},
{
"epoch": 0.033622486719117745,
"grad_norm": 1.3478944301605225,
"learning_rate": 0.00010129507961929748,
"loss": 7.0165,
"step": 125
},
{
"epoch": 0.033622486719117745,
"eval_loss": 7.224481105804443,
"eval_runtime": 1.101,
"eval_samples_per_second": 45.415,
"eval_steps_per_second": 6.358,
"step": 125
},
{
"epoch": 0.03389146661287069,
"grad_norm": 1.1918294429779053,
"learning_rate": 9.895602383375353e-05,
"loss": 6.6002,
"step": 126
},
{
"epoch": 0.03416044650662363,
"grad_norm": 0.9166633486747742,
"learning_rate": 9.663092296162251e-05,
"loss": 7.461,
"step": 127
},
{
"epoch": 0.03442942640037657,
"grad_norm": 0.7154397964477539,
"learning_rate": 9.432041266226686e-05,
"loss": 7.38,
"step": 128
},
{
"epoch": 0.034698406294129513,
"grad_norm": 1.2024431228637695,
"learning_rate": 9.202512460613219e-05,
"loss": 6.5682,
"step": 129
},
{
"epoch": 0.034967386187882456,
"grad_norm": 1.2459428310394287,
"learning_rate": 8.97456863020546e-05,
"loss": 7.3055,
"step": 130
},
{
"epoch": 0.0352363660816354,
"grad_norm": 0.7260080575942993,
"learning_rate": 8.748272092570646e-05,
"loss": 6.581,
"step": 131
},
{
"epoch": 0.03550534597538834,
"grad_norm": 0.5916652679443359,
"learning_rate": 8.523684714922608e-05,
"loss": 6.9627,
"step": 132
},
{
"epoch": 0.03577432586914128,
"grad_norm": 0.7819718718528748,
"learning_rate": 8.300867897207903e-05,
"loss": 6.668,
"step": 133
},
{
"epoch": 0.036043305762894225,
"grad_norm": 0.8700635433197021,
"learning_rate": 8.079882555319684e-05,
"loss": 7.2856,
"step": 134
},
{
"epoch": 0.03631228565664717,
"grad_norm": 0.8389477133750916,
"learning_rate": 7.860789104443896e-05,
"loss": 7.0287,
"step": 135
},
{
"epoch": 0.03658126555040011,
"grad_norm": 0.8980041742324829,
"learning_rate": 7.643647442542382e-05,
"loss": 7.1966,
"step": 136
},
{
"epoch": 0.03685024544415305,
"grad_norm": 0.7039759159088135,
"learning_rate": 7.428516933977347e-05,
"loss": 6.7216,
"step": 137
},
{
"epoch": 0.037119225337905994,
"grad_norm": 0.8420053124427795,
"learning_rate": 7.215456393281776e-05,
"loss": 7.2763,
"step": 138
},
{
"epoch": 0.037388205231658936,
"grad_norm": 0.8006654381752014,
"learning_rate": 7.004524069080096e-05,
"loss": 6.8812,
"step": 139
},
{
"epoch": 0.03765718512541188,
"grad_norm": 0.5510133504867554,
"learning_rate": 6.795777628163599e-05,
"loss": 7.1272,
"step": 140
},
{
"epoch": 0.03792616501916482,
"grad_norm": 0.6548418998718262,
"learning_rate": 6.58927413972491e-05,
"loss": 6.5317,
"step": 141
},
{
"epoch": 0.03819514491291776,
"grad_norm": 0.618125319480896,
"learning_rate": 6.385070059755846e-05,
"loss": 7.2238,
"step": 142
},
{
"epoch": 0.038464124806670705,
"grad_norm": 0.7045526504516602,
"learning_rate": 6.183221215612904e-05,
"loss": 7.0095,
"step": 143
},
{
"epoch": 0.03873310470042364,
"grad_norm": 0.8406367897987366,
"learning_rate": 5.983782790754623e-05,
"loss": 6.357,
"step": 144
},
{
"epoch": 0.03900208459417658,
"grad_norm": 0.5313419103622437,
"learning_rate": 5.786809309654982e-05,
"loss": 6.9844,
"step": 145
},
{
"epoch": 0.039271064487929525,
"grad_norm": 0.7371297478675842,
"learning_rate": 5.592354622896944e-05,
"loss": 7.2803,
"step": 146
},
{
"epoch": 0.03954004438168247,
"grad_norm": 0.5858275294303894,
"learning_rate": 5.40047189245025e-05,
"loss": 7.0337,
"step": 147
},
{
"epoch": 0.03980902427543541,
"grad_norm": 0.9702624678611755,
"learning_rate": 5.211213577137469e-05,
"loss": 7.0098,
"step": 148
},
{
"epoch": 0.04007800416918835,
"grad_norm": 0.7458012104034424,
"learning_rate": 5.024631418292274e-05,
"loss": 7.1638,
"step": 149
},
{
"epoch": 0.040346984062941293,
"grad_norm": 1.1584868431091309,
"learning_rate": 4.840776425613886e-05,
"loss": 7.3627,
"step": 150
},
{
"epoch": 0.040346984062941293,
"eval_loss": 7.293600082397461,
"eval_runtime": 1.0743,
"eval_samples_per_second": 46.541,
"eval_steps_per_second": 6.516,
"step": 150
},
{
"epoch": 0.040615963956694236,
"grad_norm": 1.378065586090088,
"learning_rate": 4.659698863221513e-05,
"loss": 6.2633,
"step": 151
},
{
"epoch": 0.04088494385044718,
"grad_norm": 1.318018913269043,
"learning_rate": 4.481448235912671e-05,
"loss": 6.6505,
"step": 152
},
{
"epoch": 0.04115392374420012,
"grad_norm": 1.5204188823699951,
"learning_rate": 4.306073275629044e-05,
"loss": 7.352,
"step": 153
},
{
"epoch": 0.04142290363795306,
"grad_norm": 1.6174471378326416,
"learning_rate": 4.133621928133665e-05,
"loss": 7.3426,
"step": 154
},
{
"epoch": 0.041691883531706005,
"grad_norm": 0.9823394417762756,
"learning_rate": 3.964141339903026e-05,
"loss": 7.2325,
"step": 155
},
{
"epoch": 0.04196086342545895,
"grad_norm": 0.9155641794204712,
"learning_rate": 3.797677845237696e-05,
"loss": 7.0975,
"step": 156
},
{
"epoch": 0.04222984331921189,
"grad_norm": 0.9602285623550415,
"learning_rate": 3.634276953594982e-05,
"loss": 7.4331,
"step": 157
},
{
"epoch": 0.04249882321296483,
"grad_norm": 0.4766939878463745,
"learning_rate": 3.473983337147118e-05,
"loss": 6.4512,
"step": 158
},
{
"epoch": 0.042767803106717774,
"grad_norm": 0.6891216039657593,
"learning_rate": 3.316840818568315e-05,
"loss": 6.2009,
"step": 159
},
{
"epoch": 0.043036783000470716,
"grad_norm": 0.7973335385322571,
"learning_rate": 3.162892359054098e-05,
"loss": 6.0294,
"step": 160
},
{
"epoch": 0.04330576289422366,
"grad_norm": 0.7474548816680908,
"learning_rate": 3.0121800465761293e-05,
"loss": 6.385,
"step": 161
},
{
"epoch": 0.0435747427879766,
"grad_norm": 0.5858322381973267,
"learning_rate": 2.8647450843757897e-05,
"loss": 6.2609,
"step": 162
},
{
"epoch": 0.04384372268172954,
"grad_norm": 0.7844979763031006,
"learning_rate": 2.7206277796996144e-05,
"loss": 6.3406,
"step": 163
},
{
"epoch": 0.044112702575482485,
"grad_norm": 1.182106614112854,
"learning_rate": 2.5798675327796993e-05,
"loss": 6.4119,
"step": 164
},
{
"epoch": 0.04438168246923543,
"grad_norm": 0.9624720215797424,
"learning_rate": 2.4425028260620715e-05,
"loss": 6.6251,
"step": 165
},
{
"epoch": 0.04465066236298837,
"grad_norm": 0.8138177990913391,
"learning_rate": 2.3085712136859668e-05,
"loss": 7.0401,
"step": 166
},
{
"epoch": 0.04491964225674131,
"grad_norm": 0.5879483222961426,
"learning_rate": 2.178109311216913e-05,
"loss": 6.9187,
"step": 167
},
{
"epoch": 0.045188622150494254,
"grad_norm": 1.2505601644515991,
"learning_rate": 2.0511527856363912e-05,
"loss": 7.2914,
"step": 168
},
{
"epoch": 0.045457602044247196,
"grad_norm": 1.0231761932373047,
"learning_rate": 1.927736345590839e-05,
"loss": 6.8207,
"step": 169
},
{
"epoch": 0.04572658193800013,
"grad_norm": 0.8401983976364136,
"learning_rate": 1.8078937319026654e-05,
"loss": 7.1883,
"step": 170
},
{
"epoch": 0.04599556183175307,
"grad_norm": 0.5049845576286316,
"learning_rate": 1.6916577083458228e-05,
"loss": 6.8629,
"step": 171
},
{
"epoch": 0.046264541725506016,
"grad_norm": 0.7277731895446777,
"learning_rate": 1.579060052688548e-05,
"loss": 7.2852,
"step": 172
},
{
"epoch": 0.04653352161925896,
"grad_norm": 0.977936863899231,
"learning_rate": 1.4701315480056164e-05,
"loss": 6.9577,
"step": 173
},
{
"epoch": 0.0468025015130119,
"grad_norm": 0.7111912369728088,
"learning_rate": 1.3649019742625623e-05,
"loss": 7.0556,
"step": 174
},
{
"epoch": 0.04707148140676484,
"grad_norm": 0.7338117361068726,
"learning_rate": 1.2634001001741373e-05,
"loss": 7.2456,
"step": 175
},
{
"epoch": 0.04707148140676484,
"eval_loss": 7.184196949005127,
"eval_runtime": 1.074,
"eval_samples_per_second": 46.556,
"eval_steps_per_second": 6.518,
"step": 175
},
{
"epoch": 0.047340461300517785,
"grad_norm": 0.5232920050621033,
"learning_rate": 1.1656536753392287e-05,
"loss": 6.4342,
"step": 176
},
{
"epoch": 0.04760944119427073,
"grad_norm": 0.45352447032928467,
"learning_rate": 1.0716894226543953e-05,
"loss": 6.9281,
"step": 177
},
{
"epoch": 0.04787842108802367,
"grad_norm": 0.611677885055542,
"learning_rate": 9.815330310080887e-06,
"loss": 7.319,
"step": 178
},
{
"epoch": 0.04814740098177661,
"grad_norm": 0.9648262858390808,
"learning_rate": 8.952091482575824e-06,
"loss": 6.9275,
"step": 179
},
{
"epoch": 0.048416380875529554,
"grad_norm": 0.7132251262664795,
"learning_rate": 8.127413744904804e-06,
"loss": 6.5056,
"step": 180
},
{
"epoch": 0.048685360769282496,
"grad_norm": 0.5821110606193542,
"learning_rate": 7.34152255572697e-06,
"loss": 6.84,
"step": 181
},
{
"epoch": 0.04895434066303544,
"grad_norm": 0.7894052267074585,
"learning_rate": 6.594632769846353e-06,
"loss": 6.9713,
"step": 182
},
{
"epoch": 0.04922332055678838,
"grad_norm": 0.7041998505592346,
"learning_rate": 5.886948579472778e-06,
"loss": 6.4816,
"step": 183
},
{
"epoch": 0.04949230045054132,
"grad_norm": 0.5108593702316284,
"learning_rate": 5.218663458397715e-06,
"loss": 7.2662,
"step": 184
},
{
"epoch": 0.049761280344294265,
"grad_norm": 0.6600824594497681,
"learning_rate": 4.589960109100444e-06,
"loss": 7.2239,
"step": 185
},
{
"epoch": 0.05003026023804721,
"grad_norm": 0.679880678653717,
"learning_rate": 4.001010412799138e-06,
"loss": 7.0421,
"step": 186
},
{
"epoch": 0.05029924013180015,
"grad_norm": 0.5693497657775879,
"learning_rate": 3.451975382460109e-06,
"loss": 7.0777,
"step": 187
},
{
"epoch": 0.05056822002555309,
"grad_norm": 0.6687301397323608,
"learning_rate": 2.9430051187785962e-06,
"loss": 6.6986,
"step": 188
},
{
"epoch": 0.050837199919306034,
"grad_norm": 0.7745783925056458,
"learning_rate": 2.4742387691426445e-06,
"loss": 7.0626,
"step": 189
},
{
"epoch": 0.051106179813058976,
"grad_norm": 0.5398703813552856,
"learning_rate": 2.0458044895916513e-06,
"loss": 7.5387,
"step": 190
},
{
"epoch": 0.05137515970681192,
"grad_norm": 0.5536245703697205,
"learning_rate": 1.6578194097797258e-06,
"loss": 6.8114,
"step": 191
},
{
"epoch": 0.05164413960056486,
"grad_norm": 0.8733762502670288,
"learning_rate": 1.3103896009537207e-06,
"loss": 7.109,
"step": 192
},
{
"epoch": 0.0519131194943178,
"grad_norm": 0.9818241000175476,
"learning_rate": 1.0036100469542786e-06,
"loss": 6.679,
"step": 193
},
{
"epoch": 0.052182099388070745,
"grad_norm": 0.9805580377578735,
"learning_rate": 7.375646182482875e-07,
"loss": 6.5897,
"step": 194
},
{
"epoch": 0.05245107928182369,
"grad_norm": 1.4828003644943237,
"learning_rate": 5.123260489995229e-07,
"loss": 7.2332,
"step": 195
},
{
"epoch": 0.05272005917557662,
"grad_norm": 1.0649274587631226,
"learning_rate": 3.2795591718381975e-07,
"loss": 6.9496,
"step": 196
},
{
"epoch": 0.052989039069329565,
"grad_norm": 1.1552672386169434,
"learning_rate": 1.8450462775428942e-07,
"loss": 6.3958,
"step": 197
},
{
"epoch": 0.05325801896308251,
"grad_norm": 1.6000089645385742,
"learning_rate": 8.201139886109264e-08,
"loss": 7.0364,
"step": 198
},
{
"epoch": 0.05352699885683545,
"grad_norm": 0.86728435754776,
"learning_rate": 2.0504251129649374e-08,
"loss": 7.7624,
"step": 199
},
{
"epoch": 0.05379597875058839,
"grad_norm": 1.0310410261154175,
"learning_rate": 0.0,
"loss": 7.2514,
"step": 200
},
{
"epoch": 0.05379597875058839,
"eval_loss": 7.1964921951293945,
"eval_runtime": 1.1003,
"eval_samples_per_second": 45.441,
"eval_steps_per_second": 6.362,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 174087813267456.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}