{
"best_metric": 7.306600570678711,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.026897989375294196,
"eval_steps": 25,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00026897989375294196,
"grad_norm": 14.062777519226074,
"learning_rate": 2.9999999999999997e-05,
"loss": 9.9528,
"step": 1
},
{
"epoch": 0.00026897989375294196,
"eval_loss": 10.383614540100098,
"eval_runtime": 1.103,
"eval_samples_per_second": 45.331,
"eval_steps_per_second": 6.346,
"step": 1
},
{
"epoch": 0.0005379597875058839,
"grad_norm": 9.013472557067871,
"learning_rate": 5.9999999999999995e-05,
"loss": 10.4374,
"step": 2
},
{
"epoch": 0.0008069396812588259,
"grad_norm": 9.113578796386719,
"learning_rate": 8.999999999999999e-05,
"loss": 10.5416,
"step": 3
},
{
"epoch": 0.0010759195750117679,
"grad_norm": 9.272452354431152,
"learning_rate": 0.00011999999999999999,
"loss": 10.5582,
"step": 4
},
{
"epoch": 0.0013448994687647099,
"grad_norm": 9.502043724060059,
"learning_rate": 0.00015,
"loss": 10.4131,
"step": 5
},
{
"epoch": 0.0016138793625176519,
"grad_norm": 12.628140449523926,
"learning_rate": 0.00017999999999999998,
"loss": 9.5481,
"step": 6
},
{
"epoch": 0.0018828592562705937,
"grad_norm": 10.771757125854492,
"learning_rate": 0.00020999999999999998,
"loss": 9.7362,
"step": 7
},
{
"epoch": 0.0021518391500235357,
"grad_norm": 8.7677640914917,
"learning_rate": 0.00023999999999999998,
"loss": 9.394,
"step": 8
},
{
"epoch": 0.0024208190437764775,
"grad_norm": 9.826272010803223,
"learning_rate": 0.00027,
"loss": 8.7301,
"step": 9
},
{
"epoch": 0.0026897989375294197,
"grad_norm": 8.201698303222656,
"learning_rate": 0.0003,
"loss": 8.4353,
"step": 10
},
{
"epoch": 0.0029587788312823615,
"grad_norm": 6.270658493041992,
"learning_rate": 0.0002999794957488703,
"loss": 7.7853,
"step": 11
},
{
"epoch": 0.0032277587250353038,
"grad_norm": 5.651599884033203,
"learning_rate": 0.0002999179886011389,
"loss": 7.7574,
"step": 12
},
{
"epoch": 0.0034967386187882456,
"grad_norm": 3.6305131912231445,
"learning_rate": 0.0002998154953722457,
"loss": 7.9822,
"step": 13
},
{
"epoch": 0.0037657185125411874,
"grad_norm": 3.2104008197784424,
"learning_rate": 0.00029967204408281613,
"loss": 8.3468,
"step": 14
},
{
"epoch": 0.004034698406294129,
"grad_norm": 2.4810068607330322,
"learning_rate": 0.00029948767395100045,
"loss": 8.2438,
"step": 15
},
{
"epoch": 0.004303678300047071,
"grad_norm": 2.427516460418701,
"learning_rate": 0.0002992624353817517,
"loss": 8.4045,
"step": 16
},
{
"epoch": 0.004572658193800014,
"grad_norm": 1.9261265993118286,
"learning_rate": 0.0002989963899530457,
"loss": 7.9577,
"step": 17
},
{
"epoch": 0.004841638087552955,
"grad_norm": 2.021862268447876,
"learning_rate": 0.00029868961039904624,
"loss": 8.1164,
"step": 18
},
{
"epoch": 0.005110617981305897,
"grad_norm": 2.315554141998291,
"learning_rate": 0.00029834218059022024,
"loss": 8.1393,
"step": 19
},
{
"epoch": 0.0053795978750588395,
"grad_norm": 2.343036651611328,
"learning_rate": 0.00029795419551040833,
"loss": 7.8621,
"step": 20
},
{
"epoch": 0.005648577768811782,
"grad_norm": 1.6685760021209717,
"learning_rate": 0.00029752576123085736,
"loss": 7.6249,
"step": 21
},
{
"epoch": 0.005917557662564723,
"grad_norm": 1.617172360420227,
"learning_rate": 0.0002970569948812214,
"loss": 7.8443,
"step": 22
},
{
"epoch": 0.006186537556317665,
"grad_norm": 1.611405611038208,
"learning_rate": 0.0002965480246175399,
"loss": 7.1947,
"step": 23
},
{
"epoch": 0.0064555174500706075,
"grad_norm": 1.3807909488677979,
"learning_rate": 0.0002959989895872009,
"loss": 7.7691,
"step": 24
},
{
"epoch": 0.006724497343823549,
"grad_norm": 1.8649704456329346,
"learning_rate": 0.0002954100398908995,
"loss": 7.6449,
"step": 25
},
{
"epoch": 0.006724497343823549,
"eval_loss": 7.789085865020752,
"eval_runtime": 1.1023,
"eval_samples_per_second": 45.359,
"eval_steps_per_second": 6.35,
"step": 25
},
{
"epoch": 0.006993477237576491,
"grad_norm": 1.826485276222229,
"learning_rate": 0.0002947813365416023,
"loss": 7.775,
"step": 26
},
{
"epoch": 0.007262457131329433,
"grad_norm": 2.0122640132904053,
"learning_rate": 0.0002941130514205272,
"loss": 7.4112,
"step": 27
},
{
"epoch": 0.007531437025082375,
"grad_norm": 1.474570870399475,
"learning_rate": 0.0002934053672301536,
"loss": 7.605,
"step": 28
},
{
"epoch": 0.007800416918835317,
"grad_norm": 1.2456080913543701,
"learning_rate": 0.00029265847744427303,
"loss": 7.615,
"step": 29
},
{
"epoch": 0.008069396812588258,
"grad_norm": 1.3614413738250732,
"learning_rate": 0.00029187258625509513,
"loss": 7.1255,
"step": 30
},
{
"epoch": 0.0083383767063412,
"grad_norm": 1.5178754329681396,
"learning_rate": 0.00029104790851742417,
"loss": 7.3523,
"step": 31
},
{
"epoch": 0.008607356600094143,
"grad_norm": 1.0982545614242554,
"learning_rate": 0.0002901846696899191,
"loss": 7.2886,
"step": 32
},
{
"epoch": 0.008876336493847085,
"grad_norm": 0.9949588775634766,
"learning_rate": 0.00028928310577345606,
"loss": 7.705,
"step": 33
},
{
"epoch": 0.009145316387600027,
"grad_norm": 1.0565680265426636,
"learning_rate": 0.0002883434632466077,
"loss": 7.054,
"step": 34
},
{
"epoch": 0.00941429628135297,
"grad_norm": 1.4660252332687378,
"learning_rate": 0.00028736599899825856,
"loss": 6.5231,
"step": 35
},
{
"epoch": 0.00968327617510591,
"grad_norm": 1.0093355178833008,
"learning_rate": 0.00028635098025737434,
"loss": 7.4915,
"step": 36
},
{
"epoch": 0.009952256068858852,
"grad_norm": 1.0597023963928223,
"learning_rate": 0.00028529868451994384,
"loss": 7.5809,
"step": 37
},
{
"epoch": 0.010221235962611794,
"grad_norm": 1.0300654172897339,
"learning_rate": 0.0002842093994731145,
"loss": 7.4441,
"step": 38
},
{
"epoch": 0.010490215856364737,
"grad_norm": 1.0486762523651123,
"learning_rate": 0.00028308342291654174,
"loss": 6.7324,
"step": 39
},
{
"epoch": 0.010759195750117679,
"grad_norm": 0.9966297149658203,
"learning_rate": 0.00028192106268097334,
"loss": 7.7486,
"step": 40
},
{
"epoch": 0.011028175643870621,
"grad_norm": 0.8546872735023499,
"learning_rate": 0.00028072263654409154,
"loss": 6.9361,
"step": 41
},
{
"epoch": 0.011297155537623563,
"grad_norm": 0.9719182252883911,
"learning_rate": 0.0002794884721436361,
"loss": 7.2164,
"step": 42
},
{
"epoch": 0.011566135431376504,
"grad_norm": 1.2323123216629028,
"learning_rate": 0.00027821890688783083,
"loss": 6.7995,
"step": 43
},
{
"epoch": 0.011835115325129446,
"grad_norm": 1.016748070716858,
"learning_rate": 0.0002769142878631403,
"loss": 7.3677,
"step": 44
},
{
"epoch": 0.012104095218882388,
"grad_norm": 0.9493099451065063,
"learning_rate": 0.00027557497173937923,
"loss": 6.8809,
"step": 45
},
{
"epoch": 0.01237307511263533,
"grad_norm": 0.6532073020935059,
"learning_rate": 0.000274201324672203,
"loss": 6.9965,
"step": 46
},
{
"epoch": 0.012642055006388273,
"grad_norm": 1.0315707921981812,
"learning_rate": 0.00027279372220300385,
"loss": 7.3371,
"step": 47
},
{
"epoch": 0.012911034900141215,
"grad_norm": 0.867978036403656,
"learning_rate": 0.0002713525491562421,
"loss": 7.2436,
"step": 48
},
{
"epoch": 0.013180014793894156,
"grad_norm": 0.8053029775619507,
"learning_rate": 0.00026987819953423867,
"loss": 6.8425,
"step": 49
},
{
"epoch": 0.013448994687647098,
"grad_norm": 1.2040008306503296,
"learning_rate": 0.00026837107640945905,
"loss": 6.9863,
"step": 50
},
{
"epoch": 0.013448994687647098,
"eval_loss": 7.4686126708984375,
"eval_runtime": 1.1025,
"eval_samples_per_second": 45.352,
"eval_steps_per_second": 6.349,
"step": 50
},
{
"epoch": 0.01371797458140004,
"grad_norm": 2.073488235473633,
"learning_rate": 0.0002668315918143169,
"loss": 6.6067,
"step": 51
},
{
"epoch": 0.013986954475152982,
"grad_norm": 1.6959115266799927,
"learning_rate": 0.00026526016662852886,
"loss": 7.4615,
"step": 52
},
{
"epoch": 0.014255934368905925,
"grad_norm": 1.1312484741210938,
"learning_rate": 0.00026365723046405023,
"loss": 7.562,
"step": 53
},
{
"epoch": 0.014524914262658867,
"grad_norm": 0.8906031847000122,
"learning_rate": 0.0002620232215476231,
"loss": 7.8548,
"step": 54
},
{
"epoch": 0.014793894156411809,
"grad_norm": 1.1955941915512085,
"learning_rate": 0.0002603585866009697,
"loss": 7.5388,
"step": 55
},
{
"epoch": 0.01506287405016475,
"grad_norm": 1.6056780815124512,
"learning_rate": 0.00025866378071866334,
"loss": 6.7458,
"step": 56
},
{
"epoch": 0.015331853943917692,
"grad_norm": 0.9653465151786804,
"learning_rate": 0.00025693926724370956,
"loss": 7.2338,
"step": 57
},
{
"epoch": 0.015600833837670634,
"grad_norm": 1.055378794670105,
"learning_rate": 0.00025518551764087326,
"loss": 7.2497,
"step": 58
},
{
"epoch": 0.015869813731423574,
"grad_norm": 0.982029378414154,
"learning_rate": 0.00025340301136778483,
"loss": 6.3055,
"step": 59
},
{
"epoch": 0.016138793625176517,
"grad_norm": 1.4051647186279297,
"learning_rate": 0.00025159223574386114,
"loss": 6.7392,
"step": 60
},
{
"epoch": 0.01640777351892946,
"grad_norm": 1.224535346031189,
"learning_rate": 0.0002497536858170772,
"loss": 6.2815,
"step": 61
},
{
"epoch": 0.0166767534126824,
"grad_norm": 0.9965676069259644,
"learning_rate": 0.00024788786422862526,
"loss": 6.5987,
"step": 62
},
{
"epoch": 0.016945733306435343,
"grad_norm": 0.7063784599304199,
"learning_rate": 0.00024599528107549745,
"loss": 6.5176,
"step": 63
},
{
"epoch": 0.017214713200188286,
"grad_norm": 1.3905584812164307,
"learning_rate": 0.00024407645377103054,
"loss": 7.3527,
"step": 64
},
{
"epoch": 0.017483693093941228,
"grad_norm": 0.6794236898422241,
"learning_rate": 0.00024213190690345018,
"loss": 7.1635,
"step": 65
},
{
"epoch": 0.01775267298769417,
"grad_norm": 0.85134357213974,
"learning_rate": 0.00024016217209245374,
"loss": 6.9162,
"step": 66
},
{
"epoch": 0.018021652881447112,
"grad_norm": 1.1831365823745728,
"learning_rate": 0.00023816778784387094,
"loss": 7.4826,
"step": 67
},
{
"epoch": 0.018290632775200055,
"grad_norm": 0.7540268301963806,
"learning_rate": 0.0002361492994024415,
"loss": 7.5541,
"step": 68
},
{
"epoch": 0.018559612668952997,
"grad_norm": 0.8305040597915649,
"learning_rate": 0.0002341072586027509,
"loss": 7.0236,
"step": 69
},
{
"epoch": 0.01882859256270594,
"grad_norm": 0.8232185244560242,
"learning_rate": 0.00023204222371836405,
"loss": 7.256,
"step": 70
},
{
"epoch": 0.01909757245645888,
"grad_norm": 0.8017435669898987,
"learning_rate": 0.00022995475930919905,
"loss": 7.2468,
"step": 71
},
{
"epoch": 0.01936655235021182,
"grad_norm": 0.7299101948738098,
"learning_rate": 0.00022784543606718227,
"loss": 6.8815,
"step": 72
},
{
"epoch": 0.019635532243964762,
"grad_norm": 0.9603165984153748,
"learning_rate": 0.00022571483066022657,
"loss": 7.1806,
"step": 73
},
{
"epoch": 0.019904512137717705,
"grad_norm": 0.7412318587303162,
"learning_rate": 0.0002235635255745762,
"loss": 6.8687,
"step": 74
},
{
"epoch": 0.020173492031470647,
"grad_norm": 1.5883235931396484,
"learning_rate": 0.00022139210895556104,
"loss": 6.6477,
"step": 75
},
{
"epoch": 0.020173492031470647,
"eval_loss": 7.266612529754639,
"eval_runtime": 1.1026,
"eval_samples_per_second": 45.347,
"eval_steps_per_second": 6.349,
"step": 75
},
{
"epoch": 0.02044247192522359,
"grad_norm": 0.874472975730896,
"learning_rate": 0.00021920117444680317,
"loss": 6.5988,
"step": 76
},
{
"epoch": 0.02071145181897653,
"grad_norm": 1.0724687576293945,
"learning_rate": 0.00021699132102792097,
"loss": 7.0543,
"step": 77
},
{
"epoch": 0.020980431712729473,
"grad_norm": 0.7699567079544067,
"learning_rate": 0.0002147631528507739,
"loss": 7.1625,
"step": 78
},
{
"epoch": 0.021249411606482416,
"grad_norm": 0.622986912727356,
"learning_rate": 0.00021251727907429355,
"loss": 7.4872,
"step": 79
},
{
"epoch": 0.021518391500235358,
"grad_norm": 0.9322075843811035,
"learning_rate": 0.0002102543136979454,
"loss": 7.0894,
"step": 80
},
{
"epoch": 0.0217873713939883,
"grad_norm": 0.6225010752677917,
"learning_rate": 0.0002079748753938678,
"loss": 6.9691,
"step": 81
},
{
"epoch": 0.022056351287741242,
"grad_norm": 0.9002844095230103,
"learning_rate": 0.0002056795873377331,
"loss": 6.5822,
"step": 82
},
{
"epoch": 0.022325331181494185,
"grad_norm": 0.6348511576652527,
"learning_rate": 0.00020336907703837748,
"loss": 6.8699,
"step": 83
},
{
"epoch": 0.022594311075247127,
"grad_norm": 0.5231301188468933,
"learning_rate": 0.00020104397616624645,
"loss": 6.8725,
"step": 84
},
{
"epoch": 0.022863290969000066,
"grad_norm": 0.68709397315979,
"learning_rate": 0.00019870492038070252,
"loss": 6.7312,
"step": 85
},
{
"epoch": 0.023132270862753008,
"grad_norm": 0.47030457854270935,
"learning_rate": 0.0001963525491562421,
"loss": 6.7572,
"step": 86
},
{
"epoch": 0.02340125075650595,
"grad_norm": 0.7477748394012451,
"learning_rate": 0.0001939875056076697,
"loss": 7.014,
"step": 87
},
{
"epoch": 0.023670230650258892,
"grad_norm": 0.9135372638702393,
"learning_rate": 0.00019161043631427666,
"loss": 6.8656,
"step": 88
},
{
"epoch": 0.023939210544011835,
"grad_norm": 0.7729247212409973,
"learning_rate": 0.00018922199114307294,
"loss": 6.8811,
"step": 89
},
{
"epoch": 0.024208190437764777,
"grad_norm": 0.8843768835067749,
"learning_rate": 0.00018682282307111987,
"loss": 6.8623,
"step": 90
},
{
"epoch": 0.02447717033151772,
"grad_norm": 0.5804451107978821,
"learning_rate": 0.00018441358800701273,
"loss": 6.8921,
"step": 91
},
{
"epoch": 0.02474615022527066,
"grad_norm": 0.9439037442207336,
"learning_rate": 0.00018199494461156203,
"loss": 7.4667,
"step": 92
},
{
"epoch": 0.025015130119023603,
"grad_norm": 0.6652296185493469,
"learning_rate": 0.000179567554117722,
"loss": 7.1213,
"step": 93
},
{
"epoch": 0.025284110012776546,
"grad_norm": 0.9313641786575317,
"learning_rate": 0.00017713208014981648,
"loss": 6.9055,
"step": 94
},
{
"epoch": 0.025553089906529488,
"grad_norm": 0.5718141198158264,
"learning_rate": 0.00017468918854211007,
"loss": 7.4156,
"step": 95
},
{
"epoch": 0.02582206980028243,
"grad_norm": 0.90057772397995,
"learning_rate": 0.00017223954715677627,
"loss": 7.0547,
"step": 96
},
{
"epoch": 0.026091049694035372,
"grad_norm": 0.8088359236717224,
"learning_rate": 0.00016978382570131034,
"loss": 6.8638,
"step": 97
},
{
"epoch": 0.02636002958778831,
"grad_norm": 0.7129319310188293,
"learning_rate": 0.00016732269554543794,
"loss": 7.0757,
"step": 98
},
{
"epoch": 0.026629009481541253,
"grad_norm": 0.7212324142456055,
"learning_rate": 0.00016485682953756942,
"loss": 7.3563,
"step": 99
},
{
"epoch": 0.026897989375294196,
"grad_norm": 1.3920371532440186,
"learning_rate": 0.00016238690182084986,
"loss": 7.3419,
"step": 100
},
{
"epoch": 0.026897989375294196,
"eval_loss": 7.306600570678711,
"eval_runtime": 1.1005,
"eval_samples_per_second": 45.435,
"eval_steps_per_second": 6.361,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 87404486197248.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}