{
"best_metric": 0.623828649520874,
"best_model_checkpoint": "miner_id_24/checkpoint-75",
"epoch": 0.12319163593629695,
"eval_steps": 25,
"global_step": 95,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0012967540624873365,
"grad_norm": 16.785573959350586,
"learning_rate": 3.3333333333333335e-05,
"loss": 43.7265,
"step": 1
},
{
"epoch": 0.0012967540624873365,
"eval_loss": 1.705428123474121,
"eval_runtime": 2.9276,
"eval_samples_per_second": 17.079,
"eval_steps_per_second": 4.441,
"step": 1
},
{
"epoch": 0.002593508124974673,
"grad_norm": 28.982810974121094,
"learning_rate": 6.666666666666667e-05,
"loss": 42.8946,
"step": 2
},
{
"epoch": 0.0038902621874620093,
"grad_norm": 32.87180709838867,
"learning_rate": 0.0001,
"loss": 40.5252,
"step": 3
},
{
"epoch": 0.005187016249949346,
"grad_norm": 44.707279205322266,
"learning_rate": 9.997376600647783e-05,
"loss": 37.9476,
"step": 4
},
{
"epoch": 0.006483770312436682,
"grad_norm": 36.72836685180664,
"learning_rate": 9.989509461357426e-05,
"loss": 32.5188,
"step": 5
},
{
"epoch": 0.0077805243749240185,
"grad_norm": 38.171810150146484,
"learning_rate": 9.976407754861426e-05,
"loss": 25.5456,
"step": 6
},
{
"epoch": 0.009077278437411355,
"grad_norm": 57.68375015258789,
"learning_rate": 9.958086757163489e-05,
"loss": 23.8212,
"step": 7
},
{
"epoch": 0.010374032499898692,
"grad_norm": 31.480140686035156,
"learning_rate": 9.934567829727386e-05,
"loss": 17.8454,
"step": 8
},
{
"epoch": 0.011670786562386027,
"grad_norm": 31.195682525634766,
"learning_rate": 9.905878394570453e-05,
"loss": 21.7711,
"step": 9
},
{
"epoch": 0.012967540624873364,
"grad_norm": 27.705665588378906,
"learning_rate": 9.872051902290737e-05,
"loss": 27.2241,
"step": 10
},
{
"epoch": 0.0142642946873607,
"grad_norm": 27.043445587158203,
"learning_rate": 9.833127793065098e-05,
"loss": 33.6918,
"step": 11
},
{
"epoch": 0.015561048749848037,
"grad_norm": 33.10136413574219,
"learning_rate": 9.789151450663723e-05,
"loss": 42.3679,
"step": 12
},
{
"epoch": 0.016857802812335374,
"grad_norm": 22.521921157836914,
"learning_rate": 9.740174149534693e-05,
"loss": 41.087,
"step": 13
},
{
"epoch": 0.01815455687482271,
"grad_norm": 19.09137535095215,
"learning_rate": 9.686252995020249e-05,
"loss": 34.6201,
"step": 14
},
{
"epoch": 0.019451310937310045,
"grad_norm": 15.93561840057373,
"learning_rate": 9.627450856774539e-05,
"loss": 28.9021,
"step": 15
},
{
"epoch": 0.020748064999797384,
"grad_norm": 13.309250831604004,
"learning_rate": 9.563836295460398e-05,
"loss": 19.9195,
"step": 16
},
{
"epoch": 0.02204481906228472,
"grad_norm": 10.42359447479248,
"learning_rate": 9.495483482810688e-05,
"loss": 11.9618,
"step": 17
},
{
"epoch": 0.023341573124772055,
"grad_norm": 15.537765502929688,
"learning_rate": 9.422472115147382e-05,
"loss": 14.3353,
"step": 18
},
{
"epoch": 0.02463832718725939,
"grad_norm": 11.342974662780762,
"learning_rate": 9.3448873204592e-05,
"loss": 9.9775,
"step": 19
},
{
"epoch": 0.02593508124974673,
"grad_norm": 14.146580696105957,
"learning_rate": 9.2628195591462e-05,
"loss": 14.0379,
"step": 20
},
{
"epoch": 0.027231835312234064,
"grad_norm": 15.346806526184082,
"learning_rate": 9.176364518546989e-05,
"loss": 13.1845,
"step": 21
},
{
"epoch": 0.0285285893747214,
"grad_norm": 14.695765495300293,
"learning_rate": 9.08562300137157e-05,
"loss": 18.4925,
"step": 22
},
{
"epoch": 0.029825343437208735,
"grad_norm": 15.247776985168457,
"learning_rate": 8.990700808169889e-05,
"loss": 23.5953,
"step": 23
},
{
"epoch": 0.031122097499696074,
"grad_norm": 19.08445930480957,
"learning_rate": 8.891708613973126e-05,
"loss": 37.6228,
"step": 24
},
{
"epoch": 0.03241885156218341,
"grad_norm": 23.201744079589844,
"learning_rate": 8.788761839251559e-05,
"loss": 35.105,
"step": 25
},
{
"epoch": 0.03241885156218341,
"eval_loss": 0.7101312875747681,
"eval_runtime": 2.9691,
"eval_samples_per_second": 16.84,
"eval_steps_per_second": 4.378,
"step": 25
},
{
"epoch": 0.03371560562467075,
"grad_norm": 9.792278289794922,
"learning_rate": 8.681980515339464e-05,
"loss": 36.8223,
"step": 26
},
{
"epoch": 0.03501235968715808,
"grad_norm": 15.209010124206543,
"learning_rate": 8.571489144483944e-05,
"loss": 28.8723,
"step": 27
},
{
"epoch": 0.03630911374964542,
"grad_norm": 15.312687873840332,
"learning_rate": 8.457416554680877e-05,
"loss": 20.8671,
"step": 28
},
{
"epoch": 0.03760586781213276,
"grad_norm": 11.13228988647461,
"learning_rate": 8.339895749467238e-05,
"loss": 15.874,
"step": 29
},
{
"epoch": 0.03890262187462009,
"grad_norm": 12.170778274536133,
"learning_rate": 8.219063752844926e-05,
"loss": 9.841,
"step": 30
},
{
"epoch": 0.04019937593710743,
"grad_norm": 8.809076309204102,
"learning_rate": 8.095061449516903e-05,
"loss": 12.2626,
"step": 31
},
{
"epoch": 0.04149612999959477,
"grad_norm": 8.72727108001709,
"learning_rate": 7.968033420621935e-05,
"loss": 9.9618,
"step": 32
},
{
"epoch": 0.0427928840620821,
"grad_norm": 8.521458625793457,
"learning_rate": 7.838127775159452e-05,
"loss": 11.9306,
"step": 33
},
{
"epoch": 0.04408963812456944,
"grad_norm": 10.72695255279541,
"learning_rate": 7.705495977301078e-05,
"loss": 17.8682,
"step": 34
},
{
"epoch": 0.04538639218705677,
"grad_norm": 12.048954963684082,
"learning_rate": 7.570292669790186e-05,
"loss": 24.6443,
"step": 35
},
{
"epoch": 0.04668314624954411,
"grad_norm": 14.231986045837402,
"learning_rate": 7.43267549363537e-05,
"loss": 30.6334,
"step": 36
},
{
"epoch": 0.04797990031203145,
"grad_norm": 16.270370483398438,
"learning_rate": 7.292804904308087e-05,
"loss": 36.3834,
"step": 37
},
{
"epoch": 0.04927665437451878,
"grad_norm": 14.480351448059082,
"learning_rate": 7.150843984658754e-05,
"loss": 41.1031,
"step": 38
},
{
"epoch": 0.05057340843700612,
"grad_norm": 9.69229507446289,
"learning_rate": 7.006958254769438e-05,
"loss": 30.1462,
"step": 39
},
{
"epoch": 0.05187016249949346,
"grad_norm": 10.705002784729004,
"learning_rate": 6.861315478964841e-05,
"loss": 23.4604,
"step": 40
},
{
"epoch": 0.05316691656198079,
"grad_norm": 11.107697486877441,
"learning_rate": 6.714085470206609e-05,
"loss": 13.1305,
"step": 41
},
{
"epoch": 0.05446367062446813,
"grad_norm": 7.914254665374756,
"learning_rate": 6.56543989209901e-05,
"loss": 10.4824,
"step": 42
},
{
"epoch": 0.05576042468695547,
"grad_norm": 7.241426467895508,
"learning_rate": 6.415552058736854e-05,
"loss": 12.7372,
"step": 43
},
{
"epoch": 0.0570571787494428,
"grad_norm": 7.243592262268066,
"learning_rate": 6.264596732629e-05,
"loss": 8.7167,
"step": 44
},
{
"epoch": 0.05835393281193014,
"grad_norm": 8.968466758728027,
"learning_rate": 6.112749920933111e-05,
"loss": 11.5413,
"step": 45
},
{
"epoch": 0.05965068687441747,
"grad_norm": 10.867255210876465,
"learning_rate": 5.960188670239154e-05,
"loss": 14.4485,
"step": 46
},
{
"epoch": 0.06094744093690481,
"grad_norm": 10.397505760192871,
"learning_rate": 5.80709086014102e-05,
"loss": 14.4408,
"step": 47
},
{
"epoch": 0.06224419499939215,
"grad_norm": 11.32922077178955,
"learning_rate": 5.653634995836856e-05,
"loss": 21.16,
"step": 48
},
{
"epoch": 0.06354094906187949,
"grad_norm": 15.780669212341309,
"learning_rate": 5.500000000000001e-05,
"loss": 29.0683,
"step": 49
},
{
"epoch": 0.06483770312436682,
"grad_norm": 21.16714096069336,
"learning_rate": 5.346365004163145e-05,
"loss": 35.8908,
"step": 50
},
{
"epoch": 0.06483770312436682,
"eval_loss": 0.652353048324585,
"eval_runtime": 2.9375,
"eval_samples_per_second": 17.021,
"eval_steps_per_second": 4.426,
"step": 50
},
{
"epoch": 0.06613445718685415,
"grad_norm": 11.396697044372559,
"learning_rate": 5.192909139858981e-05,
"loss": 32.6268,
"step": 51
},
{
"epoch": 0.0674312112493415,
"grad_norm": 13.8756742477417,
"learning_rate": 5.0398113297608465e-05,
"loss": 30.8687,
"step": 52
},
{
"epoch": 0.06872796531182883,
"grad_norm": 11.302550315856934,
"learning_rate": 4.887250079066892e-05,
"loss": 20.7947,
"step": 53
},
{
"epoch": 0.07002471937431616,
"grad_norm": 8.959757804870605,
"learning_rate": 4.7354032673710005e-05,
"loss": 14.8,
"step": 54
},
{
"epoch": 0.0713214734368035,
"grad_norm": 9.0787992477417,
"learning_rate": 4.584447941263149e-05,
"loss": 10.1572,
"step": 55
},
{
"epoch": 0.07261822749929084,
"grad_norm": 6.953489303588867,
"learning_rate": 4.43456010790099e-05,
"loss": 6.886,
"step": 56
},
{
"epoch": 0.07391498156177817,
"grad_norm": 6.784447193145752,
"learning_rate": 4.285914529793391e-05,
"loss": 10.6766,
"step": 57
},
{
"epoch": 0.07521173562426552,
"grad_norm": 7.345515251159668,
"learning_rate": 4.13868452103516e-05,
"loss": 13.2427,
"step": 58
},
{
"epoch": 0.07650848968675285,
"grad_norm": 10.04677963256836,
"learning_rate": 3.9930417452305626e-05,
"loss": 15.7304,
"step": 59
},
{
"epoch": 0.07780524374924018,
"grad_norm": 13.683038711547852,
"learning_rate": 3.8491560153412466e-05,
"loss": 18.4109,
"step": 60
},
{
"epoch": 0.07910199781172753,
"grad_norm": 12.892768859863281,
"learning_rate": 3.707195095691913e-05,
"loss": 22.273,
"step": 61
},
{
"epoch": 0.08039875187421486,
"grad_norm": 17.1517391204834,
"learning_rate": 3.567324506364632e-05,
"loss": 32.1346,
"step": 62
},
{
"epoch": 0.08169550593670219,
"grad_norm": 18.07764434814453,
"learning_rate": 3.4297073302098156e-05,
"loss": 39.0696,
"step": 63
},
{
"epoch": 0.08299225999918954,
"grad_norm": 10.018913269042969,
"learning_rate": 3.2945040226989244e-05,
"loss": 31.6719,
"step": 64
},
{
"epoch": 0.08428901406167687,
"grad_norm": 9.357498168945312,
"learning_rate": 3.16187222484055e-05,
"loss": 19.8276,
"step": 65
},
{
"epoch": 0.0855857681241642,
"grad_norm": 6.495688438415527,
"learning_rate": 3.0319665793780648e-05,
"loss": 12.7109,
"step": 66
},
{
"epoch": 0.08688252218665153,
"grad_norm": 6.495363712310791,
"learning_rate": 2.9049385504830985e-05,
"loss": 10.4677,
"step": 67
},
{
"epoch": 0.08817927624913888,
"grad_norm": 7.190054416656494,
"learning_rate": 2.7809362471550748e-05,
"loss": 10.2256,
"step": 68
},
{
"epoch": 0.08947603031162621,
"grad_norm": 7.307028293609619,
"learning_rate": 2.660104250532764e-05,
"loss": 14.0544,
"step": 69
},
{
"epoch": 0.09077278437411354,
"grad_norm": 4.946413516998291,
"learning_rate": 2.5425834453191232e-05,
"loss": 5.7595,
"step": 70
},
{
"epoch": 0.09206953843660089,
"grad_norm": 7.481423854827881,
"learning_rate": 2.4285108555160577e-05,
"loss": 13.2307,
"step": 71
},
{
"epoch": 0.09336629249908822,
"grad_norm": 9.990346908569336,
"learning_rate": 2.3180194846605367e-05,
"loss": 19.5935,
"step": 72
},
{
"epoch": 0.09466304656157555,
"grad_norm": 9.962550163269043,
"learning_rate": 2.2112381607484417e-05,
"loss": 20.8605,
"step": 73
},
{
"epoch": 0.0959598006240629,
"grad_norm": 15.367573738098145,
"learning_rate": 2.1082913860268765e-05,
"loss": 32.2455,
"step": 74
},
{
"epoch": 0.09725655468655023,
"grad_norm": 23.091154098510742,
"learning_rate": 2.0092991918301108e-05,
"loss": 34.7624,
"step": 75
},
{
"epoch": 0.09725655468655023,
"eval_loss": 0.623828649520874,
"eval_runtime": 2.9349,
"eval_samples_per_second": 17.036,
"eval_steps_per_second": 4.429,
"step": 75
},
{
"epoch": 0.09855330874903756,
"grad_norm": 8.681092262268066,
"learning_rate": 1.91437699862843e-05,
"loss": 35.4697,
"step": 76
},
{
"epoch": 0.0998500628115249,
"grad_norm": 8.077815055847168,
"learning_rate": 1.8236354814530112e-05,
"loss": 26.0121,
"step": 77
},
{
"epoch": 0.10114681687401224,
"grad_norm": 9.344439506530762,
"learning_rate": 1.7371804408538024e-05,
"loss": 17.0227,
"step": 78
},
{
"epoch": 0.10244357093649957,
"grad_norm": 7.068110466003418,
"learning_rate": 1.6551126795408016e-05,
"loss": 12.5787,
"step": 79
},
{
"epoch": 0.10374032499898692,
"grad_norm": 6.340619087219238,
"learning_rate": 1.577527884852619e-05,
"loss": 9.7321,
"step": 80
},
{
"epoch": 0.10503707906147425,
"grad_norm": 8.806065559387207,
"learning_rate": 1.5045165171893116e-05,
"loss": 11.5671,
"step": 81
},
{
"epoch": 0.10633383312396158,
"grad_norm": 7.022125244140625,
"learning_rate": 1.4361637045396029e-05,
"loss": 10.8947,
"step": 82
},
{
"epoch": 0.10763058718644893,
"grad_norm": 8.060321807861328,
"learning_rate": 1.3725491432254624e-05,
"loss": 10.6176,
"step": 83
},
{
"epoch": 0.10892734124893626,
"grad_norm": 8.609227180480957,
"learning_rate": 1.313747004979751e-05,
"loss": 15.1299,
"step": 84
},
{
"epoch": 0.11022409531142359,
"grad_norm": 9.17025089263916,
"learning_rate": 1.2598258504653081e-05,
"loss": 18.7154,
"step": 85
},
{
"epoch": 0.11152084937391094,
"grad_norm": 13.300969123840332,
"learning_rate": 1.2108485493362765e-05,
"loss": 25.0871,
"step": 86
},
{
"epoch": 0.11281760343639827,
"grad_norm": 17.63905906677246,
"learning_rate": 1.1668722069349041e-05,
"loss": 30.7672,
"step": 87
},
{
"epoch": 0.1141143574988856,
"grad_norm": 23.852336883544922,
"learning_rate": 1.1279480977092635e-05,
"loss": 39.7271,
"step": 88
},
{
"epoch": 0.11541111156137294,
"grad_norm": 8.638887405395508,
"learning_rate": 1.094121605429547e-05,
"loss": 27.7971,
"step": 89
},
{
"epoch": 0.11670786562386028,
"grad_norm": 7.553443908691406,
"learning_rate": 1.0654321702726141e-05,
"loss": 18.6966,
"step": 90
},
{
"epoch": 0.11800461968634761,
"grad_norm": 7.639990329742432,
"learning_rate": 1.0419132428365116e-05,
"loss": 15.038,
"step": 91
},
{
"epoch": 0.11930137374883494,
"grad_norm": 6.556589603424072,
"learning_rate": 1.0235922451385733e-05,
"loss": 13.3639,
"step": 92
},
{
"epoch": 0.12059812781132229,
"grad_norm": 7.983014106750488,
"learning_rate": 1.0104905386425733e-05,
"loss": 12.7111,
"step": 93
},
{
"epoch": 0.12189488187380962,
"grad_norm": 6.714142799377441,
"learning_rate": 1.002623399352217e-05,
"loss": 10.5141,
"step": 94
},
{
"epoch": 0.12319163593629695,
"grad_norm": 8.24644947052002,
"learning_rate": 1e-05,
"loss": 12.6936,
"step": 95
}
],
"logging_steps": 1,
"max_steps": 95,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.993120808697856e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}