{
"best_metric": 1.3074243068695068,
"best_model_checkpoint": "miner_id_24/checkpoint-75",
"epoch": 2.193548387096774,
"eval_steps": 25,
"global_step": 95,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022939068100358423,
"grad_norm": 262.8689880371094,
"learning_rate": 3.3333333333333335e-05,
"loss": 66.2981,
"step": 1
},
{
"epoch": 0.022939068100358423,
"eval_loss": 2.5055336952209473,
"eval_runtime": 4.7241,
"eval_samples_per_second": 10.584,
"eval_steps_per_second": 2.752,
"step": 1
},
{
"epoch": 0.045878136200716846,
"grad_norm": 341.7585754394531,
"learning_rate": 6.666666666666667e-05,
"loss": 68.6937,
"step": 2
},
{
"epoch": 0.06881720430107527,
"grad_norm": 207.7501220703125,
"learning_rate": 0.0001,
"loss": 59.574,
"step": 3
},
{
"epoch": 0.09175627240143369,
"grad_norm": 110.24981689453125,
"learning_rate": 9.997376600647783e-05,
"loss": 54.2645,
"step": 4
},
{
"epoch": 0.11469534050179211,
"grad_norm": 99.47808074951172,
"learning_rate": 9.989509461357426e-05,
"loss": 54.3133,
"step": 5
},
{
"epoch": 0.13763440860215054,
"grad_norm": 92.41586303710938,
"learning_rate": 9.976407754861426e-05,
"loss": 52.4341,
"step": 6
},
{
"epoch": 0.16057347670250896,
"grad_norm": 78.24821472167969,
"learning_rate": 9.958086757163489e-05,
"loss": 52.4227,
"step": 7
},
{
"epoch": 0.18351254480286738,
"grad_norm": 78.63040161132812,
"learning_rate": 9.934567829727386e-05,
"loss": 47.8582,
"step": 8
},
{
"epoch": 0.2064516129032258,
"grad_norm": 85.06551361083984,
"learning_rate": 9.905878394570453e-05,
"loss": 47.9548,
"step": 9
},
{
"epoch": 0.22939068100358423,
"grad_norm": 95.92952728271484,
"learning_rate": 9.872051902290737e-05,
"loss": 46.4105,
"step": 10
},
{
"epoch": 0.2523297491039427,
"grad_norm": 76.65313720703125,
"learning_rate": 9.833127793065098e-05,
"loss": 41.4597,
"step": 11
},
{
"epoch": 0.2752688172043011,
"grad_norm": 71.17744445800781,
"learning_rate": 9.789151450663723e-05,
"loss": 50.646,
"step": 12
},
{
"epoch": 0.2982078853046595,
"grad_norm": 60.47771453857422,
"learning_rate": 9.740174149534693e-05,
"loss": 48.6955,
"step": 13
},
{
"epoch": 0.3211469534050179,
"grad_norm": 62.11365509033203,
"learning_rate": 9.686252995020249e-05,
"loss": 49.3354,
"step": 14
},
{
"epoch": 0.34408602150537637,
"grad_norm": 57.970787048339844,
"learning_rate": 9.627450856774539e-05,
"loss": 47.0324,
"step": 15
},
{
"epoch": 0.36702508960573477,
"grad_norm": 67.41333770751953,
"learning_rate": 9.563836295460398e-05,
"loss": 45.6518,
"step": 16
},
{
"epoch": 0.3899641577060932,
"grad_norm": 66.67488861083984,
"learning_rate": 9.495483482810688e-05,
"loss": 47.9687,
"step": 17
},
{
"epoch": 0.4129032258064516,
"grad_norm": 64.26166534423828,
"learning_rate": 9.422472115147382e-05,
"loss": 44.4571,
"step": 18
},
{
"epoch": 0.43584229390681006,
"grad_norm": 65.23554229736328,
"learning_rate": 9.3448873204592e-05,
"loss": 46.1585,
"step": 19
},
{
"epoch": 0.45878136200716846,
"grad_norm": 71.3513412475586,
"learning_rate": 9.2628195591462e-05,
"loss": 46.1975,
"step": 20
},
{
"epoch": 0.4817204301075269,
"grad_norm": 69.01107025146484,
"learning_rate": 9.176364518546989e-05,
"loss": 43.0764,
"step": 21
},
{
"epoch": 0.5046594982078854,
"grad_norm": 62.19290542602539,
"learning_rate": 9.08562300137157e-05,
"loss": 43.0619,
"step": 22
},
{
"epoch": 0.5275985663082438,
"grad_norm": 53.53089904785156,
"learning_rate": 8.990700808169889e-05,
"loss": 47.629,
"step": 23
},
{
"epoch": 0.5505376344086022,
"grad_norm": 50.920841217041016,
"learning_rate": 8.891708613973126e-05,
"loss": 46.6122,
"step": 24
},
{
"epoch": 0.5734767025089605,
"grad_norm": 60.60609436035156,
"learning_rate": 8.788761839251559e-05,
"loss": 49.2598,
"step": 25
},
{
"epoch": 0.5734767025089605,
"eval_loss": 1.374174952507019,
"eval_runtime": 4.8014,
"eval_samples_per_second": 10.414,
"eval_steps_per_second": 2.708,
"step": 25
},
{
"epoch": 0.596415770609319,
"grad_norm": 58.53727722167969,
"learning_rate": 8.681980515339464e-05,
"loss": 46.5847,
"step": 26
},
{
"epoch": 0.6193548387096774,
"grad_norm": 55.56049346923828,
"learning_rate": 8.571489144483944e-05,
"loss": 45.1878,
"step": 27
},
{
"epoch": 0.6422939068100358,
"grad_norm": 63.621734619140625,
"learning_rate": 8.457416554680877e-05,
"loss": 46.4274,
"step": 28
},
{
"epoch": 0.6652329749103942,
"grad_norm": 61.650733947753906,
"learning_rate": 8.339895749467238e-05,
"loss": 42.2788,
"step": 29
},
{
"epoch": 0.6881720430107527,
"grad_norm": 59.424530029296875,
"learning_rate": 8.219063752844926e-05,
"loss": 42.7856,
"step": 30
},
{
"epoch": 0.7111111111111111,
"grad_norm": 65.62605285644531,
"learning_rate": 8.095061449516903e-05,
"loss": 45.3296,
"step": 31
},
{
"epoch": 0.7340501792114695,
"grad_norm": 62.037506103515625,
"learning_rate": 7.968033420621935e-05,
"loss": 38.7791,
"step": 32
},
{
"epoch": 0.7569892473118279,
"grad_norm": 54.389366149902344,
"learning_rate": 7.838127775159452e-05,
"loss": 43.6419,
"step": 33
},
{
"epoch": 0.7799283154121864,
"grad_norm": 48.2963981628418,
"learning_rate": 7.705495977301078e-05,
"loss": 47.0971,
"step": 34
},
{
"epoch": 0.8028673835125448,
"grad_norm": 47.147064208984375,
"learning_rate": 7.570292669790186e-05,
"loss": 43.7131,
"step": 35
},
{
"epoch": 0.8258064516129032,
"grad_norm": 49.53687286376953,
"learning_rate": 7.43267549363537e-05,
"loss": 44.4156,
"step": 36
},
{
"epoch": 0.8487455197132616,
"grad_norm": 47.156898498535156,
"learning_rate": 7.292804904308087e-05,
"loss": 39.8579,
"step": 37
},
{
"epoch": 0.8716845878136201,
"grad_norm": 49.07862854003906,
"learning_rate": 7.150843984658754e-05,
"loss": 42.7801,
"step": 38
},
{
"epoch": 0.8946236559139785,
"grad_norm": 54.775672912597656,
"learning_rate": 7.006958254769438e-05,
"loss": 43.8881,
"step": 39
},
{
"epoch": 0.9175627240143369,
"grad_norm": 53.687408447265625,
"learning_rate": 6.861315478964841e-05,
"loss": 40.9659,
"step": 40
},
{
"epoch": 0.9405017921146953,
"grad_norm": 62.848514556884766,
"learning_rate": 6.714085470206609e-05,
"loss": 42.5454,
"step": 41
},
{
"epoch": 0.9634408602150538,
"grad_norm": 62.29214096069336,
"learning_rate": 6.56543989209901e-05,
"loss": 39.6969,
"step": 42
},
{
"epoch": 0.9863799283154122,
"grad_norm": 70.89318084716797,
"learning_rate": 6.415552058736854e-05,
"loss": 40.2985,
"step": 43
},
{
"epoch": 1.0164874551971326,
"grad_norm": 46.05546188354492,
"learning_rate": 6.264596732629e-05,
"loss": 40.5047,
"step": 44
},
{
"epoch": 1.039426523297491,
"grad_norm": 55.51439666748047,
"learning_rate": 6.112749920933111e-05,
"loss": 38.4444,
"step": 45
},
{
"epoch": 1.0623655913978494,
"grad_norm": 42.752933502197266,
"learning_rate": 5.960188670239154e-05,
"loss": 36.3928,
"step": 46
},
{
"epoch": 1.0853046594982079,
"grad_norm": 44.83907699584961,
"learning_rate": 5.80709086014102e-05,
"loss": 38.2118,
"step": 47
},
{
"epoch": 1.1082437275985664,
"grad_norm": 48.16358184814453,
"learning_rate": 5.653634995836856e-05,
"loss": 33.5141,
"step": 48
},
{
"epoch": 1.1311827956989247,
"grad_norm": 45.349609375,
"learning_rate": 5.500000000000001e-05,
"loss": 31.2986,
"step": 49
},
{
"epoch": 1.1541218637992832,
"grad_norm": 46.83755874633789,
"learning_rate": 5.346365004163145e-05,
"loss": 33.2819,
"step": 50
},
{
"epoch": 1.1541218637992832,
"eval_loss": 1.3130438327789307,
"eval_runtime": 4.775,
"eval_samples_per_second": 10.471,
"eval_steps_per_second": 2.723,
"step": 50
},
{
"epoch": 1.1770609318996417,
"grad_norm": 46.695682525634766,
"learning_rate": 5.192909139858981e-05,
"loss": 30.7516,
"step": 51
},
{
"epoch": 1.2,
"grad_norm": 51.038291931152344,
"learning_rate": 5.0398113297608465e-05,
"loss": 29.5743,
"step": 52
},
{
"epoch": 1.2229390681003585,
"grad_norm": 56.04834747314453,
"learning_rate": 4.887250079066892e-05,
"loss": 28.3398,
"step": 53
},
{
"epoch": 1.2458781362007167,
"grad_norm": 58.13565444946289,
"learning_rate": 4.7354032673710005e-05,
"loss": 25.4231,
"step": 54
},
{
"epoch": 1.2688172043010753,
"grad_norm": 57.815208435058594,
"learning_rate": 4.584447941263149e-05,
"loss": 38.2288,
"step": 55
},
{
"epoch": 1.2917562724014338,
"grad_norm": 60.06364440917969,
"learning_rate": 4.43456010790099e-05,
"loss": 37.1063,
"step": 56
},
{
"epoch": 1.314695340501792,
"grad_norm": 54.02146530151367,
"learning_rate": 4.285914529793391e-05,
"loss": 35.0785,
"step": 57
},
{
"epoch": 1.3376344086021505,
"grad_norm": 50.7199592590332,
"learning_rate": 4.13868452103516e-05,
"loss": 32.4164,
"step": 58
},
{
"epoch": 1.360573476702509,
"grad_norm": 49.94532012939453,
"learning_rate": 3.9930417452305626e-05,
"loss": 31.3014,
"step": 59
},
{
"epoch": 1.3835125448028673,
"grad_norm": 53.36236572265625,
"learning_rate": 3.8491560153412466e-05,
"loss": 32.4417,
"step": 60
},
{
"epoch": 1.4064516129032258,
"grad_norm": 52.96565246582031,
"learning_rate": 3.707195095691913e-05,
"loss": 31.0806,
"step": 61
},
{
"epoch": 1.4293906810035844,
"grad_norm": 52.68083572387695,
"learning_rate": 3.567324506364632e-05,
"loss": 30.1467,
"step": 62
},
{
"epoch": 1.4523297491039426,
"grad_norm": 55.56118392944336,
"learning_rate": 3.4297073302098156e-05,
"loss": 29.6131,
"step": 63
},
{
"epoch": 1.4752688172043011,
"grad_norm": 60.30636215209961,
"learning_rate": 3.2945040226989244e-05,
"loss": 30.151,
"step": 64
},
{
"epoch": 1.4982078853046594,
"grad_norm": 54.61465835571289,
"learning_rate": 3.16187222484055e-05,
"loss": 26.281,
"step": 65
},
{
"epoch": 1.521146953405018,
"grad_norm": 48.4765510559082,
"learning_rate": 3.0319665793780648e-05,
"loss": 36.4622,
"step": 66
},
{
"epoch": 1.5440860215053762,
"grad_norm": 54.45204162597656,
"learning_rate": 2.9049385504830985e-05,
"loss": 36.89,
"step": 67
},
{
"epoch": 1.5670250896057347,
"grad_norm": 52.27381134033203,
"learning_rate": 2.7809362471550748e-05,
"loss": 32.998,
"step": 68
},
{
"epoch": 1.5899641577060932,
"grad_norm": 54.354190826416016,
"learning_rate": 2.660104250532764e-05,
"loss": 31.7506,
"step": 69
},
{
"epoch": 1.6129032258064515,
"grad_norm": 55.66099548339844,
"learning_rate": 2.5425834453191232e-05,
"loss": 33.7973,
"step": 70
},
{
"epoch": 1.63584229390681,
"grad_norm": 57.908321380615234,
"learning_rate": 2.4285108555160577e-05,
"loss": 32.1609,
"step": 71
},
{
"epoch": 1.6587813620071685,
"grad_norm": 54.846675872802734,
"learning_rate": 2.3180194846605367e-05,
"loss": 30.6282,
"step": 72
},
{
"epoch": 1.6817204301075268,
"grad_norm": 56.08439636230469,
"learning_rate": 2.2112381607484417e-05,
"loss": 29.2229,
"step": 73
},
{
"epoch": 1.7046594982078853,
"grad_norm": 61.20675277709961,
"learning_rate": 2.1082913860268765e-05,
"loss": 29.949,
"step": 74
},
{
"epoch": 1.7275985663082438,
"grad_norm": 59.61654281616211,
"learning_rate": 2.0092991918301108e-05,
"loss": 25.5925,
"step": 75
},
{
"epoch": 1.7275985663082438,
"eval_loss": 1.3074243068695068,
"eval_runtime": 4.7759,
"eval_samples_per_second": 10.469,
"eval_steps_per_second": 2.722,
"step": 75
},
{
"epoch": 1.750537634408602,
"grad_norm": 55.19490051269531,
"learning_rate": 1.91437699862843e-05,
"loss": 28.7145,
"step": 76
},
{
"epoch": 1.7734767025089606,
"grad_norm": 54.55601501464844,
"learning_rate": 1.8236354814530112e-05,
"loss": 37.6403,
"step": 77
},
{
"epoch": 1.7964157706093191,
"grad_norm": 53.513118743896484,
"learning_rate": 1.7371804408538024e-05,
"loss": 34.2424,
"step": 78
},
{
"epoch": 1.8193548387096774,
"grad_norm": 53.09416198730469,
"learning_rate": 1.6551126795408016e-05,
"loss": 34.4384,
"step": 79
},
{
"epoch": 1.8422939068100357,
"grad_norm": 54.03006362915039,
"learning_rate": 1.577527884852619e-05,
"loss": 31.8524,
"step": 80
},
{
"epoch": 1.8652329749103944,
"grad_norm": 53.396759033203125,
"learning_rate": 1.5045165171893116e-05,
"loss": 31.2347,
"step": 81
},
{
"epoch": 1.8881720430107527,
"grad_norm": 54.46861267089844,
"learning_rate": 1.4361637045396029e-05,
"loss": 31.0389,
"step": 82
},
{
"epoch": 1.911111111111111,
"grad_norm": 56.113277435302734,
"learning_rate": 1.3725491432254624e-05,
"loss": 27.2539,
"step": 83
},
{
"epoch": 1.9340501792114695,
"grad_norm": 57.12498474121094,
"learning_rate": 1.313747004979751e-05,
"loss": 29.4038,
"step": 84
},
{
"epoch": 1.956989247311828,
"grad_norm": 61.591796875,
"learning_rate": 1.2598258504653081e-05,
"loss": 29.9574,
"step": 85
},
{
"epoch": 1.9799283154121863,
"grad_norm": 65.86859893798828,
"learning_rate": 1.2108485493362765e-05,
"loss": 27.3837,
"step": 86
},
{
"epoch": 2.010035842293907,
"grad_norm": 53.8763313293457,
"learning_rate": 1.1668722069349041e-05,
"loss": 30.1786,
"step": 87
},
{
"epoch": 2.032974910394265,
"grad_norm": 46.642704010009766,
"learning_rate": 1.1279480977092635e-05,
"loss": 32.0186,
"step": 88
},
{
"epoch": 2.055913978494624,
"grad_norm": 47.648860931396484,
"learning_rate": 1.094121605429547e-05,
"loss": 29.1689,
"step": 89
},
{
"epoch": 2.078853046594982,
"grad_norm": 51.06687927246094,
"learning_rate": 1.0654321702726141e-05,
"loss": 27.9636,
"step": 90
},
{
"epoch": 2.1017921146953404,
"grad_norm": 59.35122299194336,
"learning_rate": 1.0419132428365116e-05,
"loss": 26.5625,
"step": 91
},
{
"epoch": 2.1247311827956987,
"grad_norm": 51.24475860595703,
"learning_rate": 1.0235922451385733e-05,
"loss": 26.1366,
"step": 92
},
{
"epoch": 2.1476702508960575,
"grad_norm": 53.65019607543945,
"learning_rate": 1.0104905386425733e-05,
"loss": 24.5783,
"step": 93
},
{
"epoch": 2.1706093189964157,
"grad_norm": 53.362648010253906,
"learning_rate": 1.002623399352217e-05,
"loss": 22.3915,
"step": 94
},
{
"epoch": 2.193548387096774,
"grad_norm": 55.55318069458008,
"learning_rate": 1e-05,
"loss": 23.159,
"step": 95
}
],
"logging_steps": 1,
"max_steps": 95,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0750240542739661e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}