{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.704225352112676,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007042253521126761,
"grad_norm": 39.11391067504883,
"learning_rate": 1e-05,
"loss": 10.2157,
"step": 1
},
{
"epoch": 0.007042253521126761,
"eval_loss": 10.0960111618042,
"eval_runtime": 13.1147,
"eval_samples_per_second": 9.15,
"eval_steps_per_second": 1.144,
"step": 1
},
{
"epoch": 0.014084507042253521,
"grad_norm": 35.15620040893555,
"learning_rate": 2e-05,
"loss": 10.0954,
"step": 2
},
{
"epoch": 0.02112676056338028,
"grad_norm": 37.665611267089844,
"learning_rate": 3e-05,
"loss": 10.1154,
"step": 3
},
{
"epoch": 0.028169014084507043,
"grad_norm": 34.913848876953125,
"learning_rate": 4e-05,
"loss": 9.3856,
"step": 4
},
{
"epoch": 0.035211267605633804,
"grad_norm": 41.44451141357422,
"learning_rate": 5e-05,
"loss": 8.5514,
"step": 5
},
{
"epoch": 0.04225352112676056,
"grad_norm": 34.1007194519043,
"learning_rate": 6e-05,
"loss": 6.3474,
"step": 6
},
{
"epoch": 0.04929577464788732,
"grad_norm": 21.132896423339844,
"learning_rate": 7e-05,
"loss": 4.1504,
"step": 7
},
{
"epoch": 0.056338028169014086,
"grad_norm": 22.86393165588379,
"learning_rate": 8e-05,
"loss": 2.7505,
"step": 8
},
{
"epoch": 0.06338028169014084,
"grad_norm": 20.914813995361328,
"learning_rate": 9e-05,
"loss": 1.2445,
"step": 9
},
{
"epoch": 0.06338028169014084,
"eval_loss": 0.5339353680610657,
"eval_runtime": 13.1745,
"eval_samples_per_second": 9.109,
"eval_steps_per_second": 1.139,
"step": 9
},
{
"epoch": 0.07042253521126761,
"grad_norm": 10.226069450378418,
"learning_rate": 0.0001,
"loss": 0.6835,
"step": 10
},
{
"epoch": 0.07746478873239436,
"grad_norm": 6.849209785461426,
"learning_rate": 9.99695413509548e-05,
"loss": 0.5162,
"step": 11
},
{
"epoch": 0.08450704225352113,
"grad_norm": 9.03485107421875,
"learning_rate": 9.987820251299122e-05,
"loss": 0.5481,
"step": 12
},
{
"epoch": 0.09154929577464789,
"grad_norm": 4.938020706176758,
"learning_rate": 9.972609476841367e-05,
"loss": 0.3335,
"step": 13
},
{
"epoch": 0.09859154929577464,
"grad_norm": 10.797144889831543,
"learning_rate": 9.951340343707852e-05,
"loss": 0.4465,
"step": 14
},
{
"epoch": 0.1056338028169014,
"grad_norm": 6.949985980987549,
"learning_rate": 9.924038765061042e-05,
"loss": 0.3258,
"step": 15
},
{
"epoch": 0.11267605633802817,
"grad_norm": 6.018566608428955,
"learning_rate": 9.890738003669029e-05,
"loss": 0.2399,
"step": 16
},
{
"epoch": 0.11971830985915492,
"grad_norm": 10.206531524658203,
"learning_rate": 9.851478631379982e-05,
"loss": 0.3624,
"step": 17
},
{
"epoch": 0.1267605633802817,
"grad_norm": 5.126565456390381,
"learning_rate": 9.806308479691595e-05,
"loss": 0.2134,
"step": 18
},
{
"epoch": 0.1267605633802817,
"eval_loss": 0.30221232771873474,
"eval_runtime": 13.1693,
"eval_samples_per_second": 9.112,
"eval_steps_per_second": 1.139,
"step": 18
},
{
"epoch": 0.13380281690140844,
"grad_norm": 8.039265632629395,
"learning_rate": 9.755282581475769e-05,
"loss": 0.5262,
"step": 19
},
{
"epoch": 0.14084507042253522,
"grad_norm": 6.505465984344482,
"learning_rate": 9.698463103929542e-05,
"loss": 0.3438,
"step": 20
},
{
"epoch": 0.14788732394366197,
"grad_norm": 3.672312021255493,
"learning_rate": 9.635919272833938e-05,
"loss": 0.3436,
"step": 21
},
{
"epoch": 0.15492957746478872,
"grad_norm": 3.8408946990966797,
"learning_rate": 9.567727288213005e-05,
"loss": 0.3006,
"step": 22
},
{
"epoch": 0.1619718309859155,
"grad_norm": 6.516692638397217,
"learning_rate": 9.493970231495835e-05,
"loss": 0.5252,
"step": 23
},
{
"epoch": 0.16901408450704225,
"grad_norm": 7.897261142730713,
"learning_rate": 9.414737964294636e-05,
"loss": 0.6106,
"step": 24
},
{
"epoch": 0.176056338028169,
"grad_norm": 5.932919025421143,
"learning_rate": 9.330127018922194e-05,
"loss": 0.4199,
"step": 25
},
{
"epoch": 0.18309859154929578,
"grad_norm": 3.6232476234436035,
"learning_rate": 9.24024048078213e-05,
"loss": 0.2312,
"step": 26
},
{
"epoch": 0.19014084507042253,
"grad_norm": 4.60596227645874,
"learning_rate": 9.145187862775209e-05,
"loss": 0.2139,
"step": 27
},
{
"epoch": 0.19014084507042253,
"eval_loss": 0.2467854768037796,
"eval_runtime": 13.1804,
"eval_samples_per_second": 9.104,
"eval_steps_per_second": 1.138,
"step": 27
},
{
"epoch": 0.19718309859154928,
"grad_norm": 4.2489542961120605,
"learning_rate": 9.045084971874738e-05,
"loss": 0.3291,
"step": 28
},
{
"epoch": 0.20422535211267606,
"grad_norm": 3.9708523750305176,
"learning_rate": 8.940053768033609e-05,
"loss": 0.1092,
"step": 29
},
{
"epoch": 0.2112676056338028,
"grad_norm": 4.278700828552246,
"learning_rate": 8.83022221559489e-05,
"loss": 0.1736,
"step": 30
},
{
"epoch": 0.21830985915492956,
"grad_norm": 6.528390407562256,
"learning_rate": 8.715724127386972e-05,
"loss": 0.3279,
"step": 31
},
{
"epoch": 0.22535211267605634,
"grad_norm": 8.425565719604492,
"learning_rate": 8.596699001693255e-05,
"loss": 0.2106,
"step": 32
},
{
"epoch": 0.2323943661971831,
"grad_norm": 2.0678305625915527,
"learning_rate": 8.473291852294987e-05,
"loss": 0.0663,
"step": 33
},
{
"epoch": 0.23943661971830985,
"grad_norm": 2.6804184913635254,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0761,
"step": 34
},
{
"epoch": 0.24647887323943662,
"grad_norm": 9.867767333984375,
"learning_rate": 8.213938048432697e-05,
"loss": 0.396,
"step": 35
},
{
"epoch": 0.2535211267605634,
"grad_norm": 9.320279121398926,
"learning_rate": 8.07830737662829e-05,
"loss": 0.3407,
"step": 36
},
{
"epoch": 0.2535211267605634,
"eval_loss": 0.2954208552837372,
"eval_runtime": 13.1748,
"eval_samples_per_second": 9.108,
"eval_steps_per_second": 1.139,
"step": 36
},
{
"epoch": 0.2605633802816901,
"grad_norm": 10.335426330566406,
"learning_rate": 7.938926261462366e-05,
"loss": 0.3163,
"step": 37
},
{
"epoch": 0.2676056338028169,
"grad_norm": 1.8607492446899414,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0346,
"step": 38
},
{
"epoch": 0.2746478873239437,
"grad_norm": 5.454378128051758,
"learning_rate": 7.649596321166024e-05,
"loss": 0.1026,
"step": 39
},
{
"epoch": 0.28169014084507044,
"grad_norm": 8.138297080993652,
"learning_rate": 7.500000000000001e-05,
"loss": 0.214,
"step": 40
},
{
"epoch": 0.2887323943661972,
"grad_norm": 12.727496147155762,
"learning_rate": 7.347357813929454e-05,
"loss": 0.3999,
"step": 41
},
{
"epoch": 0.29577464788732394,
"grad_norm": 8.18421745300293,
"learning_rate": 7.191855733945387e-05,
"loss": 0.4488,
"step": 42
},
{
"epoch": 0.3028169014084507,
"grad_norm": 7.231586933135986,
"learning_rate": 7.033683215379002e-05,
"loss": 0.2632,
"step": 43
},
{
"epoch": 0.30985915492957744,
"grad_norm": 11.409683227539062,
"learning_rate": 6.873032967079561e-05,
"loss": 0.4955,
"step": 44
},
{
"epoch": 0.31690140845070425,
"grad_norm": 8.826681137084961,
"learning_rate": 6.710100716628344e-05,
"loss": 0.6152,
"step": 45
},
{
"epoch": 0.31690140845070425,
"eval_loss": 0.2960638701915741,
"eval_runtime": 13.1839,
"eval_samples_per_second": 9.102,
"eval_steps_per_second": 1.138,
"step": 45
},
{
"epoch": 0.323943661971831,
"grad_norm": 9.715123176574707,
"learning_rate": 6.545084971874738e-05,
"loss": 0.4276,
"step": 46
},
{
"epoch": 0.33098591549295775,
"grad_norm": 5.414391040802002,
"learning_rate": 6.378186779084995e-05,
"loss": 0.2602,
"step": 47
},
{
"epoch": 0.3380281690140845,
"grad_norm": 4.150219917297363,
"learning_rate": 6.209609477998338e-05,
"loss": 0.1638,
"step": 48
},
{
"epoch": 0.34507042253521125,
"grad_norm": 4.825133323669434,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.4325,
"step": 49
},
{
"epoch": 0.352112676056338,
"grad_norm": 3.777461290359497,
"learning_rate": 5.868240888334653e-05,
"loss": 0.3466,
"step": 50
},
{
"epoch": 0.3591549295774648,
"grad_norm": 3.6818737983703613,
"learning_rate": 5.695865504800327e-05,
"loss": 0.1963,
"step": 51
},
{
"epoch": 0.36619718309859156,
"grad_norm": 2.7601659297943115,
"learning_rate": 5.522642316338268e-05,
"loss": 0.1896,
"step": 52
},
{
"epoch": 0.3732394366197183,
"grad_norm": 3.4538276195526123,
"learning_rate": 5.348782368720626e-05,
"loss": 0.4291,
"step": 53
},
{
"epoch": 0.38028169014084506,
"grad_norm": 4.041441917419434,
"learning_rate": 5.174497483512506e-05,
"loss": 0.4886,
"step": 54
},
{
"epoch": 0.38028169014084506,
"eval_loss": 0.22900345921516418,
"eval_runtime": 13.1856,
"eval_samples_per_second": 9.101,
"eval_steps_per_second": 1.138,
"step": 54
},
{
"epoch": 0.3873239436619718,
"grad_norm": 3.5224251747131348,
"learning_rate": 5e-05,
"loss": 0.2007,
"step": 55
},
{
"epoch": 0.39436619718309857,
"grad_norm": 3.5779590606689453,
"learning_rate": 4.825502516487497e-05,
"loss": 0.2036,
"step": 56
},
{
"epoch": 0.4014084507042254,
"grad_norm": 2.0858726501464844,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.2388,
"step": 57
},
{
"epoch": 0.4084507042253521,
"grad_norm": 5.528567790985107,
"learning_rate": 4.477357683661734e-05,
"loss": 0.5931,
"step": 58
},
{
"epoch": 0.4154929577464789,
"grad_norm": 3.151121139526367,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.314,
"step": 59
},
{
"epoch": 0.4225352112676056,
"grad_norm": 3.6510658264160156,
"learning_rate": 4.131759111665349e-05,
"loss": 0.2204,
"step": 60
},
{
"epoch": 0.4295774647887324,
"grad_norm": 6.0032525062561035,
"learning_rate": 3.960441545911204e-05,
"loss": 0.2549,
"step": 61
},
{
"epoch": 0.43661971830985913,
"grad_norm": 2.7886574268341064,
"learning_rate": 3.790390522001662e-05,
"loss": 0.2652,
"step": 62
},
{
"epoch": 0.44366197183098594,
"grad_norm": 4.284187316894531,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.1697,
"step": 63
},
{
"epoch": 0.44366197183098594,
"eval_loss": 0.202741339802742,
"eval_runtime": 13.1806,
"eval_samples_per_second": 9.104,
"eval_steps_per_second": 1.138,
"step": 63
},
{
"epoch": 0.4507042253521127,
"grad_norm": 3.984865665435791,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.348,
"step": 64
},
{
"epoch": 0.45774647887323944,
"grad_norm": 5.394085884094238,
"learning_rate": 3.289899283371657e-05,
"loss": 0.2619,
"step": 65
},
{
"epoch": 0.4647887323943662,
"grad_norm": 3.0627472400665283,
"learning_rate": 3.12696703292044e-05,
"loss": 0.1798,
"step": 66
},
{
"epoch": 0.47183098591549294,
"grad_norm": 6.31789493560791,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.4562,
"step": 67
},
{
"epoch": 0.4788732394366197,
"grad_norm": 7.064052104949951,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.2269,
"step": 68
},
{
"epoch": 0.4859154929577465,
"grad_norm": 3.158444881439209,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.1979,
"step": 69
},
{
"epoch": 0.49295774647887325,
"grad_norm": 3.1669986248016357,
"learning_rate": 2.500000000000001e-05,
"loss": 0.1897,
"step": 70
},
{
"epoch": 0.5,
"grad_norm": 5.080317974090576,
"learning_rate": 2.350403678833976e-05,
"loss": 0.2205,
"step": 71
},
{
"epoch": 0.5070422535211268,
"grad_norm": 6.588969707489014,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.2209,
"step": 72
},
{
"epoch": 0.5070422535211268,
"eval_loss": 0.2085752934217453,
"eval_runtime": 13.1778,
"eval_samples_per_second": 9.106,
"eval_steps_per_second": 1.138,
"step": 72
},
{
"epoch": 0.5140845070422535,
"grad_norm": 2.9101386070251465,
"learning_rate": 2.061073738537635e-05,
"loss": 0.1591,
"step": 73
},
{
"epoch": 0.5211267605633803,
"grad_norm": 1.1135281324386597,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0342,
"step": 74
},
{
"epoch": 0.528169014084507,
"grad_norm": 3.9573683738708496,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.1127,
"step": 75
},
{
"epoch": 0.5352112676056338,
"grad_norm": 7.286668300628662,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.6188,
"step": 76
},
{
"epoch": 0.5422535211267606,
"grad_norm": 11.555882453918457,
"learning_rate": 1.526708147705013e-05,
"loss": 0.8545,
"step": 77
},
{
"epoch": 0.5492957746478874,
"grad_norm": 3.129523277282715,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.1236,
"step": 78
},
{
"epoch": 0.5563380281690141,
"grad_norm": 2.9682908058166504,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.2428,
"step": 79
},
{
"epoch": 0.5633802816901409,
"grad_norm": 4.371545314788818,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.1944,
"step": 80
},
{
"epoch": 0.5704225352112676,
"grad_norm": 6.686436176300049,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.2305,
"step": 81
},
{
"epoch": 0.5704225352112676,
"eval_loss": 0.20365576446056366,
"eval_runtime": 13.1791,
"eval_samples_per_second": 9.105,
"eval_steps_per_second": 1.138,
"step": 81
},
{
"epoch": 0.5774647887323944,
"grad_norm": 2.3258509635925293,
"learning_rate": 9.549150281252633e-06,
"loss": 0.1026,
"step": 82
},
{
"epoch": 0.5845070422535211,
"grad_norm": 4.362199783325195,
"learning_rate": 8.548121372247918e-06,
"loss": 0.3257,
"step": 83
},
{
"epoch": 0.5915492957746479,
"grad_norm": 3.4101643562316895,
"learning_rate": 7.597595192178702e-06,
"loss": 0.1493,
"step": 84
},
{
"epoch": 0.5985915492957746,
"grad_norm": 4.412539482116699,
"learning_rate": 6.698729810778065e-06,
"loss": 0.3071,
"step": 85
},
{
"epoch": 0.6056338028169014,
"grad_norm": 3.5476527214050293,
"learning_rate": 5.852620357053651e-06,
"loss": 0.1843,
"step": 86
},
{
"epoch": 0.6126760563380281,
"grad_norm": 2.7088310718536377,
"learning_rate": 5.060297685041659e-06,
"loss": 0.0625,
"step": 87
},
{
"epoch": 0.6197183098591549,
"grad_norm": 3.1573803424835205,
"learning_rate": 4.322727117869951e-06,
"loss": 0.1934,
"step": 88
},
{
"epoch": 0.6267605633802817,
"grad_norm": 4.134128093719482,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.2589,
"step": 89
},
{
"epoch": 0.6338028169014085,
"grad_norm": 2.6992673873901367,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.088,
"step": 90
},
{
"epoch": 0.6338028169014085,
"eval_loss": 0.20047682523727417,
"eval_runtime": 13.1753,
"eval_samples_per_second": 9.108,
"eval_steps_per_second": 1.138,
"step": 90
},
{
"epoch": 0.6408450704225352,
"grad_norm": 5.939605236053467,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.3984,
"step": 91
},
{
"epoch": 0.647887323943662,
"grad_norm": 2.5079870223999023,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.1128,
"step": 92
},
{
"epoch": 0.6549295774647887,
"grad_norm": 8.377665519714355,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.233,
"step": 93
},
{
"epoch": 0.6619718309859155,
"grad_norm": 2.7900145053863525,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.1707,
"step": 94
},
{
"epoch": 0.6690140845070423,
"grad_norm": 4.159727096557617,
"learning_rate": 7.596123493895991e-07,
"loss": 0.2514,
"step": 95
},
{
"epoch": 0.676056338028169,
"grad_norm": 9.270306587219238,
"learning_rate": 4.865965629214819e-07,
"loss": 0.245,
"step": 96
},
{
"epoch": 0.6830985915492958,
"grad_norm": 2.1187515258789062,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.1109,
"step": 97
},
{
"epoch": 0.6901408450704225,
"grad_norm": 6.0606231689453125,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.2395,
"step": 98
},
{
"epoch": 0.6971830985915493,
"grad_norm": 2.87969970703125,
"learning_rate": 3.04586490452119e-08,
"loss": 0.195,
"step": 99
},
{
"epoch": 0.6971830985915493,
"eval_loss": 0.19799180328845978,
"eval_runtime": 13.1831,
"eval_samples_per_second": 9.103,
"eval_steps_per_second": 1.138,
"step": 99
},
{
"epoch": 0.704225352112676,
"grad_norm": 6.620743751525879,
"learning_rate": 0.0,
"loss": 0.2681,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.41887283560448e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
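
A minimal sketch of how a trainer_state.json like the one above can be inspected offline, assuming it was saved under a checkpoint directory named checkpoint-100 (the path is an assumption; adjust it to the actual save location). It splits log_history into training-loss and eval-loss series keyed by global step:

import json

# Load the Trainer checkpoint state (path is an assumption for this sketch).
with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Entries with "loss" are training logs; entries with "eval_loss" are evaluation logs.
train_loss = {e["step"]: e["loss"] for e in state["log_history"] if "loss" in e}
eval_loss = {e["step"]: e["eval_loss"] for e in state["log_history"] if "eval_loss" in e}

print(f"final train loss @ step {max(train_loss)}: {train_loss[max(train_loss)]}")
print(f"final eval loss  @ step {max(eval_loss)}: {eval_loss[max(eval_loss)]}")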