{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.40160642570281124,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004016064257028112,
"grad_norm": 0.7228589057922363,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.6631,
"step": 1
},
{
"epoch": 0.004016064257028112,
"eval_loss": 1.0697861909866333,
"eval_runtime": 32.5901,
"eval_samples_per_second": 6.444,
"eval_steps_per_second": 0.828,
"step": 1
},
{
"epoch": 0.008032128514056224,
"grad_norm": 1.0123487710952759,
"learning_rate": 4.000000000000001e-06,
"loss": 1.6836,
"step": 2
},
{
"epoch": 0.012048192771084338,
"grad_norm": 1.005481481552124,
"learning_rate": 6e-06,
"loss": 1.9228,
"step": 3
},
{
"epoch": 0.01606425702811245,
"grad_norm": 0.8270795941352844,
"learning_rate": 8.000000000000001e-06,
"loss": 2.45,
"step": 4
},
{
"epoch": 0.020080321285140562,
"grad_norm": 0.9999402165412903,
"learning_rate": 1e-05,
"loss": 2.668,
"step": 5
},
{
"epoch": 0.024096385542168676,
"grad_norm": 0.9072601199150085,
"learning_rate": 1.2e-05,
"loss": 1.8627,
"step": 6
},
{
"epoch": 0.028112449799196786,
"grad_norm": 1.1333314180374146,
"learning_rate": 1.4e-05,
"loss": 2.8953,
"step": 7
},
{
"epoch": 0.0321285140562249,
"grad_norm": 0.9936800003051758,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.2984,
"step": 8
},
{
"epoch": 0.03614457831325301,
"grad_norm": 0.7884902954101562,
"learning_rate": 1.8e-05,
"loss": 1.4,
"step": 9
},
{
"epoch": 0.03614457831325301,
"eval_loss": 1.0618449449539185,
"eval_runtime": 32.5955,
"eval_samples_per_second": 6.443,
"eval_steps_per_second": 0.828,
"step": 9
},
{
"epoch": 0.040160642570281124,
"grad_norm": 1.1049511432647705,
"learning_rate": 2e-05,
"loss": 2.3974,
"step": 10
},
{
"epoch": 0.04417670682730924,
"grad_norm": 1.0087590217590332,
"learning_rate": 1.999390827019096e-05,
"loss": 2.1244,
"step": 11
},
{
"epoch": 0.04819277108433735,
"grad_norm": 1.021081805229187,
"learning_rate": 1.9975640502598243e-05,
"loss": 1.6924,
"step": 12
},
{
"epoch": 0.05220883534136546,
"grad_norm": 0.8872256278991699,
"learning_rate": 1.9945218953682736e-05,
"loss": 1.8039,
"step": 13
},
{
"epoch": 0.05622489959839357,
"grad_norm": 1.019092321395874,
"learning_rate": 1.9902680687415704e-05,
"loss": 1.8685,
"step": 14
},
{
"epoch": 0.060240963855421686,
"grad_norm": 1.4794679880142212,
"learning_rate": 1.9848077530122083e-05,
"loss": 2.1413,
"step": 15
},
{
"epoch": 0.0642570281124498,
"grad_norm": 1.2401800155639648,
"learning_rate": 1.9781476007338058e-05,
"loss": 2.1287,
"step": 16
},
{
"epoch": 0.06827309236947791,
"grad_norm": 1.0547970533370972,
"learning_rate": 1.9702957262759964e-05,
"loss": 1.1896,
"step": 17
},
{
"epoch": 0.07228915662650602,
"grad_norm": 1.1523879766464233,
"learning_rate": 1.961261695938319e-05,
"loss": 1.5816,
"step": 18
},
{
"epoch": 0.07228915662650602,
"eval_loss": 0.985172688961029,
"eval_runtime": 32.6746,
"eval_samples_per_second": 6.427,
"eval_steps_per_second": 0.826,
"step": 18
},
{
"epoch": 0.07630522088353414,
"grad_norm": 1.4140926599502563,
"learning_rate": 1.9510565162951538e-05,
"loss": 1.9035,
"step": 19
},
{
"epoch": 0.08032128514056225,
"grad_norm": 1.519662618637085,
"learning_rate": 1.9396926207859085e-05,
"loss": 1.9739,
"step": 20
},
{
"epoch": 0.08433734939759036,
"grad_norm": 0.977906346321106,
"learning_rate": 1.9271838545667876e-05,
"loss": 1.1063,
"step": 21
},
{
"epoch": 0.08835341365461848,
"grad_norm": 1.301216959953308,
"learning_rate": 1.913545457642601e-05,
"loss": 1.7746,
"step": 22
},
{
"epoch": 0.09236947791164658,
"grad_norm": 1.1488702297210693,
"learning_rate": 1.8987940462991673e-05,
"loss": 1.5895,
"step": 23
},
{
"epoch": 0.0963855421686747,
"grad_norm": 1.351751685142517,
"learning_rate": 1.8829475928589272e-05,
"loss": 2.5842,
"step": 24
},
{
"epoch": 0.10040160642570281,
"grad_norm": 1.2297149896621704,
"learning_rate": 1.866025403784439e-05,
"loss": 1.935,
"step": 25
},
{
"epoch": 0.10441767068273092,
"grad_norm": 1.0193097591400146,
"learning_rate": 1.848048096156426e-05,
"loss": 1.6745,
"step": 26
},
{
"epoch": 0.10843373493975904,
"grad_norm": 1.5117055177688599,
"learning_rate": 1.8290375725550417e-05,
"loss": 1.2787,
"step": 27
},
{
"epoch": 0.10843373493975904,
"eval_loss": 0.8428009152412415,
"eval_runtime": 32.6578,
"eval_samples_per_second": 6.43,
"eval_steps_per_second": 0.827,
"step": 27
},
{
"epoch": 0.11244979919678715,
"grad_norm": 0.8445839881896973,
"learning_rate": 1.8090169943749477e-05,
"loss": 1.6433,
"step": 28
},
{
"epoch": 0.11646586345381527,
"grad_norm": 1.1348373889923096,
"learning_rate": 1.788010753606722e-05,
"loss": 1.4937,
"step": 29
},
{
"epoch": 0.12048192771084337,
"grad_norm": 0.9660128355026245,
"learning_rate": 1.766044443118978e-05,
"loss": 1.2509,
"step": 30
},
{
"epoch": 0.12449799196787148,
"grad_norm": 1.046816110610962,
"learning_rate": 1.7431448254773943e-05,
"loss": 1.6319,
"step": 31
},
{
"epoch": 0.1285140562248996,
"grad_norm": 1.2746326923370361,
"learning_rate": 1.7193398003386514e-05,
"loss": 1.7526,
"step": 32
},
{
"epoch": 0.13253012048192772,
"grad_norm": 1.138624906539917,
"learning_rate": 1.6946583704589973e-05,
"loss": 1.5544,
"step": 33
},
{
"epoch": 0.13654618473895583,
"grad_norm": 1.0225257873535156,
"learning_rate": 1.6691306063588583e-05,
"loss": 1.3283,
"step": 34
},
{
"epoch": 0.14056224899598393,
"grad_norm": 1.3078521490097046,
"learning_rate": 1.6427876096865394e-05,
"loss": 1.6447,
"step": 35
},
{
"epoch": 0.14457831325301204,
"grad_norm": 1.1222729682922363,
"learning_rate": 1.6156614753256583e-05,
"loss": 1.482,
"step": 36
},
{
"epoch": 0.14457831325301204,
"eval_loss": 0.758996844291687,
"eval_runtime": 32.6194,
"eval_samples_per_second": 6.438,
"eval_steps_per_second": 0.828,
"step": 36
},
{
"epoch": 0.14859437751004015,
"grad_norm": 1.4948545694351196,
"learning_rate": 1.5877852522924733e-05,
"loss": 1.4995,
"step": 37
},
{
"epoch": 0.15261044176706828,
"grad_norm": 1.0291143655776978,
"learning_rate": 1.5591929034707468e-05,
"loss": 1.2655,
"step": 38
},
{
"epoch": 0.1566265060240964,
"grad_norm": 1.1123580932617188,
"learning_rate": 1.529919264233205e-05,
"loss": 1.8731,
"step": 39
},
{
"epoch": 0.1606425702811245,
"grad_norm": 0.7641251087188721,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.7626,
"step": 40
},
{
"epoch": 0.1646586345381526,
"grad_norm": 1.2452340126037598,
"learning_rate": 1.469471562785891e-05,
"loss": 1.3199,
"step": 41
},
{
"epoch": 0.1686746987951807,
"grad_norm": 1.4203380346298218,
"learning_rate": 1.4383711467890776e-05,
"loss": 1.3807,
"step": 42
},
{
"epoch": 0.17269076305220885,
"grad_norm": 1.2240241765975952,
"learning_rate": 1.4067366430758004e-05,
"loss": 1.3466,
"step": 43
},
{
"epoch": 0.17670682730923695,
"grad_norm": 1.2436611652374268,
"learning_rate": 1.3746065934159123e-05,
"loss": 1.4975,
"step": 44
},
{
"epoch": 0.18072289156626506,
"grad_norm": 1.1533771753311157,
"learning_rate": 1.342020143325669e-05,
"loss": 1.409,
"step": 45
},
{
"epoch": 0.18072289156626506,
"eval_loss": 0.7062954902648926,
"eval_runtime": 32.6227,
"eval_samples_per_second": 6.437,
"eval_steps_per_second": 0.828,
"step": 45
},
{
"epoch": 0.18473895582329317,
"grad_norm": 1.3506983518600464,
"learning_rate": 1.3090169943749475e-05,
"loss": 1.1306,
"step": 46
},
{
"epoch": 0.18875502008032127,
"grad_norm": 1.8663361072540283,
"learning_rate": 1.2756373558169992e-05,
"loss": 1.3438,
"step": 47
},
{
"epoch": 0.1927710843373494,
"grad_norm": 1.1213346719741821,
"learning_rate": 1.2419218955996677e-05,
"loss": 1.3015,
"step": 48
},
{
"epoch": 0.19678714859437751,
"grad_norm": 1.3179396390914917,
"learning_rate": 1.2079116908177592e-05,
"loss": 1.4581,
"step": 49
},
{
"epoch": 0.20080321285140562,
"grad_norm": 1.3209939002990723,
"learning_rate": 1.1736481776669307e-05,
"loss": 1.3032,
"step": 50
},
{
"epoch": 0.20481927710843373,
"grad_norm": 1.0638103485107422,
"learning_rate": 1.1391731009600655e-05,
"loss": 1.3361,
"step": 51
},
{
"epoch": 0.20883534136546184,
"grad_norm": 1.422438144683838,
"learning_rate": 1.1045284632676535e-05,
"loss": 1.4487,
"step": 52
},
{
"epoch": 0.21285140562248997,
"grad_norm": 1.2671741247177124,
"learning_rate": 1.0697564737441254e-05,
"loss": 1.3626,
"step": 53
},
{
"epoch": 0.21686746987951808,
"grad_norm": 1.0545305013656616,
"learning_rate": 1.0348994967025012e-05,
"loss": 1.3192,
"step": 54
},
{
"epoch": 0.21686746987951808,
"eval_loss": 0.67100989818573,
"eval_runtime": 32.6003,
"eval_samples_per_second": 6.442,
"eval_steps_per_second": 0.828,
"step": 54
},
{
"epoch": 0.22088353413654618,
"grad_norm": 1.1217244863510132,
"learning_rate": 1e-05,
"loss": 1.0894,
"step": 55
},
{
"epoch": 0.2248995983935743,
"grad_norm": 1.5441569089889526,
"learning_rate": 9.651005032974994e-06,
"loss": 1.5076,
"step": 56
},
{
"epoch": 0.2289156626506024,
"grad_norm": 1.2194684743881226,
"learning_rate": 9.302435262558748e-06,
"loss": 1.3286,
"step": 57
},
{
"epoch": 0.23293172690763053,
"grad_norm": 1.2157171964645386,
"learning_rate": 8.954715367323468e-06,
"loss": 1.1355,
"step": 58
},
{
"epoch": 0.23694779116465864,
"grad_norm": 1.2383450269699097,
"learning_rate": 8.60826899039935e-06,
"loss": 1.3349,
"step": 59
},
{
"epoch": 0.24096385542168675,
"grad_norm": 1.0101817846298218,
"learning_rate": 8.263518223330698e-06,
"loss": 0.992,
"step": 60
},
{
"epoch": 0.24497991967871485,
"grad_norm": 1.3267630338668823,
"learning_rate": 7.92088309182241e-06,
"loss": 1.4941,
"step": 61
},
{
"epoch": 0.24899598393574296,
"grad_norm": 1.0341275930404663,
"learning_rate": 7.580781044003324e-06,
"loss": 0.8775,
"step": 62
},
{
"epoch": 0.25301204819277107,
"grad_norm": 1.5267117023468018,
"learning_rate": 7.243626441830009e-06,
"loss": 0.9736,
"step": 63
},
{
"epoch": 0.25301204819277107,
"eval_loss": 0.6493720412254333,
"eval_runtime": 32.5813,
"eval_samples_per_second": 6.445,
"eval_steps_per_second": 0.829,
"step": 63
},
{
"epoch": 0.2570281124497992,
"grad_norm": 1.3753007650375366,
"learning_rate": 6.909830056250527e-06,
"loss": 1.4487,
"step": 64
},
{
"epoch": 0.26104417670682734,
"grad_norm": 1.2772159576416016,
"learning_rate": 6.579798566743314e-06,
"loss": 1.0686,
"step": 65
},
{
"epoch": 0.26506024096385544,
"grad_norm": 1.349326491355896,
"learning_rate": 6.25393406584088e-06,
"loss": 1.209,
"step": 66
},
{
"epoch": 0.26907630522088355,
"grad_norm": 1.2562471628189087,
"learning_rate": 5.932633569242e-06,
"loss": 1.3394,
"step": 67
},
{
"epoch": 0.27309236947791166,
"grad_norm": 1.4296122789382935,
"learning_rate": 5.616288532109225e-06,
"loss": 1.3279,
"step": 68
},
{
"epoch": 0.27710843373493976,
"grad_norm": 1.0856250524520874,
"learning_rate": 5.305284372141095e-06,
"loss": 0.6899,
"step": 69
},
{
"epoch": 0.28112449799196787,
"grad_norm": 1.3272840976715088,
"learning_rate": 5.000000000000003e-06,
"loss": 1.2571,
"step": 70
},
{
"epoch": 0.285140562248996,
"grad_norm": 1.5904431343078613,
"learning_rate": 4.700807357667953e-06,
"loss": 1.2008,
"step": 71
},
{
"epoch": 0.2891566265060241,
"grad_norm": 1.8825539350509644,
"learning_rate": 4.408070965292534e-06,
"loss": 1.9554,
"step": 72
},
{
"epoch": 0.2891566265060241,
"eval_loss": 0.6365012526512146,
"eval_runtime": 32.5792,
"eval_samples_per_second": 6.446,
"eval_steps_per_second": 0.829,
"step": 72
},
{
"epoch": 0.2931726907630522,
"grad_norm": 1.4897550344467163,
"learning_rate": 4.12214747707527e-06,
"loss": 1.395,
"step": 73
},
{
"epoch": 0.2971887550200803,
"grad_norm": 1.3629871606826782,
"learning_rate": 3.8433852467434175e-06,
"loss": 1.3097,
"step": 74
},
{
"epoch": 0.30120481927710846,
"grad_norm": 1.578955888748169,
"learning_rate": 3.5721239031346067e-06,
"loss": 1.3274,
"step": 75
},
{
"epoch": 0.30522088353413657,
"grad_norm": 1.4249285459518433,
"learning_rate": 3.308693936411421e-06,
"loss": 1.2748,
"step": 76
},
{
"epoch": 0.3092369477911647,
"grad_norm": 1.1587796211242676,
"learning_rate": 3.0534162954100264e-06,
"loss": 1.0238,
"step": 77
},
{
"epoch": 0.3132530120481928,
"grad_norm": 1.302412748336792,
"learning_rate": 2.8066019966134907e-06,
"loss": 1.3392,
"step": 78
},
{
"epoch": 0.3172690763052209,
"grad_norm": 1.0670229196548462,
"learning_rate": 2.5685517452260566e-06,
"loss": 0.8408,
"step": 79
},
{
"epoch": 0.321285140562249,
"grad_norm": 1.086983561515808,
"learning_rate": 2.339555568810221e-06,
"loss": 0.6949,
"step": 80
},
{
"epoch": 0.3253012048192771,
"grad_norm": 1.277090311050415,
"learning_rate": 2.119892463932781e-06,
"loss": 0.8406,
"step": 81
},
{
"epoch": 0.3253012048192771,
"eval_loss": 0.6291564702987671,
"eval_runtime": 32.5821,
"eval_samples_per_second": 6.445,
"eval_steps_per_second": 0.829,
"step": 81
},
{
"epoch": 0.3293172690763052,
"grad_norm": 1.555704951286316,
"learning_rate": 1.9098300562505266e-06,
"loss": 1.2604,
"step": 82
},
{
"epoch": 0.3333333333333333,
"grad_norm": 1.5652434825897217,
"learning_rate": 1.709624274449584e-06,
"loss": 1.6627,
"step": 83
},
{
"epoch": 0.3373493975903614,
"grad_norm": 1.3343106508255005,
"learning_rate": 1.5195190384357405e-06,
"loss": 1.3886,
"step": 84
},
{
"epoch": 0.3413654618473896,
"grad_norm": 1.630918264389038,
"learning_rate": 1.339745962155613e-06,
"loss": 1.4081,
"step": 85
},
{
"epoch": 0.3453815261044177,
"grad_norm": 1.3480379581451416,
"learning_rate": 1.1705240714107301e-06,
"loss": 0.8545,
"step": 86
},
{
"epoch": 0.3493975903614458,
"grad_norm": 1.7904911041259766,
"learning_rate": 1.012059537008332e-06,
"loss": 1.2855,
"step": 87
},
{
"epoch": 0.3534136546184739,
"grad_norm": 1.5661286115646362,
"learning_rate": 8.645454235739903e-07,
"loss": 1.4537,
"step": 88
},
{
"epoch": 0.357429718875502,
"grad_norm": 1.5383715629577637,
"learning_rate": 7.281614543321269e-07,
"loss": 1.4315,
"step": 89
},
{
"epoch": 0.3614457831325301,
"grad_norm": 1.7544879913330078,
"learning_rate": 6.030737921409169e-07,
"loss": 1.5103,
"step": 90
},
{
"epoch": 0.3614457831325301,
"eval_loss": 0.6263256072998047,
"eval_runtime": 32.6143,
"eval_samples_per_second": 6.439,
"eval_steps_per_second": 0.828,
"step": 90
},
{
"epoch": 0.3654618473895582,
"grad_norm": 1.6552611589431763,
"learning_rate": 4.894348370484648e-07,
"loss": 1.7058,
"step": 91
},
{
"epoch": 0.36947791164658633,
"grad_norm": 1.3507189750671387,
"learning_rate": 3.8738304061681107e-07,
"loss": 0.9932,
"step": 92
},
{
"epoch": 0.37349397590361444,
"grad_norm": 1.7394344806671143,
"learning_rate": 2.970427372400353e-07,
"loss": 1.4815,
"step": 93
},
{
"epoch": 0.37751004016064255,
"grad_norm": 1.3886154890060425,
"learning_rate": 2.1852399266194312e-07,
"loss": 0.9411,
"step": 94
},
{
"epoch": 0.3815261044176707,
"grad_norm": 1.3755466938018799,
"learning_rate": 1.519224698779198e-07,
"loss": 1.3225,
"step": 95
},
{
"epoch": 0.3855421686746988,
"grad_norm": 1.3697952032089233,
"learning_rate": 9.731931258429638e-08,
"loss": 1.307,
"step": 96
},
{
"epoch": 0.3895582329317269,
"grad_norm": 1.1580904722213745,
"learning_rate": 5.4781046317267103e-08,
"loss": 0.7828,
"step": 97
},
{
"epoch": 0.39357429718875503,
"grad_norm": 1.169704794883728,
"learning_rate": 2.4359497401758026e-08,
"loss": 1.1284,
"step": 98
},
{
"epoch": 0.39759036144578314,
"grad_norm": 1.4925367832183838,
"learning_rate": 6.091729809042379e-09,
"loss": 1.0595,
"step": 99
},
{
"epoch": 0.39759036144578314,
"eval_loss": 0.6255317330360413,
"eval_runtime": 32.6,
"eval_samples_per_second": 6.442,
"eval_steps_per_second": 0.828,
"step": 99
},
{
"epoch": 0.40160642570281124,
"grad_norm": 1.5164817571640015,
"learning_rate": 0.0,
"loss": 1.4775,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2695579394048e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}