{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.15885623510722796,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015885623510722795,
"grad_norm": 0.7588194012641907,
"learning_rate": 1e-05,
"loss": 2.0204,
"step": 1
},
{
"epoch": 0.0015885623510722795,
"eval_loss": 2.1602227687835693,
"eval_runtime": 25.7941,
"eval_samples_per_second": 20.547,
"eval_steps_per_second": 2.597,
"step": 1
},
{
"epoch": 0.003177124702144559,
"grad_norm": 0.5876681208610535,
"learning_rate": 2e-05,
"loss": 2.0054,
"step": 2
},
{
"epoch": 0.004765687053216839,
"grad_norm": 0.7118221521377563,
"learning_rate": 3e-05,
"loss": 2.1649,
"step": 3
},
{
"epoch": 0.006354249404289118,
"grad_norm": 0.604448139667511,
"learning_rate": 4e-05,
"loss": 1.9918,
"step": 4
},
{
"epoch": 0.007942811755361398,
"grad_norm": 1.1285226345062256,
"learning_rate": 5e-05,
"loss": 2.9166,
"step": 5
},
{
"epoch": 0.009531374106433678,
"grad_norm": 0.7710039019584656,
"learning_rate": 6e-05,
"loss": 2.0509,
"step": 6
},
{
"epoch": 0.011119936457505957,
"grad_norm": 0.731193482875824,
"learning_rate": 7e-05,
"loss": 1.8467,
"step": 7
},
{
"epoch": 0.012708498808578236,
"grad_norm": 0.6948415040969849,
"learning_rate": 8e-05,
"loss": 1.7287,
"step": 8
},
{
"epoch": 0.014297061159650517,
"grad_norm": 0.6710980534553528,
"learning_rate": 9e-05,
"loss": 2.1689,
"step": 9
},
{
"epoch": 0.014297061159650517,
"eval_loss": 2.0491185188293457,
"eval_runtime": 25.1257,
"eval_samples_per_second": 21.094,
"eval_steps_per_second": 2.667,
"step": 9
},
{
"epoch": 0.015885623510722795,
"grad_norm": 0.6445783376693726,
"learning_rate": 0.0001,
"loss": 1.8429,
"step": 10
},
{
"epoch": 0.017474185861795076,
"grad_norm": 0.6626678109169006,
"learning_rate": 9.99695413509548e-05,
"loss": 2.0179,
"step": 11
},
{
"epoch": 0.019062748212867357,
"grad_norm": 0.6601588726043701,
"learning_rate": 9.987820251299122e-05,
"loss": 1.6245,
"step": 12
},
{
"epoch": 0.020651310563939634,
"grad_norm": 0.8181198239326477,
"learning_rate": 9.972609476841367e-05,
"loss": 2.42,
"step": 13
},
{
"epoch": 0.022239872915011914,
"grad_norm": 0.7023542523384094,
"learning_rate": 9.951340343707852e-05,
"loss": 1.3644,
"step": 14
},
{
"epoch": 0.023828435266084195,
"grad_norm": 0.7344656586647034,
"learning_rate": 9.924038765061042e-05,
"loss": 1.4728,
"step": 15
},
{
"epoch": 0.025416997617156472,
"grad_norm": 1.249544382095337,
"learning_rate": 9.890738003669029e-05,
"loss": 1.8716,
"step": 16
},
{
"epoch": 0.027005559968228753,
"grad_norm": 0.8132469058036804,
"learning_rate": 9.851478631379982e-05,
"loss": 2.0647,
"step": 17
},
{
"epoch": 0.028594122319301033,
"grad_norm": 0.9918925166130066,
"learning_rate": 9.806308479691595e-05,
"loss": 2.0602,
"step": 18
},
{
"epoch": 0.028594122319301033,
"eval_loss": 1.6575732231140137,
"eval_runtime": 25.1955,
"eval_samples_per_second": 21.036,
"eval_steps_per_second": 2.659,
"step": 18
},
{
"epoch": 0.030182684670373314,
"grad_norm": 0.9467197060585022,
"learning_rate": 9.755282581475769e-05,
"loss": 1.515,
"step": 19
},
{
"epoch": 0.03177124702144559,
"grad_norm": 0.8416855931282043,
"learning_rate": 9.698463103929542e-05,
"loss": 1.4362,
"step": 20
},
{
"epoch": 0.03335980937251787,
"grad_norm": 2.2242186069488525,
"learning_rate": 9.635919272833938e-05,
"loss": 1.9894,
"step": 21
},
{
"epoch": 0.03494837172359015,
"grad_norm": 1.3053258657455444,
"learning_rate": 9.567727288213005e-05,
"loss": 1.5462,
"step": 22
},
{
"epoch": 0.03653693407466243,
"grad_norm": 1.1135425567626953,
"learning_rate": 9.493970231495835e-05,
"loss": 1.4904,
"step": 23
},
{
"epoch": 0.03812549642573471,
"grad_norm": 1.121027946472168,
"learning_rate": 9.414737964294636e-05,
"loss": 0.9785,
"step": 24
},
{
"epoch": 0.03971405877680699,
"grad_norm": 0.9957679510116577,
"learning_rate": 9.330127018922194e-05,
"loss": 1.1314,
"step": 25
},
{
"epoch": 0.04130262112787927,
"grad_norm": 1.321761131286621,
"learning_rate": 9.24024048078213e-05,
"loss": 0.9808,
"step": 26
},
{
"epoch": 0.04289118347895155,
"grad_norm": 1.9307682514190674,
"learning_rate": 9.145187862775209e-05,
"loss": 1.2907,
"step": 27
},
{
"epoch": 0.04289118347895155,
"eval_loss": 1.3178653717041016,
"eval_runtime": 25.3243,
"eval_samples_per_second": 20.929,
"eval_steps_per_second": 2.646,
"step": 27
},
{
"epoch": 0.04447974583002383,
"grad_norm": 1.483275294303894,
"learning_rate": 9.045084971874738e-05,
"loss": 1.2794,
"step": 28
},
{
"epoch": 0.046068308181096106,
"grad_norm": 1.1199136972427368,
"learning_rate": 8.940053768033609e-05,
"loss": 1.3781,
"step": 29
},
{
"epoch": 0.04765687053216839,
"grad_norm": 1.032144546508789,
"learning_rate": 8.83022221559489e-05,
"loss": 0.9221,
"step": 30
},
{
"epoch": 0.04924543288324067,
"grad_norm": 1.8174779415130615,
"learning_rate": 8.715724127386972e-05,
"loss": 1.1036,
"step": 31
},
{
"epoch": 0.050833995234312944,
"grad_norm": 2.4534947872161865,
"learning_rate": 8.596699001693255e-05,
"loss": 1.4768,
"step": 32
},
{
"epoch": 0.05242255758538523,
"grad_norm": 1.4904756546020508,
"learning_rate": 8.473291852294987e-05,
"loss": 1.6205,
"step": 33
},
{
"epoch": 0.054011119936457505,
"grad_norm": 1.0817906856536865,
"learning_rate": 8.345653031794292e-05,
"loss": 0.99,
"step": 34
},
{
"epoch": 0.05559968228752978,
"grad_norm": 0.9891014695167542,
"learning_rate": 8.213938048432697e-05,
"loss": 0.9236,
"step": 35
},
{
"epoch": 0.057188244638602066,
"grad_norm": 1.5776753425598145,
"learning_rate": 8.07830737662829e-05,
"loss": 1.3738,
"step": 36
},
{
"epoch": 0.057188244638602066,
"eval_loss": 1.063091516494751,
"eval_runtime": 25.4151,
"eval_samples_per_second": 20.854,
"eval_steps_per_second": 2.636,
"step": 36
},
{
"epoch": 0.05877680698967434,
"grad_norm": 1.2596601247787476,
"learning_rate": 7.938926261462366e-05,
"loss": 1.5831,
"step": 37
},
{
"epoch": 0.06036536934074663,
"grad_norm": 1.726762294769287,
"learning_rate": 7.795964517353735e-05,
"loss": 1.0484,
"step": 38
},
{
"epoch": 0.061953931691818905,
"grad_norm": 1.2052408456802368,
"learning_rate": 7.649596321166024e-05,
"loss": 0.9753,
"step": 39
},
{
"epoch": 0.06354249404289118,
"grad_norm": 1.8810499906539917,
"learning_rate": 7.500000000000001e-05,
"loss": 1.304,
"step": 40
},
{
"epoch": 0.06513105639396347,
"grad_norm": 1.0861737728118896,
"learning_rate": 7.347357813929454e-05,
"loss": 0.7932,
"step": 41
},
{
"epoch": 0.06671961874503574,
"grad_norm": 0.9291259050369263,
"learning_rate": 7.191855733945387e-05,
"loss": 0.825,
"step": 42
},
{
"epoch": 0.06830818109610802,
"grad_norm": 5.190133094787598,
"learning_rate": 7.033683215379002e-05,
"loss": 1.8384,
"step": 43
},
{
"epoch": 0.0698967434471803,
"grad_norm": 2.087160110473633,
"learning_rate": 6.873032967079561e-05,
"loss": 1.1688,
"step": 44
},
{
"epoch": 0.07148530579825259,
"grad_norm": 1.2757378816604614,
"learning_rate": 6.710100716628344e-05,
"loss": 0.5792,
"step": 45
},
{
"epoch": 0.07148530579825259,
"eval_loss": 0.8744444847106934,
"eval_runtime": 25.3358,
"eval_samples_per_second": 20.919,
"eval_steps_per_second": 2.644,
"step": 45
},
{
"epoch": 0.07307386814932486,
"grad_norm": 1.5674198865890503,
"learning_rate": 6.545084971874738e-05,
"loss": 0.8157,
"step": 46
},
{
"epoch": 0.07466243050039714,
"grad_norm": 1.1018582582473755,
"learning_rate": 6.378186779084995e-05,
"loss": 0.9487,
"step": 47
},
{
"epoch": 0.07625099285146943,
"grad_norm": 1.1540908813476562,
"learning_rate": 6.209609477998338e-05,
"loss": 0.6282,
"step": 48
},
{
"epoch": 0.0778395552025417,
"grad_norm": 1.7604328393936157,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.1567,
"step": 49
},
{
"epoch": 0.07942811755361398,
"grad_norm": 2.1825644969940186,
"learning_rate": 5.868240888334653e-05,
"loss": 0.8163,
"step": 50
},
{
"epoch": 0.08101667990468626,
"grad_norm": 2.8380324840545654,
"learning_rate": 5.695865504800327e-05,
"loss": 0.8139,
"step": 51
},
{
"epoch": 0.08260524225575853,
"grad_norm": 0.9675163626670837,
"learning_rate": 5.522642316338268e-05,
"loss": 0.3775,
"step": 52
},
{
"epoch": 0.08419380460683082,
"grad_norm": 2.7258806228637695,
"learning_rate": 5.348782368720626e-05,
"loss": 1.2049,
"step": 53
},
{
"epoch": 0.0857823669579031,
"grad_norm": 1.352913737297058,
"learning_rate": 5.174497483512506e-05,
"loss": 0.7722,
"step": 54
},
{
"epoch": 0.0857823669579031,
"eval_loss": 0.7361476421356201,
"eval_runtime": 25.0673,
"eval_samples_per_second": 21.143,
"eval_steps_per_second": 2.673,
"step": 54
},
{
"epoch": 0.08737092930897537,
"grad_norm": 2.3246042728424072,
"learning_rate": 5e-05,
"loss": 0.7954,
"step": 55
},
{
"epoch": 0.08895949166004766,
"grad_norm": 1.1780191659927368,
"learning_rate": 4.825502516487497e-05,
"loss": 1.0354,
"step": 56
},
{
"epoch": 0.09054805401111994,
"grad_norm": 2.115222930908203,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.6829,
"step": 57
},
{
"epoch": 0.09213661636219221,
"grad_norm": 1.4351661205291748,
"learning_rate": 4.477357683661734e-05,
"loss": 0.5475,
"step": 58
},
{
"epoch": 0.0937251787132645,
"grad_norm": 3.4773356914520264,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.6846,
"step": 59
},
{
"epoch": 0.09531374106433678,
"grad_norm": 1.848775029182434,
"learning_rate": 4.131759111665349e-05,
"loss": 0.5564,
"step": 60
},
{
"epoch": 0.09690230341540905,
"grad_norm": 3.542532444000244,
"learning_rate": 3.960441545911204e-05,
"loss": 0.605,
"step": 61
},
{
"epoch": 0.09849086576648133,
"grad_norm": 1.1068249940872192,
"learning_rate": 3.790390522001662e-05,
"loss": 0.5019,
"step": 62
},
{
"epoch": 0.10007942811755362,
"grad_norm": 1.011226773262024,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.6327,
"step": 63
},
{
"epoch": 0.10007942811755362,
"eval_loss": 0.6306856870651245,
"eval_runtime": 24.7366,
"eval_samples_per_second": 21.426,
"eval_steps_per_second": 2.709,
"step": 63
},
{
"epoch": 0.10166799046862589,
"grad_norm": 0.9041082262992859,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.2446,
"step": 64
},
{
"epoch": 0.10325655281969817,
"grad_norm": 1.7981657981872559,
"learning_rate": 3.289899283371657e-05,
"loss": 1.3239,
"step": 65
},
{
"epoch": 0.10484511517077046,
"grad_norm": 1.7157872915267944,
"learning_rate": 3.12696703292044e-05,
"loss": 0.5888,
"step": 66
},
{
"epoch": 0.10643367752184273,
"grad_norm": 1.5570635795593262,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.9827,
"step": 67
},
{
"epoch": 0.10802223987291501,
"grad_norm": 1.05362868309021,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.6535,
"step": 68
},
{
"epoch": 0.1096108022239873,
"grad_norm": 1.386989951133728,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.8059,
"step": 69
},
{
"epoch": 0.11119936457505956,
"grad_norm": 1.5930371284484863,
"learning_rate": 2.500000000000001e-05,
"loss": 0.8848,
"step": 70
},
{
"epoch": 0.11278792692613185,
"grad_norm": 0.6902157664299011,
"learning_rate": 2.350403678833976e-05,
"loss": 0.1778,
"step": 71
},
{
"epoch": 0.11437648927720413,
"grad_norm": 1.5689494609832764,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.6893,
"step": 72
},
{
"epoch": 0.11437648927720413,
"eval_loss": 0.571943998336792,
"eval_runtime": 25.0278,
"eval_samples_per_second": 21.176,
"eval_steps_per_second": 2.677,
"step": 72
},
{
"epoch": 0.11596505162827642,
"grad_norm": 1.9662429094314575,
"learning_rate": 2.061073738537635e-05,
"loss": 0.8679,
"step": 73
},
{
"epoch": 0.11755361397934869,
"grad_norm": 0.8315130472183228,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.3787,
"step": 74
},
{
"epoch": 0.11914217633042097,
"grad_norm": 1.5936318635940552,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.8645,
"step": 75
},
{
"epoch": 0.12073073868149325,
"grad_norm": 1.0505174398422241,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.4891,
"step": 76
},
{
"epoch": 0.12231930103256553,
"grad_norm": 2.664303779602051,
"learning_rate": 1.526708147705013e-05,
"loss": 0.8714,
"step": 77
},
{
"epoch": 0.12390786338363781,
"grad_norm": 6.456088066101074,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.4106,
"step": 78
},
{
"epoch": 0.1254964257347101,
"grad_norm": 5.035181045532227,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.1116,
"step": 79
},
{
"epoch": 0.12708498808578236,
"grad_norm": 1.2827062606811523,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.6038,
"step": 80
},
{
"epoch": 0.12867355043685463,
"grad_norm": 1.571867823600769,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.0119,
"step": 81
},
{
"epoch": 0.12867355043685463,
"eval_loss": 0.5474610328674316,
"eval_runtime": 25.0332,
"eval_samples_per_second": 21.172,
"eval_steps_per_second": 2.676,
"step": 81
},
{
"epoch": 0.13026211278792693,
"grad_norm": 2.406301736831665,
"learning_rate": 9.549150281252633e-06,
"loss": 0.4819,
"step": 82
},
{
"epoch": 0.1318506751389992,
"grad_norm": 1.4583051204681396,
"learning_rate": 8.548121372247918e-06,
"loss": 0.7409,
"step": 83
},
{
"epoch": 0.13343923749007147,
"grad_norm": 1.6111057996749878,
"learning_rate": 7.597595192178702e-06,
"loss": 0.6727,
"step": 84
},
{
"epoch": 0.13502779984114377,
"grad_norm": 1.4775164127349854,
"learning_rate": 6.698729810778065e-06,
"loss": 0.4757,
"step": 85
},
{
"epoch": 0.13661636219221604,
"grad_norm": 2.2460825443267822,
"learning_rate": 5.852620357053651e-06,
"loss": 0.8622,
"step": 86
},
{
"epoch": 0.13820492454328834,
"grad_norm": 2.399625778198242,
"learning_rate": 5.060297685041659e-06,
"loss": 0.9663,
"step": 87
},
{
"epoch": 0.1397934868943606,
"grad_norm": 1.7391408681869507,
"learning_rate": 4.322727117869951e-06,
"loss": 0.8286,
"step": 88
},
{
"epoch": 0.14138204924543288,
"grad_norm": 1.061781406402588,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.3296,
"step": 89
},
{
"epoch": 0.14297061159650518,
"grad_norm": 0.9983460903167725,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.2936,
"step": 90
},
{
"epoch": 0.14297061159650518,
"eval_loss": 0.5319394469261169,
"eval_runtime": 24.9759,
"eval_samples_per_second": 21.22,
"eval_steps_per_second": 2.683,
"step": 90
},
{
"epoch": 0.14455917394757745,
"grad_norm": 1.3195279836654663,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.4158,
"step": 91
},
{
"epoch": 0.14614773629864972,
"grad_norm": 1.5764867067337036,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.9167,
"step": 92
},
{
"epoch": 0.14773629864972201,
"grad_norm": 1.1892976760864258,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.2918,
"step": 93
},
{
"epoch": 0.14932486100079428,
"grad_norm": 1.2646450996398926,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.5086,
"step": 94
},
{
"epoch": 0.15091342335186655,
"grad_norm": 0.7004561424255371,
"learning_rate": 7.596123493895991e-07,
"loss": 0.3725,
"step": 95
},
{
"epoch": 0.15250198570293885,
"grad_norm": 1.6172034740447998,
"learning_rate": 4.865965629214819e-07,
"loss": 0.6218,
"step": 96
},
{
"epoch": 0.15409054805401112,
"grad_norm": 0.9688251614570618,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.4396,
"step": 97
},
{
"epoch": 0.1556791104050834,
"grad_norm": 1.4910237789154053,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.6491,
"step": 98
},
{
"epoch": 0.1572676727561557,
"grad_norm": 2.0912060737609863,
"learning_rate": 3.04586490452119e-08,
"loss": 0.8586,
"step": 99
},
{
"epoch": 0.1572676727561557,
"eval_loss": 0.5280284881591797,
"eval_runtime": 24.9771,
"eval_samples_per_second": 21.219,
"eval_steps_per_second": 2.682,
"step": 99
},
{
"epoch": 0.15885623510722796,
"grad_norm": 0.985113263130188,
"learning_rate": 0.0,
"loss": 0.4754,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3499729845420032e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}