{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.007975435658172828,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 7.975435658172828e-05,
"grad_norm": 58.57548904418945,
"learning_rate": 1e-05,
"loss": 12.2331,
"step": 1
},
{
"epoch": 7.975435658172828e-05,
"eval_loss": 12.32491397857666,
"eval_runtime": 554.4191,
"eval_samples_per_second": 19.045,
"eval_steps_per_second": 2.381,
"step": 1
},
{
"epoch": 0.00015950871316345656,
"grad_norm": 50.0570182800293,
"learning_rate": 2e-05,
"loss": 12.694,
"step": 2
},
{
"epoch": 0.00023926306974518484,
"grad_norm": 54.007179260253906,
"learning_rate": 3e-05,
"loss": 12.2986,
"step": 3
},
{
"epoch": 0.0003190174263269131,
"grad_norm": 49.70405960083008,
"learning_rate": 4e-05,
"loss": 11.9994,
"step": 4
},
{
"epoch": 0.00039877178290864137,
"grad_norm": 44.04093933105469,
"learning_rate": 5e-05,
"loss": 9.5563,
"step": 5
},
{
"epoch": 0.0004785261394903697,
"grad_norm": 39.21303939819336,
"learning_rate": 6e-05,
"loss": 7.4503,
"step": 6
},
{
"epoch": 0.000558280496072098,
"grad_norm": 49.072200775146484,
"learning_rate": 7e-05,
"loss": 3.8217,
"step": 7
},
{
"epoch": 0.0006380348526538262,
"grad_norm": 19.435436248779297,
"learning_rate": 8e-05,
"loss": 0.8504,
"step": 8
},
{
"epoch": 0.0007177892092355545,
"grad_norm": 4.429564476013184,
"learning_rate": 9e-05,
"loss": 0.6351,
"step": 9
},
{
"epoch": 0.0007177892092355545,
"eval_loss": 0.4419458508491516,
"eval_runtime": 553.8414,
"eval_samples_per_second": 19.065,
"eval_steps_per_second": 2.383,
"step": 9
},
{
"epoch": 0.0007975435658172827,
"grad_norm": 0.0200470183044672,
"learning_rate": 0.0001,
"loss": 0.0003,
"step": 10
},
{
"epoch": 0.0008772979223990111,
"grad_norm": 15.89320182800293,
"learning_rate": 9.99695413509548e-05,
"loss": 0.8913,
"step": 11
},
{
"epoch": 0.0009570522789807393,
"grad_norm": 64.74942016601562,
"learning_rate": 9.987820251299122e-05,
"loss": 0.7326,
"step": 12
},
{
"epoch": 0.0010368066355624676,
"grad_norm": 2.8113746643066406,
"learning_rate": 9.972609476841367e-05,
"loss": 0.0253,
"step": 13
},
{
"epoch": 0.001116560992144196,
"grad_norm": 0.020627321675419807,
"learning_rate": 9.951340343707852e-05,
"loss": 0.0002,
"step": 14
},
{
"epoch": 0.001196315348725924,
"grad_norm": 3.4705936908721924,
"learning_rate": 9.924038765061042e-05,
"loss": 0.282,
"step": 15
},
{
"epoch": 0.0012760697053076525,
"grad_norm": 8.107240676879883,
"learning_rate": 9.890738003669029e-05,
"loss": 0.3368,
"step": 16
},
{
"epoch": 0.0013558240618893806,
"grad_norm": 7.4358367919921875,
"learning_rate": 9.851478631379982e-05,
"loss": 0.1098,
"step": 17
},
{
"epoch": 0.001435578418471109,
"grad_norm": 3.5417537689208984,
"learning_rate": 9.806308479691595e-05,
"loss": 0.1003,
"step": 18
},
{
"epoch": 0.001435578418471109,
"eval_loss": 0.06634360551834106,
"eval_runtime": 553.4057,
"eval_samples_per_second": 19.08,
"eval_steps_per_second": 2.385,
"step": 18
},
{
"epoch": 0.0015153327750528373,
"grad_norm": 1.3719432353973389,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0373,
"step": 19
},
{
"epoch": 0.0015950871316345655,
"grad_norm": 0.40530410408973694,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0055,
"step": 20
},
{
"epoch": 0.0016748414882162938,
"grad_norm": 4.31416130065918,
"learning_rate": 9.635919272833938e-05,
"loss": 0.1612,
"step": 21
},
{
"epoch": 0.0017545958447980222,
"grad_norm": 0.035329561680555344,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0005,
"step": 22
},
{
"epoch": 0.0018343502013797503,
"grad_norm": 0.30401676893234253,
"learning_rate": 9.493970231495835e-05,
"loss": 0.0022,
"step": 23
},
{
"epoch": 0.0019141045579614787,
"grad_norm": 0.19815795123577118,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0013,
"step": 24
},
{
"epoch": 0.001993858914543207,
"grad_norm": 0.12030813843011856,
"learning_rate": 9.330127018922194e-05,
"loss": 0.0008,
"step": 25
},
{
"epoch": 0.002073613271124935,
"grad_norm": 0.025320129469037056,
"learning_rate": 9.24024048078213e-05,
"loss": 0.0003,
"step": 26
},
{
"epoch": 0.0021533676277066633,
"grad_norm": 0.9278749823570251,
"learning_rate": 9.145187862775209e-05,
"loss": 0.0106,
"step": 27
},
{
"epoch": 0.0021533676277066633,
"eval_loss": 0.09537636488676071,
"eval_runtime": 553.5551,
"eval_samples_per_second": 19.075,
"eval_steps_per_second": 2.385,
"step": 27
},
{
"epoch": 0.002233121984288392,
"grad_norm": 0.07682035863399506,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0008,
"step": 28
},
{
"epoch": 0.00231287634087012,
"grad_norm": 5.222334384918213,
"learning_rate": 8.940053768033609e-05,
"loss": 0.1142,
"step": 29
},
{
"epoch": 0.002392630697451848,
"grad_norm": 7.269686222076416,
"learning_rate": 8.83022221559489e-05,
"loss": 0.4306,
"step": 30
},
{
"epoch": 0.0024723850540335768,
"grad_norm": 5.748641014099121,
"learning_rate": 8.715724127386972e-05,
"loss": 0.2281,
"step": 31
},
{
"epoch": 0.002552139410615305,
"grad_norm": 2.5787816047668457,
"learning_rate": 8.596699001693255e-05,
"loss": 0.1349,
"step": 32
},
{
"epoch": 0.002631893767197033,
"grad_norm": 3.364776611328125,
"learning_rate": 8.473291852294987e-05,
"loss": 0.0688,
"step": 33
},
{
"epoch": 0.002711648123778761,
"grad_norm": 4.464049816131592,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0255,
"step": 34
},
{
"epoch": 0.00279140248036049,
"grad_norm": 1.2259021997451782,
"learning_rate": 8.213938048432697e-05,
"loss": 0.0306,
"step": 35
},
{
"epoch": 0.002871156836942218,
"grad_norm": 0.13717339932918549,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0029,
"step": 36
},
{
"epoch": 0.002871156836942218,
"eval_loss": 0.042516034096479416,
"eval_runtime": 553.518,
"eval_samples_per_second": 19.076,
"eval_steps_per_second": 2.385,
"step": 36
},
{
"epoch": 0.002950911193523946,
"grad_norm": 2.9602785110473633,
"learning_rate": 7.938926261462366e-05,
"loss": 0.1396,
"step": 37
},
{
"epoch": 0.0030306655501056746,
"grad_norm": 0.29989227652549744,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0094,
"step": 38
},
{
"epoch": 0.003110419906687403,
"grad_norm": 1.6319737434387207,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0541,
"step": 39
},
{
"epoch": 0.003190174263269131,
"grad_norm": 1.022342324256897,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0178,
"step": 40
},
{
"epoch": 0.0032699286198508595,
"grad_norm": 0.27077966928482056,
"learning_rate": 7.347357813929454e-05,
"loss": 0.0071,
"step": 41
},
{
"epoch": 0.0033496829764325877,
"grad_norm": 2.3816652297973633,
"learning_rate": 7.191855733945387e-05,
"loss": 0.0555,
"step": 42
},
{
"epoch": 0.003429437333014316,
"grad_norm": 3.106501579284668,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0806,
"step": 43
},
{
"epoch": 0.0035091916895960444,
"grad_norm": 0.04690181091427803,
"learning_rate": 6.873032967079561e-05,
"loss": 0.001,
"step": 44
},
{
"epoch": 0.0035889460461777725,
"grad_norm": 0.15731936693191528,
"learning_rate": 6.710100716628344e-05,
"loss": 0.0036,
"step": 45
},
{
"epoch": 0.0035889460461777725,
"eval_loss": 0.04225198179483414,
"eval_runtime": 553.5138,
"eval_samples_per_second": 19.076,
"eval_steps_per_second": 2.385,
"step": 45
},
{
"epoch": 0.0036687004027595007,
"grad_norm": 3.039454698562622,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0774,
"step": 46
},
{
"epoch": 0.003748454759341229,
"grad_norm": 0.4984506666660309,
"learning_rate": 6.378186779084995e-05,
"loss": 0.0084,
"step": 47
},
{
"epoch": 0.0038282091159229574,
"grad_norm": 0.03135956823825836,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0005,
"step": 48
},
{
"epoch": 0.003907963472504686,
"grad_norm": 3.206134796142578,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.0499,
"step": 49
},
{
"epoch": 0.003987717829086414,
"grad_norm": 4.812526226043701,
"learning_rate": 5.868240888334653e-05,
"loss": 0.2402,
"step": 50
},
{
"epoch": 0.004067472185668142,
"grad_norm": 0.019093716517090797,
"learning_rate": 5.695865504800327e-05,
"loss": 0.0003,
"step": 51
},
{
"epoch": 0.00414722654224987,
"grad_norm": 6.016909122467041,
"learning_rate": 5.522642316338268e-05,
"loss": 0.1171,
"step": 52
},
{
"epoch": 0.0042269808988315985,
"grad_norm": 3.237635850906372,
"learning_rate": 5.348782368720626e-05,
"loss": 0.0479,
"step": 53
},
{
"epoch": 0.004306735255413327,
"grad_norm": 0.45389387011528015,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0071,
"step": 54
},
{
"epoch": 0.004306735255413327,
"eval_loss": 0.039176907390356064,
"eval_runtime": 553.614,
"eval_samples_per_second": 19.073,
"eval_steps_per_second": 2.384,
"step": 54
},
{
"epoch": 0.004386489611995055,
"grad_norm": 1.7394068241119385,
"learning_rate": 5e-05,
"loss": 0.0313,
"step": 55
},
{
"epoch": 0.004466243968576784,
"grad_norm": 1.723865270614624,
"learning_rate": 4.825502516487497e-05,
"loss": 0.0238,
"step": 56
},
{
"epoch": 0.004545998325158512,
"grad_norm": 2.5933642387390137,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.0253,
"step": 57
},
{
"epoch": 0.00462575268174024,
"grad_norm": 0.04176973178982735,
"learning_rate": 4.477357683661734e-05,
"loss": 0.0006,
"step": 58
},
{
"epoch": 0.004705507038321968,
"grad_norm": 3.8093903064727783,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.0485,
"step": 59
},
{
"epoch": 0.004785261394903696,
"grad_norm": 4.592166900634766,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0812,
"step": 60
},
{
"epoch": 0.0048650157514854245,
"grad_norm": 0.9173868298530579,
"learning_rate": 3.960441545911204e-05,
"loss": 0.0104,
"step": 61
},
{
"epoch": 0.0049447701080671536,
"grad_norm": 1.9208635091781616,
"learning_rate": 3.790390522001662e-05,
"loss": 0.0213,
"step": 62
},
{
"epoch": 0.005024524464648882,
"grad_norm": 2.4270222187042236,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.0342,
"step": 63
},
{
"epoch": 0.005024524464648882,
"eval_loss": 0.02897912636399269,
"eval_runtime": 553.584,
"eval_samples_per_second": 19.074,
"eval_steps_per_second": 2.384,
"step": 63
},
{
"epoch": 0.00510427882123061,
"grad_norm": 0.15133006870746613,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0014,
"step": 64
},
{
"epoch": 0.005184033177812338,
"grad_norm": 3.995213747024536,
"learning_rate": 3.289899283371657e-05,
"loss": 0.2232,
"step": 65
},
{
"epoch": 0.005263787534394066,
"grad_norm": 0.10878176242113113,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0011,
"step": 66
},
{
"epoch": 0.005343541890975794,
"grad_norm": 0.055173374712467194,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.0007,
"step": 67
},
{
"epoch": 0.005423296247557522,
"grad_norm": 5.00130558013916,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.1167,
"step": 68
},
{
"epoch": 0.005503050604139251,
"grad_norm": 0.32802918553352356,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.0026,
"step": 69
},
{
"epoch": 0.00558280496072098,
"grad_norm": 0.10490323603153229,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0014,
"step": 70
},
{
"epoch": 0.005662559317302708,
"grad_norm": 0.014080111868679523,
"learning_rate": 2.350403678833976e-05,
"loss": 0.0002,
"step": 71
},
{
"epoch": 0.005742313673884436,
"grad_norm": 0.021097006276249886,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0005,
"step": 72
},
{
"epoch": 0.005742313673884436,
"eval_loss": 0.038278449326753616,
"eval_runtime": 553.4245,
"eval_samples_per_second": 19.079,
"eval_steps_per_second": 2.385,
"step": 72
},
{
"epoch": 0.005822068030466164,
"grad_norm": 0.014087294228374958,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0002,
"step": 73
},
{
"epoch": 0.005901822387047892,
"grad_norm": 0.00338623090647161,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0001,
"step": 74
},
{
"epoch": 0.005981576743629621,
"grad_norm": 0.0011795306345447898,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.006061331100211349,
"grad_norm": 5.963899612426758,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0601,
"step": 76
},
{
"epoch": 0.0061410854567930774,
"grad_norm": 0.037896182388067245,
"learning_rate": 1.526708147705013e-05,
"loss": 0.0006,
"step": 77
},
{
"epoch": 0.006220839813374806,
"grad_norm": 0.24905402958393097,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.0033,
"step": 78
},
{
"epoch": 0.006300594169956534,
"grad_norm": 0.02116982452571392,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.0003,
"step": 79
},
{
"epoch": 0.006380348526538262,
"grad_norm": 0.001493555959314108,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.00646010288311999,
"grad_norm": 0.04112343490123749,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.0002,
"step": 81
},
{
"epoch": 0.00646010288311999,
"eval_loss": 0.03697933256626129,
"eval_runtime": 553.647,
"eval_samples_per_second": 19.072,
"eval_steps_per_second": 2.384,
"step": 81
},
{
"epoch": 0.006539857239701719,
"grad_norm": 0.852574348449707,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0052,
"step": 82
},
{
"epoch": 0.006619611596283447,
"grad_norm": 5.428268909454346,
"learning_rate": 8.548121372247918e-06,
"loss": 0.1395,
"step": 83
},
{
"epoch": 0.006699365952865175,
"grad_norm": 0.053005896508693695,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0007,
"step": 84
},
{
"epoch": 0.0067791203094469035,
"grad_norm": 1.3258788585662842,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0071,
"step": 85
},
{
"epoch": 0.006858874666028632,
"grad_norm": 0.125382199883461,
"learning_rate": 5.852620357053651e-06,
"loss": 0.0009,
"step": 86
},
{
"epoch": 0.00693862902261036,
"grad_norm": 0.025496359914541245,
"learning_rate": 5.060297685041659e-06,
"loss": 0.0003,
"step": 87
},
{
"epoch": 0.007018383379192089,
"grad_norm": 7.4097113609313965,
"learning_rate": 4.322727117869951e-06,
"loss": 0.1218,
"step": 88
},
{
"epoch": 0.007098137735773817,
"grad_norm": 0.0033981853630393744,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.0,
"step": 89
},
{
"epoch": 0.007177892092355545,
"grad_norm": 0.38592103123664856,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.0025,
"step": 90
},
{
"epoch": 0.007177892092355545,
"eval_loss": 0.03521589934825897,
"eval_runtime": 553.4928,
"eval_samples_per_second": 19.077,
"eval_steps_per_second": 2.385,
"step": 90
},
{
"epoch": 0.007257646448937273,
"grad_norm": 0.0367887020111084,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.0004,
"step": 91
},
{
"epoch": 0.007337400805519001,
"grad_norm": 0.030135678127408028,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.0003,
"step": 92
},
{
"epoch": 0.0074171551621007295,
"grad_norm": 0.30348286032676697,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.0022,
"step": 93
},
{
"epoch": 0.007496909518682458,
"grad_norm": 0.007436178158968687,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.0001,
"step": 94
},
{
"epoch": 0.007576663875264187,
"grad_norm": 3.1514601707458496,
"learning_rate": 7.596123493895991e-07,
"loss": 0.1337,
"step": 95
},
{
"epoch": 0.007656418231845915,
"grad_norm": 4.589775085449219,
"learning_rate": 4.865965629214819e-07,
"loss": 0.2785,
"step": 96
},
{
"epoch": 0.007736172588427643,
"grad_norm": 1.7154332399368286,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.0127,
"step": 97
},
{
"epoch": 0.007815926945009372,
"grad_norm": 4.0604634284973145,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0165,
"step": 98
},
{
"epoch": 0.0078956813015911,
"grad_norm": 10.015667915344238,
"learning_rate": 3.04586490452119e-08,
"loss": 0.1357,
"step": 99
},
{
"epoch": 0.0078956813015911,
"eval_loss": 0.03452092781662941,
"eval_runtime": 553.3103,
"eval_samples_per_second": 19.083,
"eval_steps_per_second": 2.386,
"step": 99
},
{
"epoch": 0.007975435658172828,
"grad_norm": 13.142882347106934,
"learning_rate": 0.0,
"loss": 0.1644,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.455967199782502e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}