Training in progress, step 100, checkpoint (commit f40be70, verified)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.015659254619480114,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00015659254619480113,
"grad_norm": 0.9751595258712769,
"learning_rate": 1e-05,
"loss": 2.5701,
"step": 1
},
{
"epoch": 0.00015659254619480113,
"eval_loss": 2.5808298587799072,
"eval_runtime": 342.618,
"eval_samples_per_second": 15.697,
"eval_steps_per_second": 1.964,
"step": 1
},
{
"epoch": 0.00031318509238960227,
"grad_norm": 1.1219534873962402,
"learning_rate": 2e-05,
"loss": 2.1723,
"step": 2
},
{
"epoch": 0.0004697776385844034,
"grad_norm": 0.8830795288085938,
"learning_rate": 3e-05,
"loss": 2.7015,
"step": 3
},
{
"epoch": 0.0006263701847792045,
"grad_norm": 0.8768100142478943,
"learning_rate": 4e-05,
"loss": 2.4673,
"step": 4
},
{
"epoch": 0.0007829627309740056,
"grad_norm": 0.871509313583374,
"learning_rate": 5e-05,
"loss": 2.639,
"step": 5
},
{
"epoch": 0.0009395552771688068,
"grad_norm": 0.8949034214019775,
"learning_rate": 6e-05,
"loss": 2.8442,
"step": 6
},
{
"epoch": 0.001096147823363608,
"grad_norm": 0.8944017291069031,
"learning_rate": 7e-05,
"loss": 2.7632,
"step": 7
},
{
"epoch": 0.001252740369558409,
"grad_norm": 0.9067510366439819,
"learning_rate": 8e-05,
"loss": 2.5252,
"step": 8
},
{
"epoch": 0.0014093329157532101,
"grad_norm": 0.9196743965148926,
"learning_rate": 9e-05,
"loss": 2.4146,
"step": 9
},
{
"epoch": 0.0014093329157532101,
"eval_loss": 2.459617853164673,
"eval_runtime": 341.6715,
"eval_samples_per_second": 15.74,
"eval_steps_per_second": 1.97,
"step": 9
},
{
"epoch": 0.0015659254619480112,
"grad_norm": 1.0025465488433838,
"learning_rate": 0.0001,
"loss": 2.6063,
"step": 10
},
{
"epoch": 0.0017225180081428123,
"grad_norm": 0.8671764731407166,
"learning_rate": 9.99695413509548e-05,
"loss": 2.4126,
"step": 11
},
{
"epoch": 0.0018791105543376136,
"grad_norm": 0.8781599402427673,
"learning_rate": 9.987820251299122e-05,
"loss": 2.354,
"step": 12
},
{
"epoch": 0.0020357031005324147,
"grad_norm": 0.8639190793037415,
"learning_rate": 9.972609476841367e-05,
"loss": 2.1836,
"step": 13
},
{
"epoch": 0.002192295646727216,
"grad_norm": 0.8021327257156372,
"learning_rate": 9.951340343707852e-05,
"loss": 2.554,
"step": 14
},
{
"epoch": 0.002348888192922017,
"grad_norm": 1.0592883825302124,
"learning_rate": 9.924038765061042e-05,
"loss": 2.1199,
"step": 15
},
{
"epoch": 0.002505480739116818,
"grad_norm": 0.8209556341171265,
"learning_rate": 9.890738003669029e-05,
"loss": 2.3206,
"step": 16
},
{
"epoch": 0.002662073285311619,
"grad_norm": 0.7151332497596741,
"learning_rate": 9.851478631379982e-05,
"loss": 2.5804,
"step": 17
},
{
"epoch": 0.0028186658315064203,
"grad_norm": 0.7842516899108887,
"learning_rate": 9.806308479691595e-05,
"loss": 2.1459,
"step": 18
},
{
"epoch": 0.0028186658315064203,
"eval_loss": 1.9956408739089966,
"eval_runtime": 341.649,
"eval_samples_per_second": 15.741,
"eval_steps_per_second": 1.97,
"step": 18
},
{
"epoch": 0.0029752583777012216,
"grad_norm": 0.8641836643218994,
"learning_rate": 9.755282581475769e-05,
"loss": 1.7574,
"step": 19
},
{
"epoch": 0.0031318509238960224,
"grad_norm": 0.9240981936454773,
"learning_rate": 9.698463103929542e-05,
"loss": 1.8377,
"step": 20
},
{
"epoch": 0.0032884434700908237,
"grad_norm": 0.9617156386375427,
"learning_rate": 9.635919272833938e-05,
"loss": 2.0198,
"step": 21
},
{
"epoch": 0.0034450360162856246,
"grad_norm": 0.9704702496528625,
"learning_rate": 9.567727288213005e-05,
"loss": 2.1772,
"step": 22
},
{
"epoch": 0.003601628562480426,
"grad_norm": 0.7572294473648071,
"learning_rate": 9.493970231495835e-05,
"loss": 2.0255,
"step": 23
},
{
"epoch": 0.003758221108675227,
"grad_norm": 0.9574043154716492,
"learning_rate": 9.414737964294636e-05,
"loss": 1.8707,
"step": 24
},
{
"epoch": 0.0039148136548700285,
"grad_norm": 0.9383586049079895,
"learning_rate": 9.330127018922194e-05,
"loss": 2.091,
"step": 25
},
{
"epoch": 0.004071406201064829,
"grad_norm": 0.8978224396705627,
"learning_rate": 9.24024048078213e-05,
"loss": 1.6222,
"step": 26
},
{
"epoch": 0.00422799874725963,
"grad_norm": 0.910511314868927,
"learning_rate": 9.145187862775209e-05,
"loss": 1.4146,
"step": 27
},
{
"epoch": 0.00422799874725963,
"eval_loss": 1.7382819652557373,
"eval_runtime": 341.6711,
"eval_samples_per_second": 15.74,
"eval_steps_per_second": 1.97,
"step": 27
},
{
"epoch": 0.004384591293454432,
"grad_norm": 1.0003559589385986,
"learning_rate": 9.045084971874738e-05,
"loss": 1.4551,
"step": 28
},
{
"epoch": 0.004541183839649233,
"grad_norm": 0.9404575824737549,
"learning_rate": 8.940053768033609e-05,
"loss": 1.5461,
"step": 29
},
{
"epoch": 0.004697776385844034,
"grad_norm": 0.8040342330932617,
"learning_rate": 8.83022221559489e-05,
"loss": 1.3199,
"step": 30
},
{
"epoch": 0.0048543689320388345,
"grad_norm": 0.7769756317138672,
"learning_rate": 8.715724127386972e-05,
"loss": 1.7402,
"step": 31
},
{
"epoch": 0.005010961478233636,
"grad_norm": 0.6750996112823486,
"learning_rate": 8.596699001693255e-05,
"loss": 1.7677,
"step": 32
},
{
"epoch": 0.005167554024428437,
"grad_norm": 0.8155618906021118,
"learning_rate": 8.473291852294987e-05,
"loss": 1.2559,
"step": 33
},
{
"epoch": 0.005324146570623238,
"grad_norm": 0.8737456202507019,
"learning_rate": 8.345653031794292e-05,
"loss": 1.7302,
"step": 34
},
{
"epoch": 0.00548073911681804,
"grad_norm": 0.7429255247116089,
"learning_rate": 8.213938048432697e-05,
"loss": 1.5429,
"step": 35
},
{
"epoch": 0.005637331663012841,
"grad_norm": 0.787848174571991,
"learning_rate": 8.07830737662829e-05,
"loss": 1.8001,
"step": 36
},
{
"epoch": 0.005637331663012841,
"eval_loss": 1.5025882720947266,
"eval_runtime": 341.681,
"eval_samples_per_second": 15.74,
"eval_steps_per_second": 1.97,
"step": 36
},
{
"epoch": 0.0057939242092076414,
"grad_norm": 0.8274391293525696,
"learning_rate": 7.938926261462366e-05,
"loss": 1.5021,
"step": 37
},
{
"epoch": 0.005950516755402443,
"grad_norm": 0.684043824672699,
"learning_rate": 7.795964517353735e-05,
"loss": 1.6989,
"step": 38
},
{
"epoch": 0.006107109301597244,
"grad_norm": 0.5877222418785095,
"learning_rate": 7.649596321166024e-05,
"loss": 1.9083,
"step": 39
},
{
"epoch": 0.006263701847792045,
"grad_norm": 0.8567363023757935,
"learning_rate": 7.500000000000001e-05,
"loss": 1.3317,
"step": 40
},
{
"epoch": 0.006420294393986847,
"grad_norm": 0.789842963218689,
"learning_rate": 7.347357813929454e-05,
"loss": 1.233,
"step": 41
},
{
"epoch": 0.0065768869401816475,
"grad_norm": 0.8660836219787598,
"learning_rate": 7.191855733945387e-05,
"loss": 1.1597,
"step": 42
},
{
"epoch": 0.006733479486376448,
"grad_norm": 0.7116528153419495,
"learning_rate": 7.033683215379002e-05,
"loss": 1.2998,
"step": 43
},
{
"epoch": 0.006890072032571249,
"grad_norm": 0.7719988226890564,
"learning_rate": 6.873032967079561e-05,
"loss": 1.5096,
"step": 44
},
{
"epoch": 0.007046664578766051,
"grad_norm": 0.6935805678367615,
"learning_rate": 6.710100716628344e-05,
"loss": 1.6491,
"step": 45
},
{
"epoch": 0.007046664578766051,
"eval_loss": 1.310565710067749,
"eval_runtime": 341.6735,
"eval_samples_per_second": 15.74,
"eval_steps_per_second": 1.97,
"step": 45
},
{
"epoch": 0.007203257124960852,
"grad_norm": 0.6075599789619446,
"learning_rate": 6.545084971874738e-05,
"loss": 1.4503,
"step": 46
},
{
"epoch": 0.007359849671155653,
"grad_norm": 0.5467920899391174,
"learning_rate": 6.378186779084995e-05,
"loss": 1.5001,
"step": 47
},
{
"epoch": 0.007516442217350454,
"grad_norm": 0.6646414995193481,
"learning_rate": 6.209609477998338e-05,
"loss": 0.9366,
"step": 48
},
{
"epoch": 0.007673034763545255,
"grad_norm": 0.6338045597076416,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.3771,
"step": 49
},
{
"epoch": 0.007829627309740057,
"grad_norm": 0.6069777011871338,
"learning_rate": 5.868240888334653e-05,
"loss": 1.2211,
"step": 50
},
{
"epoch": 0.007986219855934857,
"grad_norm": 0.6526830196380615,
"learning_rate": 5.695865504800327e-05,
"loss": 1.5053,
"step": 51
},
{
"epoch": 0.008142812402129659,
"grad_norm": 0.5835939049720764,
"learning_rate": 5.522642316338268e-05,
"loss": 1.1738,
"step": 52
},
{
"epoch": 0.00829940494832446,
"grad_norm": 0.5789561867713928,
"learning_rate": 5.348782368720626e-05,
"loss": 1.4458,
"step": 53
},
{
"epoch": 0.00845599749451926,
"grad_norm": 0.6865813136100769,
"learning_rate": 5.174497483512506e-05,
"loss": 1.0369,
"step": 54
},
{
"epoch": 0.00845599749451926,
"eval_loss": 1.1971049308776855,
"eval_runtime": 341.6362,
"eval_samples_per_second": 15.742,
"eval_steps_per_second": 1.97,
"step": 54
},
{
"epoch": 0.008612590040714062,
"grad_norm": 0.5503248572349548,
"learning_rate": 5e-05,
"loss": 1.2641,
"step": 55
},
{
"epoch": 0.008769182586908864,
"grad_norm": 0.5388333201408386,
"learning_rate": 4.825502516487497e-05,
"loss": 1.5671,
"step": 56
},
{
"epoch": 0.008925775133103664,
"grad_norm": 0.5866682529449463,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.0836,
"step": 57
},
{
"epoch": 0.009082367679298466,
"grad_norm": 0.49063777923583984,
"learning_rate": 4.477357683661734e-05,
"loss": 1.4806,
"step": 58
},
{
"epoch": 0.009238960225493267,
"grad_norm": 0.5331360101699829,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.3006,
"step": 59
},
{
"epoch": 0.009395552771688067,
"grad_norm": 0.5101089477539062,
"learning_rate": 4.131759111665349e-05,
"loss": 1.1633,
"step": 60
},
{
"epoch": 0.009552145317882869,
"grad_norm": 0.5948686599731445,
"learning_rate": 3.960441545911204e-05,
"loss": 0.9675,
"step": 61
},
{
"epoch": 0.009708737864077669,
"grad_norm": 0.49615660309791565,
"learning_rate": 3.790390522001662e-05,
"loss": 1.5511,
"step": 62
},
{
"epoch": 0.00986533041027247,
"grad_norm": 0.5018711090087891,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.241,
"step": 63
},
{
"epoch": 0.00986533041027247,
"eval_loss": 1.130515694618225,
"eval_runtime": 341.6235,
"eval_samples_per_second": 15.742,
"eval_steps_per_second": 1.97,
"step": 63
},
{
"epoch": 0.010021922956467273,
"grad_norm": 0.6017839908599854,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.2535,
"step": 64
},
{
"epoch": 0.010178515502662073,
"grad_norm": 0.5347826480865479,
"learning_rate": 3.289899283371657e-05,
"loss": 1.1951,
"step": 65
},
{
"epoch": 0.010335108048856874,
"grad_norm": 0.5654888153076172,
"learning_rate": 3.12696703292044e-05,
"loss": 0.7469,
"step": 66
},
{
"epoch": 0.010491700595051676,
"grad_norm": 0.5342015624046326,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.1084,
"step": 67
},
{
"epoch": 0.010648293141246476,
"grad_norm": 0.5312244296073914,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.301,
"step": 68
},
{
"epoch": 0.010804885687441278,
"grad_norm": 0.5925493240356445,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.0394,
"step": 69
},
{
"epoch": 0.01096147823363608,
"grad_norm": 0.6024527549743652,
"learning_rate": 2.500000000000001e-05,
"loss": 1.3372,
"step": 70
},
{
"epoch": 0.01111807077983088,
"grad_norm": 0.5528858304023743,
"learning_rate": 2.350403678833976e-05,
"loss": 1.0018,
"step": 71
},
{
"epoch": 0.011274663326025681,
"grad_norm": 0.5777594447135925,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.0069,
"step": 72
},
{
"epoch": 0.011274663326025681,
"eval_loss": 1.09247887134552,
"eval_runtime": 341.6437,
"eval_samples_per_second": 15.742,
"eval_steps_per_second": 1.97,
"step": 72
},
{
"epoch": 0.011431255872220483,
"grad_norm": 0.5311472415924072,
"learning_rate": 2.061073738537635e-05,
"loss": 1.3165,
"step": 73
},
{
"epoch": 0.011587848418415283,
"grad_norm": 0.5156391263008118,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.93,
"step": 74
},
{
"epoch": 0.011744440964610085,
"grad_norm": 0.5066985487937927,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.6743,
"step": 75
},
{
"epoch": 0.011901033510804886,
"grad_norm": 0.49672675132751465,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.3063,
"step": 76
},
{
"epoch": 0.012057626056999686,
"grad_norm": 0.5522364377975464,
"learning_rate": 1.526708147705013e-05,
"loss": 0.8666,
"step": 77
},
{
"epoch": 0.012214218603194488,
"grad_norm": 0.6612314581871033,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.9022,
"step": 78
},
{
"epoch": 0.01237081114938929,
"grad_norm": 0.5317505598068237,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.0386,
"step": 79
},
{
"epoch": 0.01252740369558409,
"grad_norm": 0.5432467460632324,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.8287,
"step": 80
},
{
"epoch": 0.012683996241778892,
"grad_norm": 0.4582969844341278,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.4847,
"step": 81
},
{
"epoch": 0.012683996241778892,
"eval_loss": 1.0726039409637451,
"eval_runtime": 341.6866,
"eval_samples_per_second": 15.74,
"eval_steps_per_second": 1.97,
"step": 81
},
{
"epoch": 0.012840588787973693,
"grad_norm": 0.49256080389022827,
"learning_rate": 9.549150281252633e-06,
"loss": 1.5165,
"step": 82
},
{
"epoch": 0.012997181334168493,
"grad_norm": 0.4961276352405548,
"learning_rate": 8.548121372247918e-06,
"loss": 1.1499,
"step": 83
},
{
"epoch": 0.013153773880363295,
"grad_norm": 0.5461624264717102,
"learning_rate": 7.597595192178702e-06,
"loss": 0.8055,
"step": 84
},
{
"epoch": 0.013310366426558097,
"grad_norm": 0.49188461899757385,
"learning_rate": 6.698729810778065e-06,
"loss": 1.1544,
"step": 85
},
{
"epoch": 0.013466958972752897,
"grad_norm": 0.5389429330825806,
"learning_rate": 5.852620357053651e-06,
"loss": 1.3448,
"step": 86
},
{
"epoch": 0.013623551518947698,
"grad_norm": 0.521251916885376,
"learning_rate": 5.060297685041659e-06,
"loss": 1.0522,
"step": 87
},
{
"epoch": 0.013780144065142498,
"grad_norm": 0.5706208944320679,
"learning_rate": 4.322727117869951e-06,
"loss": 1.4151,
"step": 88
},
{
"epoch": 0.0139367366113373,
"grad_norm": 0.5204151272773743,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.1975,
"step": 89
},
{
"epoch": 0.014093329157532102,
"grad_norm": 0.6270158886909485,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.9621,
"step": 90
},
{
"epoch": 0.014093329157532102,
"eval_loss": 1.0648150444030762,
"eval_runtime": 341.792,
"eval_samples_per_second": 15.735,
"eval_steps_per_second": 1.969,
"step": 90
},
{
"epoch": 0.014249921703726902,
"grad_norm": 0.6548490524291992,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.9719,
"step": 91
},
{
"epoch": 0.014406514249921704,
"grad_norm": 0.5670599937438965,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.9317,
"step": 92
},
{
"epoch": 0.014563106796116505,
"grad_norm": 0.4700786769390106,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.766,
"step": 93
},
{
"epoch": 0.014719699342311305,
"grad_norm": 0.47002893686294556,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.5283,
"step": 94
},
{
"epoch": 0.014876291888506107,
"grad_norm": 0.46464380621910095,
"learning_rate": 7.596123493895991e-07,
"loss": 0.9799,
"step": 95
},
{
"epoch": 0.015032884434700909,
"grad_norm": 0.5638867020606995,
"learning_rate": 4.865965629214819e-07,
"loss": 1.1551,
"step": 96
},
{
"epoch": 0.015189476980895709,
"grad_norm": 0.49153435230255127,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.1155,
"step": 97
},
{
"epoch": 0.01534606952709051,
"grad_norm": 0.45866310596466064,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.049,
"step": 98
},
{
"epoch": 0.015502662073285312,
"grad_norm": 0.4746398329734802,
"learning_rate": 3.04586490452119e-08,
"loss": 1.6255,
"step": 99
},
{
"epoch": 0.015502662073285312,
"eval_loss": 1.063291311264038,
"eval_runtime": 341.6956,
"eval_samples_per_second": 15.739,
"eval_steps_per_second": 1.97,
"step": 99
},
{
"epoch": 0.015659254619480114,
"grad_norm": 0.6611264944076538,
"learning_rate": 0.0,
"loss": 1.0954,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.05873754406912e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
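
A minimal sketch for inspecting this trainer_state.json, not part of the checkpoint itself. The file path is an assumption (point it at the actual checkpoint-100 directory), and the `expected_lr` helper is hypothetical: it encodes the schedule the logged learning_rate values appear to follow, i.e. linear warmup to 1e-4 over the first 10 steps, then cosine decay to 0 over the remaining 90.

```python
# Sketch: read the checkpoint's trainer_state.json, list the eval-loss curve,
# and compare the logged learning rates against an assumed warmup+cosine schedule.
import json
import math
from pathlib import Path

# Assumed location of the checkpoint directory; adjust as needed.
state = json.loads(Path("checkpoint-100/trainer_state.json").read_text())

# log_history mixes training entries (with "loss") and eval entries (with "eval_loss").
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.6f}")
print("eval loss by step:")
for step, loss in evals:
    print(f"  step {step:>3}: {loss:.4f}")

def expected_lr(step, peak=1e-4, warmup=10, total=100):
    """Hypothetical schedule: linear warmup, then cosine decay to zero."""
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

max_dev = max(abs(e["learning_rate"] - expected_lr(e["step"]))
              for e in state["log_history"] if "learning_rate" in e)
print(f"max deviation from assumed schedule: {max_dev:.2e}")
```

As recorded above, eval_loss falls from 2.58 at step 1 to about 1.06 at step 99, with evaluation every 9 steps (`eval_steps`) and checkpoints every 25 (`save_steps`).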