{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.7782101167315175,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007782101167315175,
"grad_norm": 0.15050330758094788,
"learning_rate": 1e-05,
"loss": 10.3688,
"step": 1
},
{
"epoch": 0.007782101167315175,
"eval_loss": NaN,
"eval_runtime": 0.3507,
"eval_samples_per_second": 307.972,
"eval_steps_per_second": 39.922,
"step": 1
},
{
"epoch": 0.01556420233463035,
"grad_norm": 0.2052978277206421,
"learning_rate": 2e-05,
"loss": 10.3736,
"step": 2
},
{
"epoch": 0.023346303501945526,
"grad_norm": 0.22770772874355316,
"learning_rate": 3e-05,
"loss": 10.3135,
"step": 3
},
{
"epoch": 0.0311284046692607,
"grad_norm": 0.20140793919563293,
"learning_rate": 4e-05,
"loss": 10.2939,
"step": 4
},
{
"epoch": 0.038910505836575876,
"grad_norm": 0.19401751458644867,
"learning_rate": 5e-05,
"loss": 10.3256,
"step": 5
},
{
"epoch": 0.04669260700389105,
"grad_norm": 0.18698450922966003,
"learning_rate": 6e-05,
"loss": 10.3705,
"step": 6
},
{
"epoch": 0.054474708171206226,
"grad_norm": 0.27336716651916504,
"learning_rate": 7e-05,
"loss": 10.3191,
"step": 7
},
{
"epoch": 0.0622568093385214,
"grad_norm": 0.22764867544174194,
"learning_rate": 8e-05,
"loss": 10.3599,
"step": 8
},
{
"epoch": 0.07003891050583658,
"grad_norm": 0.21836555004119873,
"learning_rate": 9e-05,
"loss": 10.3509,
"step": 9
},
{
"epoch": 0.07003891050583658,
"eval_loss": NaN,
"eval_runtime": 0.3723,
"eval_samples_per_second": 290.06,
"eval_steps_per_second": 37.6,
"step": 9
},
{
"epoch": 0.07782101167315175,
"grad_norm": 0.09972412884235382,
"learning_rate": 0.0001,
"loss": 10.3629,
"step": 10
},
{
"epoch": 0.08560311284046693,
"grad_norm": 0.1307542771100998,
"learning_rate": 9.99695413509548e-05,
"loss": 10.3581,
"step": 11
},
{
"epoch": 0.0933852140077821,
"grad_norm": 0.21208789944648743,
"learning_rate": 9.987820251299122e-05,
"loss": 10.3348,
"step": 12
},
{
"epoch": 0.10116731517509728,
"grad_norm": 0.22375652194023132,
"learning_rate": 9.972609476841367e-05,
"loss": 10.3007,
"step": 13
},
{
"epoch": 0.10894941634241245,
"grad_norm": 0.2047107070684433,
"learning_rate": 9.951340343707852e-05,
"loss": 10.32,
"step": 14
},
{
"epoch": 0.11673151750972763,
"grad_norm": 0.16958235204219818,
"learning_rate": 9.924038765061042e-05,
"loss": 10.3279,
"step": 15
},
{
"epoch": 0.1245136186770428,
"grad_norm": 0.26593777537345886,
"learning_rate": 9.890738003669029e-05,
"loss": 10.3441,
"step": 16
},
{
"epoch": 0.13229571984435798,
"grad_norm": 0.3536827862262726,
"learning_rate": 9.851478631379982e-05,
"loss": 10.3664,
"step": 17
},
{
"epoch": 0.14007782101167315,
"grad_norm": 0.38606491684913635,
"learning_rate": 9.806308479691595e-05,
"loss": 10.2935,
"step": 18
},
{
"epoch": 0.14007782101167315,
"eval_loss": NaN,
"eval_runtime": 0.3716,
"eval_samples_per_second": 290.664,
"eval_steps_per_second": 37.679,
"step": 18
},
{
"epoch": 0.14785992217898833,
"grad_norm": 0.0,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.1556420233463035,
"grad_norm": 0.0,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.16342412451361868,
"grad_norm": 0.3168112635612488,
"learning_rate": 9.635919272833938e-05,
"loss": 10.3187,
"step": 21
},
{
"epoch": 0.17120622568093385,
"grad_norm": 0.0,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.17898832684824903,
"grad_norm": 0.2514454126358032,
"learning_rate": 9.493970231495835e-05,
"loss": 10.3512,
"step": 23
},
{
"epoch": 0.1867704280155642,
"grad_norm": 0.0,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.19455252918287938,
"grad_norm": 0.21714404225349426,
"learning_rate": 9.330127018922194e-05,
"loss": 10.3402,
"step": 25
},
{
"epoch": 0.20233463035019456,
"grad_norm": 0.09935510903596878,
"learning_rate": 9.24024048078213e-05,
"loss": 10.367,
"step": 26
},
{
"epoch": 0.21011673151750973,
"grad_norm": 0.1893402338027954,
"learning_rate": 9.145187862775209e-05,
"loss": 10.343,
"step": 27
},
{
"epoch": 0.21011673151750973,
"eval_loss": NaN,
"eval_runtime": 0.396,
"eval_samples_per_second": 272.742,
"eval_steps_per_second": 35.355,
"step": 27
},
{
"epoch": 0.2178988326848249,
"grad_norm": 0.16203711926937103,
"learning_rate": 9.045084971874738e-05,
"loss": 10.3318,
"step": 28
},
{
"epoch": 0.22568093385214008,
"grad_norm": 0.19412550330162048,
"learning_rate": 8.940053768033609e-05,
"loss": 10.3913,
"step": 29
},
{
"epoch": 0.23346303501945526,
"grad_norm": 0.3511830270290375,
"learning_rate": 8.83022221559489e-05,
"loss": 10.2959,
"step": 30
},
{
"epoch": 0.24124513618677043,
"grad_norm": 0.2315189391374588,
"learning_rate": 8.715724127386972e-05,
"loss": 10.3486,
"step": 31
},
{
"epoch": 0.2490272373540856,
"grad_norm": 0.1658879965543747,
"learning_rate": 8.596699001693255e-05,
"loss": 10.3505,
"step": 32
},
{
"epoch": 0.25680933852140075,
"grad_norm": 0.1770622283220291,
"learning_rate": 8.473291852294987e-05,
"loss": 10.3333,
"step": 33
},
{
"epoch": 0.26459143968871596,
"grad_norm": 0.0,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.2723735408560311,
"grad_norm": 0.28501346707344055,
"learning_rate": 8.213938048432697e-05,
"loss": 10.2935,
"step": 35
},
{
"epoch": 0.2801556420233463,
"grad_norm": 0.2703624367713928,
"learning_rate": 8.07830737662829e-05,
"loss": 10.3251,
"step": 36
},
{
"epoch": 0.2801556420233463,
"eval_loss": NaN,
"eval_runtime": 0.3829,
"eval_samples_per_second": 282.076,
"eval_steps_per_second": 36.565,
"step": 36
},
{
"epoch": 0.28793774319066145,
"grad_norm": 0.16514138877391815,
"learning_rate": 7.938926261462366e-05,
"loss": 10.3588,
"step": 37
},
{
"epoch": 0.29571984435797666,
"grad_norm": 0.17835251986980438,
"learning_rate": 7.795964517353735e-05,
"loss": 10.3294,
"step": 38
},
{
"epoch": 0.3035019455252918,
"grad_norm": 0.0,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.311284046692607,
"grad_norm": 0.35127612948417664,
"learning_rate": 7.500000000000001e-05,
"loss": 10.3466,
"step": 40
},
{
"epoch": 0.31906614785992216,
"grad_norm": 0.23155651986598969,
"learning_rate": 7.347357813929454e-05,
"loss": 10.3373,
"step": 41
},
{
"epoch": 0.32684824902723736,
"grad_norm": 0.31581297516822815,
"learning_rate": 7.191855733945387e-05,
"loss": 10.3105,
"step": 42
},
{
"epoch": 0.3346303501945525,
"grad_norm": 0.0,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.3424124513618677,
"grad_norm": 0.1989944577217102,
"learning_rate": 6.873032967079561e-05,
"loss": 10.3353,
"step": 44
},
{
"epoch": 0.35019455252918286,
"grad_norm": 0.10988571494817734,
"learning_rate": 6.710100716628344e-05,
"loss": 10.3591,
"step": 45
},
{
"epoch": 0.35019455252918286,
"eval_loss": NaN,
"eval_runtime": 0.4084,
"eval_samples_per_second": 264.436,
"eval_steps_per_second": 34.279,
"step": 45
},
{
"epoch": 0.35797665369649806,
"grad_norm": 0.2702690362930298,
"learning_rate": 6.545084971874738e-05,
"loss": 10.3186,
"step": 46
},
{
"epoch": 0.3657587548638132,
"grad_norm": 0.2404761016368866,
"learning_rate": 6.378186779084995e-05,
"loss": 10.3323,
"step": 47
},
{
"epoch": 0.3735408560311284,
"grad_norm": 0.24785469472408295,
"learning_rate": 6.209609477998338e-05,
"loss": 10.2847,
"step": 48
},
{
"epoch": 0.38132295719844356,
"grad_norm": 0.17176614701747894,
"learning_rate": 6.0395584540887963e-05,
"loss": 10.3431,
"step": 49
},
{
"epoch": 0.38910505836575876,
"grad_norm": 0.2749108672142029,
"learning_rate": 5.868240888334653e-05,
"loss": 10.3002,
"step": 50
},
{
"epoch": 0.3968871595330739,
"grad_norm": 0.0,
"learning_rate": 5.695865504800327e-05,
"loss": 0.0,
"step": 51
},
{
"epoch": 0.4046692607003891,
"grad_norm": 0.18099191784858704,
"learning_rate": 5.522642316338268e-05,
"loss": 10.3323,
"step": 52
},
{
"epoch": 0.41245136186770426,
"grad_norm": 0.10944093763828278,
"learning_rate": 5.348782368720626e-05,
"loss": 10.347,
"step": 53
},
{
"epoch": 0.42023346303501946,
"grad_norm": 0.27111586928367615,
"learning_rate": 5.174497483512506e-05,
"loss": 10.2841,
"step": 54
},
{
"epoch": 0.42023346303501946,
"eval_loss": NaN,
"eval_runtime": 0.3823,
"eval_samples_per_second": 282.498,
"eval_steps_per_second": 36.62,
"step": 54
},
{
"epoch": 0.4280155642023346,
"grad_norm": 0.23218759894371033,
"learning_rate": 5e-05,
"loss": 10.3536,
"step": 55
},
{
"epoch": 0.4357976653696498,
"grad_norm": 0.32123154401779175,
"learning_rate": 4.825502516487497e-05,
"loss": 10.2888,
"step": 56
},
{
"epoch": 0.44357976653696496,
"grad_norm": 0.11285388469696045,
"learning_rate": 4.6512176312793736e-05,
"loss": 10.3688,
"step": 57
},
{
"epoch": 0.45136186770428016,
"grad_norm": 0.2072630375623703,
"learning_rate": 4.477357683661734e-05,
"loss": 10.3193,
"step": 58
},
{
"epoch": 0.4591439688715953,
"grad_norm": 0.22816087305545807,
"learning_rate": 4.3041344951996746e-05,
"loss": 10.3542,
"step": 59
},
{
"epoch": 0.4669260700389105,
"grad_norm": 0.2683489918708801,
"learning_rate": 4.131759111665349e-05,
"loss": 10.309,
"step": 60
},
{
"epoch": 0.47470817120622566,
"grad_norm": 0.20230989158153534,
"learning_rate": 3.960441545911204e-05,
"loss": 10.3389,
"step": 61
},
{
"epoch": 0.48249027237354086,
"grad_norm": 0.5085319876670837,
"learning_rate": 3.790390522001662e-05,
"loss": 10.3247,
"step": 62
},
{
"epoch": 0.490272373540856,
"grad_norm": 0.25417184829711914,
"learning_rate": 3.6218132209150045e-05,
"loss": 10.3218,
"step": 63
},
{
"epoch": 0.490272373540856,
"eval_loss": NaN,
"eval_runtime": 0.3868,
"eval_samples_per_second": 279.247,
"eval_steps_per_second": 36.199,
"step": 63
},
{
"epoch": 0.4980544747081712,
"grad_norm": 0.0,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0,
"step": 64
},
{
"epoch": 0.5058365758754864,
"grad_norm": 0.0,
"learning_rate": 3.289899283371657e-05,
"loss": 0.0,
"step": 65
},
{
"epoch": 0.5136186770428015,
"grad_norm": 0.35637524724006653,
"learning_rate": 3.12696703292044e-05,
"loss": 10.2719,
"step": 66
},
{
"epoch": 0.5214007782101168,
"grad_norm": 0.31268998980522156,
"learning_rate": 2.9663167846209998e-05,
"loss": 10.3118,
"step": 67
},
{
"epoch": 0.5291828793774319,
"grad_norm": 0.17281199991703033,
"learning_rate": 2.8081442660546125e-05,
"loss": 10.3455,
"step": 68
},
{
"epoch": 0.5369649805447471,
"grad_norm": 0.18739189207553864,
"learning_rate": 2.6526421860705473e-05,
"loss": 10.3066,
"step": 69
},
{
"epoch": 0.5447470817120622,
"grad_norm": 0.1419634222984314,
"learning_rate": 2.500000000000001e-05,
"loss": 10.3226,
"step": 70
},
{
"epoch": 0.5525291828793775,
"grad_norm": 0.27168574929237366,
"learning_rate": 2.350403678833976e-05,
"loss": 10.3416,
"step": 71
},
{
"epoch": 0.5603112840466926,
"grad_norm": 0.44093209505081177,
"learning_rate": 2.2040354826462668e-05,
"loss": 10.2672,
"step": 72
},
{
"epoch": 0.5603112840466926,
"eval_loss": NaN,
"eval_runtime": 0.4726,
"eval_samples_per_second": 228.515,
"eval_steps_per_second": 29.622,
"step": 72
},
{
"epoch": 0.5680933852140078,
"grad_norm": 0.3372398316860199,
"learning_rate": 2.061073738537635e-05,
"loss": 10.3153,
"step": 73
},
{
"epoch": 0.5758754863813229,
"grad_norm": 0.3003866672515869,
"learning_rate": 1.9216926233717085e-05,
"loss": 10.3436,
"step": 74
},
{
"epoch": 0.5836575875486382,
"grad_norm": 0.163679301738739,
"learning_rate": 1.7860619515673033e-05,
"loss": 10.357,
"step": 75
},
{
"epoch": 0.5914396887159533,
"grad_norm": 0.0,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0,
"step": 76
},
{
"epoch": 0.5992217898832685,
"grad_norm": 0.4258427619934082,
"learning_rate": 1.526708147705013e-05,
"loss": 10.3103,
"step": 77
},
{
"epoch": 0.6070038910505836,
"grad_norm": 0.3290005326271057,
"learning_rate": 1.4033009983067452e-05,
"loss": 10.3019,
"step": 78
},
{
"epoch": 0.6147859922178989,
"grad_norm": 0.16638368368148804,
"learning_rate": 1.2842758726130283e-05,
"loss": 10.3301,
"step": 79
},
{
"epoch": 0.622568093385214,
"grad_norm": 0.0,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.6303501945525292,
"grad_norm": 0.20695820450782776,
"learning_rate": 1.0599462319663905e-05,
"loss": 10.3287,
"step": 81
},
{
"epoch": 0.6303501945525292,
"eval_loss": NaN,
"eval_runtime": 0.4656,
"eval_samples_per_second": 231.97,
"eval_steps_per_second": 30.07,
"step": 81
},
{
"epoch": 0.6381322957198443,
"grad_norm": 0.28333377838134766,
"learning_rate": 9.549150281252633e-06,
"loss": 10.3335,
"step": 82
},
{
"epoch": 0.6459143968871596,
"grad_norm": 0.0,
"learning_rate": 8.548121372247918e-06,
"loss": 0.0,
"step": 83
},
{
"epoch": 0.6536964980544747,
"grad_norm": 0.0,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0,
"step": 84
},
{
"epoch": 0.6614785992217899,
"grad_norm": 0.22484956681728363,
"learning_rate": 6.698729810778065e-06,
"loss": 10.3235,
"step": 85
},
{
"epoch": 0.669260700389105,
"grad_norm": 0.15221866965293884,
"learning_rate": 5.852620357053651e-06,
"loss": 10.3531,
"step": 86
},
{
"epoch": 0.6770428015564203,
"grad_norm": 0.12713178992271423,
"learning_rate": 5.060297685041659e-06,
"loss": 10.3367,
"step": 87
},
{
"epoch": 0.6848249027237354,
"grad_norm": 0.17537634074687958,
"learning_rate": 4.322727117869951e-06,
"loss": 10.3255,
"step": 88
},
{
"epoch": 0.6926070038910506,
"grad_norm": 0.1998535841703415,
"learning_rate": 3.6408072716606346e-06,
"loss": 10.3831,
"step": 89
},
{
"epoch": 0.7003891050583657,
"grad_norm": 0.1796063929796219,
"learning_rate": 3.0153689607045845e-06,
"loss": 10.3197,
"step": 90
},
{
"epoch": 0.7003891050583657,
"eval_loss": NaN,
"eval_runtime": 0.4096,
"eval_samples_per_second": 263.683,
"eval_steps_per_second": 34.181,
"step": 90
},
{
"epoch": 0.708171206225681,
"grad_norm": 0.20497995615005493,
"learning_rate": 2.4471741852423237e-06,
"loss": 10.3191,
"step": 91
},
{
"epoch": 0.7159533073929961,
"grad_norm": 0.4476834833621979,
"learning_rate": 1.9369152030840556e-06,
"loss": 10.3319,
"step": 92
},
{
"epoch": 0.7237354085603113,
"grad_norm": 0.5001630187034607,
"learning_rate": 1.4852136862001764e-06,
"loss": 10.2647,
"step": 93
},
{
"epoch": 0.7315175097276264,
"grad_norm": 0.3534506857395172,
"learning_rate": 1.0926199633097157e-06,
"loss": 10.2811,
"step": 94
},
{
"epoch": 0.7392996108949417,
"grad_norm": 0.32725393772125244,
"learning_rate": 7.596123493895991e-07,
"loss": 10.3196,
"step": 95
},
{
"epoch": 0.7470817120622568,
"grad_norm": 0.1588638424873352,
"learning_rate": 4.865965629214819e-07,
"loss": 10.354,
"step": 96
},
{
"epoch": 0.754863813229572,
"grad_norm": 0.3359444737434387,
"learning_rate": 2.7390523158633554e-07,
"loss": 10.3324,
"step": 97
},
{
"epoch": 0.7626459143968871,
"grad_norm": 0.0,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0,
"step": 98
},
{
"epoch": 0.7704280155642024,
"grad_norm": 0.1817806214094162,
"learning_rate": 3.04586490452119e-08,
"loss": 10.3151,
"step": 99
},
{
"epoch": 0.7704280155642024,
"eval_loss": NaN,
"eval_runtime": 0.3852,
"eval_samples_per_second": 280.368,
"eval_steps_per_second": 36.344,
"step": 99
},
{
"epoch": 0.7782101167315175,
"grad_norm": 0.19820758700370789,
"learning_rate": 0.0,
"loss": 10.3539,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7400796192768.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
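
For reference, a minimal Python sketch of reading this trainer_state.json and inspecting its log_history, for example to list the steps where eval_loss came back as NaN or the training loss collapsed to 0.0. The file path and the script itself are illustrative assumptions, not part of the checkpoint.

import json
import math

# Path is an assumption; point it at wherever this checkpoint directory lives.
with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)  # Python's json module accepts the NaN literals in this file

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"logged training steps: {len(train_logs)}, final loss: {train_logs[-1]['loss']}")
print("NaN eval_loss at steps:", [e["step"] for e in eval_logs if math.isnan(e["eval_loss"])])
print("zero-loss training steps:", [e["step"] for e in train_logs if e["loss"] == 0.0])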