{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.18165304268846502,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0018165304268846503,
"grad_norm": 0.3600485622882843,
"learning_rate": 1e-05,
"loss": 0.4664,
"step": 1
},
{
"epoch": 0.0018165304268846503,
"eval_loss": 0.4619494378566742,
"eval_runtime": 108.3327,
"eval_samples_per_second": 4.283,
"eval_steps_per_second": 0.535,
"step": 1
},
{
"epoch": 0.0036330608537693005,
"grad_norm": 0.33697664737701416,
"learning_rate": 2e-05,
"loss": 0.4766,
"step": 2
},
{
"epoch": 0.005449591280653951,
"grad_norm": 0.3403802812099457,
"learning_rate": 3e-05,
"loss": 0.4548,
"step": 3
},
{
"epoch": 0.007266121707538601,
"grad_norm": 0.3392443060874939,
"learning_rate": 4e-05,
"loss": 0.4661,
"step": 4
},
{
"epoch": 0.009082652134423252,
"grad_norm": 0.39870426058769226,
"learning_rate": 5e-05,
"loss": 0.4286,
"step": 5
},
{
"epoch": 0.010899182561307902,
"grad_norm": 0.3802020847797394,
"learning_rate": 6e-05,
"loss": 0.4409,
"step": 6
},
{
"epoch": 0.012715712988192553,
"grad_norm": 0.3801540732383728,
"learning_rate": 7e-05,
"loss": 0.4209,
"step": 7
},
{
"epoch": 0.014532243415077202,
"grad_norm": 0.38932061195373535,
"learning_rate": 8e-05,
"loss": 0.4188,
"step": 8
},
{
"epoch": 0.01634877384196185,
"grad_norm": 0.3350403308868408,
"learning_rate": 9e-05,
"loss": 0.396,
"step": 9
},
{
"epoch": 0.01634877384196185,
"eval_loss": 0.3949369788169861,
"eval_runtime": 108.084,
"eval_samples_per_second": 4.293,
"eval_steps_per_second": 0.537,
"step": 9
},
{
"epoch": 0.018165304268846504,
"grad_norm": 0.33399584889411926,
"learning_rate": 0.0001,
"loss": 0.3662,
"step": 10
},
{
"epoch": 0.019981834695731154,
"grad_norm": 0.3276408314704895,
"learning_rate": 9.99695413509548e-05,
"loss": 0.3859,
"step": 11
},
{
"epoch": 0.021798365122615803,
"grad_norm": 0.3103690445423126,
"learning_rate": 9.987820251299122e-05,
"loss": 0.3512,
"step": 12
},
{
"epoch": 0.023614895549500452,
"grad_norm": 0.45643332600593567,
"learning_rate": 9.972609476841367e-05,
"loss": 0.3913,
"step": 13
},
{
"epoch": 0.025431425976385105,
"grad_norm": 0.38945508003234863,
"learning_rate": 9.951340343707852e-05,
"loss": 0.3104,
"step": 14
},
{
"epoch": 0.027247956403269755,
"grad_norm": 0.33456993103027344,
"learning_rate": 9.924038765061042e-05,
"loss": 0.3705,
"step": 15
},
{
"epoch": 0.029064486830154404,
"grad_norm": 0.4321489632129669,
"learning_rate": 9.890738003669029e-05,
"loss": 0.3217,
"step": 16
},
{
"epoch": 0.030881017257039057,
"grad_norm": 0.3709757924079895,
"learning_rate": 9.851478631379982e-05,
"loss": 0.3113,
"step": 17
},
{
"epoch": 0.0326975476839237,
"grad_norm": 0.33215710520744324,
"learning_rate": 9.806308479691595e-05,
"loss": 0.3293,
"step": 18
},
{
"epoch": 0.0326975476839237,
"eval_loss": 0.32641759514808655,
"eval_runtime": 108.5742,
"eval_samples_per_second": 4.274,
"eval_steps_per_second": 0.534,
"step": 18
},
{
"epoch": 0.03451407811080836,
"grad_norm": 0.40216928720474243,
"learning_rate": 9.755282581475769e-05,
"loss": 0.385,
"step": 19
},
{
"epoch": 0.03633060853769301,
"grad_norm": 0.3322986662387848,
"learning_rate": 9.698463103929542e-05,
"loss": 0.2936,
"step": 20
},
{
"epoch": 0.03814713896457766,
"grad_norm": 0.339769572019577,
"learning_rate": 9.635919272833938e-05,
"loss": 0.329,
"step": 21
},
{
"epoch": 0.03996366939146231,
"grad_norm": 0.34345075488090515,
"learning_rate": 9.567727288213005e-05,
"loss": 0.2768,
"step": 22
},
{
"epoch": 0.04178019981834696,
"grad_norm": 0.3251555860042572,
"learning_rate": 9.493970231495835e-05,
"loss": 0.2831,
"step": 23
},
{
"epoch": 0.043596730245231606,
"grad_norm": 0.35843387246131897,
"learning_rate": 9.414737964294636e-05,
"loss": 0.402,
"step": 24
},
{
"epoch": 0.045413260672116255,
"grad_norm": 0.31343409419059753,
"learning_rate": 9.330127018922194e-05,
"loss": 0.2751,
"step": 25
},
{
"epoch": 0.047229791099000905,
"grad_norm": 0.3558681607246399,
"learning_rate": 9.24024048078213e-05,
"loss": 0.3265,
"step": 26
},
{
"epoch": 0.04904632152588556,
"grad_norm": 0.30473554134368896,
"learning_rate": 9.145187862775209e-05,
"loss": 0.241,
"step": 27
},
{
"epoch": 0.04904632152588556,
"eval_loss": 0.30456194281578064,
"eval_runtime": 108.8274,
"eval_samples_per_second": 4.264,
"eval_steps_per_second": 0.533,
"step": 27
},
{
"epoch": 0.05086285195277021,
"grad_norm": 0.4298734962940216,
"learning_rate": 9.045084971874738e-05,
"loss": 0.2689,
"step": 28
},
{
"epoch": 0.05267938237965486,
"grad_norm": 0.3488064110279083,
"learning_rate": 8.940053768033609e-05,
"loss": 0.2775,
"step": 29
},
{
"epoch": 0.05449591280653951,
"grad_norm": 0.4139954745769501,
"learning_rate": 8.83022221559489e-05,
"loss": 0.3113,
"step": 30
},
{
"epoch": 0.05631244323342416,
"grad_norm": 0.3354235887527466,
"learning_rate": 8.715724127386972e-05,
"loss": 0.3051,
"step": 31
},
{
"epoch": 0.05812897366030881,
"grad_norm": 0.41172316670417786,
"learning_rate": 8.596699001693255e-05,
"loss": 0.3233,
"step": 32
},
{
"epoch": 0.05994550408719346,
"grad_norm": 0.3322085738182068,
"learning_rate": 8.473291852294987e-05,
"loss": 0.3209,
"step": 33
},
{
"epoch": 0.061762034514078114,
"grad_norm": 0.3583935797214508,
"learning_rate": 8.345653031794292e-05,
"loss": 0.3255,
"step": 34
},
{
"epoch": 0.06357856494096276,
"grad_norm": 0.2800469696521759,
"learning_rate": 8.213938048432697e-05,
"loss": 0.3104,
"step": 35
},
{
"epoch": 0.0653950953678474,
"grad_norm": 0.4980929493904114,
"learning_rate": 8.07830737662829e-05,
"loss": 0.3561,
"step": 36
},
{
"epoch": 0.0653950953678474,
"eval_loss": 0.2919258773326874,
"eval_runtime": 108.6083,
"eval_samples_per_second": 4.272,
"eval_steps_per_second": 0.534,
"step": 36
},
{
"epoch": 0.06721162579473206,
"grad_norm": 0.3218163549900055,
"learning_rate": 7.938926261462366e-05,
"loss": 0.2759,
"step": 37
},
{
"epoch": 0.06902815622161672,
"grad_norm": 0.29215186834335327,
"learning_rate": 7.795964517353735e-05,
"loss": 0.3034,
"step": 38
},
{
"epoch": 0.07084468664850137,
"grad_norm": 0.3236890733242035,
"learning_rate": 7.649596321166024e-05,
"loss": 0.278,
"step": 39
},
{
"epoch": 0.07266121707538602,
"grad_norm": 0.3118452727794647,
"learning_rate": 7.500000000000001e-05,
"loss": 0.3184,
"step": 40
},
{
"epoch": 0.07447774750227067,
"grad_norm": 0.2818981111049652,
"learning_rate": 7.347357813929454e-05,
"loss": 0.2823,
"step": 41
},
{
"epoch": 0.07629427792915532,
"grad_norm": 0.2725626230239868,
"learning_rate": 7.191855733945387e-05,
"loss": 0.2795,
"step": 42
},
{
"epoch": 0.07811080835603997,
"grad_norm": 0.28230541944503784,
"learning_rate": 7.033683215379002e-05,
"loss": 0.2023,
"step": 43
},
{
"epoch": 0.07992733878292461,
"grad_norm": 0.24861739575862885,
"learning_rate": 6.873032967079561e-05,
"loss": 0.2655,
"step": 44
},
{
"epoch": 0.08174386920980926,
"grad_norm": 0.29663726687431335,
"learning_rate": 6.710100716628344e-05,
"loss": 0.2904,
"step": 45
},
{
"epoch": 0.08174386920980926,
"eval_loss": 0.2852410078048706,
"eval_runtime": 108.7895,
"eval_samples_per_second": 4.265,
"eval_steps_per_second": 0.533,
"step": 45
},
{
"epoch": 0.08356039963669391,
"grad_norm": 0.27381181716918945,
"learning_rate": 6.545084971874738e-05,
"loss": 0.2109,
"step": 46
},
{
"epoch": 0.08537693006357856,
"grad_norm": 0.3276028335094452,
"learning_rate": 6.378186779084995e-05,
"loss": 0.3141,
"step": 47
},
{
"epoch": 0.08719346049046321,
"grad_norm": 0.2625039219856262,
"learning_rate": 6.209609477998338e-05,
"loss": 0.2521,
"step": 48
},
{
"epoch": 0.08900999091734786,
"grad_norm": 0.31556904315948486,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.2891,
"step": 49
},
{
"epoch": 0.09082652134423251,
"grad_norm": 0.2701485753059387,
"learning_rate": 5.868240888334653e-05,
"loss": 0.2474,
"step": 50
},
{
"epoch": 0.09264305177111716,
"grad_norm": 0.31370481848716736,
"learning_rate": 5.695865504800327e-05,
"loss": 0.2895,
"step": 51
},
{
"epoch": 0.09445958219800181,
"grad_norm": 0.26035645604133606,
"learning_rate": 5.522642316338268e-05,
"loss": 0.2501,
"step": 52
},
{
"epoch": 0.09627611262488647,
"grad_norm": 0.3116852343082428,
"learning_rate": 5.348782368720626e-05,
"loss": 0.2527,
"step": 53
},
{
"epoch": 0.09809264305177112,
"grad_norm": 0.2944983243942261,
"learning_rate": 5.174497483512506e-05,
"loss": 0.2754,
"step": 54
},
{
"epoch": 0.09809264305177112,
"eval_loss": 0.27958133816719055,
"eval_runtime": 108.6945,
"eval_samples_per_second": 4.269,
"eval_steps_per_second": 0.534,
"step": 54
},
{
"epoch": 0.09990917347865577,
"grad_norm": 0.350281685590744,
"learning_rate": 5e-05,
"loss": 0.2596,
"step": 55
},
{
"epoch": 0.10172570390554042,
"grad_norm": 0.33483561873435974,
"learning_rate": 4.825502516487497e-05,
"loss": 0.3078,
"step": 56
},
{
"epoch": 0.10354223433242507,
"grad_norm": 0.28896334767341614,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.2864,
"step": 57
},
{
"epoch": 0.10535876475930972,
"grad_norm": 0.27719321846961975,
"learning_rate": 4.477357683661734e-05,
"loss": 0.2958,
"step": 58
},
{
"epoch": 0.10717529518619437,
"grad_norm": 0.31106963753700256,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.2851,
"step": 59
},
{
"epoch": 0.10899182561307902,
"grad_norm": 0.253791481256485,
"learning_rate": 4.131759111665349e-05,
"loss": 0.2168,
"step": 60
},
{
"epoch": 0.11080835603996367,
"grad_norm": 0.37355419993400574,
"learning_rate": 3.960441545911204e-05,
"loss": 0.3131,
"step": 61
},
{
"epoch": 0.11262488646684832,
"grad_norm": 0.40072622895240784,
"learning_rate": 3.790390522001662e-05,
"loss": 0.3205,
"step": 62
},
{
"epoch": 0.11444141689373297,
"grad_norm": 0.27985572814941406,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.2427,
"step": 63
},
{
"epoch": 0.11444141689373297,
"eval_loss": 0.27756959199905396,
"eval_runtime": 108.6448,
"eval_samples_per_second": 4.271,
"eval_steps_per_second": 0.534,
"step": 63
},
{
"epoch": 0.11625794732061762,
"grad_norm": 0.28662416338920593,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.3106,
"step": 64
},
{
"epoch": 0.11807447774750227,
"grad_norm": 0.29447686672210693,
"learning_rate": 3.289899283371657e-05,
"loss": 0.2368,
"step": 65
},
{
"epoch": 0.11989100817438691,
"grad_norm": 0.2652858793735504,
"learning_rate": 3.12696703292044e-05,
"loss": 0.2319,
"step": 66
},
{
"epoch": 0.12170753860127158,
"grad_norm": 0.29174622893333435,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.3058,
"step": 67
},
{
"epoch": 0.12352406902815623,
"grad_norm": 0.30116814374923706,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.2818,
"step": 68
},
{
"epoch": 0.12534059945504086,
"grad_norm": 0.30650588870048523,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.2743,
"step": 69
},
{
"epoch": 0.1271571298819255,
"grad_norm": 0.27553582191467285,
"learning_rate": 2.500000000000001e-05,
"loss": 0.2155,
"step": 70
},
{
"epoch": 0.12897366030881016,
"grad_norm": 0.2803662121295929,
"learning_rate": 2.350403678833976e-05,
"loss": 0.2782,
"step": 71
},
{
"epoch": 0.1307901907356948,
"grad_norm": 0.26244497299194336,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.1971,
"step": 72
},
{
"epoch": 0.1307901907356948,
"eval_loss": 0.2747555673122406,
"eval_runtime": 108.6235,
"eval_samples_per_second": 4.272,
"eval_steps_per_second": 0.534,
"step": 72
},
{
"epoch": 0.13260672116257946,
"grad_norm": 0.29561930894851685,
"learning_rate": 2.061073738537635e-05,
"loss": 0.3123,
"step": 73
},
{
"epoch": 0.1344232515894641,
"grad_norm": 0.3003482520580292,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.3104,
"step": 74
},
{
"epoch": 0.1362397820163488,
"grad_norm": 0.2806616425514221,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.2908,
"step": 75
},
{
"epoch": 0.13805631244323344,
"grad_norm": 0.331093430519104,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.2559,
"step": 76
},
{
"epoch": 0.13987284287011809,
"grad_norm": 0.2987718880176544,
"learning_rate": 1.526708147705013e-05,
"loss": 0.2594,
"step": 77
},
{
"epoch": 0.14168937329700274,
"grad_norm": 0.2768915593624115,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.274,
"step": 78
},
{
"epoch": 0.14350590372388738,
"grad_norm": 0.28360041975975037,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.2638,
"step": 79
},
{
"epoch": 0.14532243415077203,
"grad_norm": 0.28000327944755554,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.2937,
"step": 80
},
{
"epoch": 0.14713896457765668,
"grad_norm": 0.3231543004512787,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.2612,
"step": 81
},
{
"epoch": 0.14713896457765668,
"eval_loss": 0.27281635999679565,
"eval_runtime": 108.6714,
"eval_samples_per_second": 4.27,
"eval_steps_per_second": 0.534,
"step": 81
},
{
"epoch": 0.14895549500454133,
"grad_norm": 0.352360337972641,
"learning_rate": 9.549150281252633e-06,
"loss": 0.2377,
"step": 82
},
{
"epoch": 0.15077202543142598,
"grad_norm": 0.29913556575775146,
"learning_rate": 8.548121372247918e-06,
"loss": 0.297,
"step": 83
},
{
"epoch": 0.15258855585831063,
"grad_norm": 0.3100139796733856,
"learning_rate": 7.597595192178702e-06,
"loss": 0.3254,
"step": 84
},
{
"epoch": 0.15440508628519528,
"grad_norm": 0.2626723051071167,
"learning_rate": 6.698729810778065e-06,
"loss": 0.2542,
"step": 85
},
{
"epoch": 0.15622161671207993,
"grad_norm": 0.2944123148918152,
"learning_rate": 5.852620357053651e-06,
"loss": 0.3606,
"step": 86
},
{
"epoch": 0.15803814713896458,
"grad_norm": 0.34874227643013,
"learning_rate": 5.060297685041659e-06,
"loss": 0.3298,
"step": 87
},
{
"epoch": 0.15985467756584923,
"grad_norm": 0.3021911680698395,
"learning_rate": 4.322727117869951e-06,
"loss": 0.356,
"step": 88
},
{
"epoch": 0.16167120799273388,
"grad_norm": 0.26396703720092773,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.2144,
"step": 89
},
{
"epoch": 0.16348773841961853,
"grad_norm": 0.29361093044281006,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.2807,
"step": 90
},
{
"epoch": 0.16348773841961853,
"eval_loss": 0.2723295986652374,
"eval_runtime": 108.6297,
"eval_samples_per_second": 4.271,
"eval_steps_per_second": 0.534,
"step": 90
},
{
"epoch": 0.16530426884650318,
"grad_norm": 0.2374623417854309,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.2156,
"step": 91
},
{
"epoch": 0.16712079927338783,
"grad_norm": 0.26733702421188354,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.2058,
"step": 92
},
{
"epoch": 0.16893732970027248,
"grad_norm": 0.2634991407394409,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.2776,
"step": 93
},
{
"epoch": 0.17075386012715713,
"grad_norm": 0.2750228941440582,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.2635,
"step": 94
},
{
"epoch": 0.17257039055404177,
"grad_norm": 0.2913649380207062,
"learning_rate": 7.596123493895991e-07,
"loss": 0.2466,
"step": 95
},
{
"epoch": 0.17438692098092642,
"grad_norm": 0.30093085765838623,
"learning_rate": 4.865965629214819e-07,
"loss": 0.2855,
"step": 96
},
{
"epoch": 0.17620345140781107,
"grad_norm": 0.32675302028656006,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.2625,
"step": 97
},
{
"epoch": 0.17801998183469572,
"grad_norm": 0.2559075653553009,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.26,
"step": 98
},
{
"epoch": 0.17983651226158037,
"grad_norm": 0.3088376224040985,
"learning_rate": 3.04586490452119e-08,
"loss": 0.298,
"step": 99
},
{
"epoch": 0.17983651226158037,
"eval_loss": 0.27232545614242554,
"eval_runtime": 108.5508,
"eval_samples_per_second": 4.274,
"eval_steps_per_second": 0.534,
"step": 99
},
{
"epoch": 0.18165304268846502,
"grad_norm": 0.28909745812416077,
"learning_rate": 0.0,
"loss": 0.2986,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4281337959612416e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}