{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.7488855869242199,
"eval_steps": 42,
"global_step": 126,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.30475762367257353,
"learning_rate": 2e-05,
"loss": 0.6274,
"step": 1
},
{
"epoch": 0.01,
"eval_loss": 1.0297596454620361,
"eval_runtime": 150.6014,
"eval_samples_per_second": 1.653,
"eval_steps_per_second": 0.83,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 0.2798137150913395,
"learning_rate": 4e-05,
"loss": 0.6362,
"step": 2
},
{
"epoch": 0.02,
"grad_norm": 0.3178684451319545,
"learning_rate": 6e-05,
"loss": 0.6299,
"step": 3
},
{
"epoch": 0.02,
"grad_norm": 0.33651284916847835,
"learning_rate": 8e-05,
"loss": 0.6391,
"step": 4
},
{
"epoch": 0.03,
"grad_norm": 0.23984915388648712,
"learning_rate": 0.0001,
"loss": 0.6071,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 0.20514040410017348,
"learning_rate": 0.00012,
"loss": 0.5996,
"step": 6
},
{
"epoch": 0.04,
"grad_norm": 0.1950018128286362,
"learning_rate": 0.00014,
"loss": 0.6298,
"step": 7
},
{
"epoch": 0.05,
"grad_norm": 0.14246019947393238,
"learning_rate": 0.00016,
"loss": 0.5108,
"step": 8
},
{
"epoch": 0.05,
"grad_norm": 0.15792014279750227,
"learning_rate": 0.00018,
"loss": 0.5529,
"step": 9
},
{
"epoch": 0.06,
"grad_norm": 0.1517889177511264,
"learning_rate": 0.0002,
"loss": 0.5433,
"step": 10
},
{
"epoch": 0.07,
"grad_norm": 0.1372121219437277,
"learning_rate": 0.00019998023297700658,
"loss": 0.5856,
"step": 11
},
{
"epoch": 0.07,
"grad_norm": 0.16740807394942855,
"learning_rate": 0.00019992093972273018,
"loss": 0.5546,
"step": 12
},
{
"epoch": 0.08,
"grad_norm": 0.13512320693394078,
"learning_rate": 0.00019982214367819328,
"loss": 0.6193,
"step": 13
},
{
"epoch": 0.08,
"grad_norm": 0.16169796294070152,
"learning_rate": 0.0001996838839014696,
"loss": 0.5495,
"step": 14
},
{
"epoch": 0.09,
"grad_norm": 0.16796913812281988,
"learning_rate": 0.00019950621505224273,
"loss": 0.5035,
"step": 15
},
{
"epoch": 0.1,
"grad_norm": 0.1800514764162192,
"learning_rate": 0.00019928920737019733,
"loss": 0.5083,
"step": 16
},
{
"epoch": 0.1,
"grad_norm": 0.170432124866908,
"learning_rate": 0.0001990329466472502,
"loss": 0.632,
"step": 17
},
{
"epoch": 0.11,
"grad_norm": 0.19129325489749488,
"learning_rate": 0.00019873753419363336,
"loss": 0.4813,
"step": 18
},
{
"epoch": 0.11,
"grad_norm": 0.1459357988760762,
"learning_rate": 0.00019840308679784207,
"loss": 0.4973,
"step": 19
},
{
"epoch": 0.12,
"grad_norm": 0.192594730984382,
"learning_rate": 0.00019802973668046363,
"loss": 0.5291,
"step": 20
},
{
"epoch": 0.12,
"grad_norm": 0.859025467969139,
"learning_rate": 0.0001976176314419051,
"loss": 0.5296,
"step": 21
},
{
"epoch": 0.13,
"grad_norm": 0.13366297885670222,
"learning_rate": 0.000197166934004041,
"loss": 0.4819,
"step": 22
},
{
"epoch": 0.14,
"grad_norm": 0.15698714419747645,
"learning_rate": 0.00019667782254580374,
"loss": 0.5409,
"step": 23
},
{
"epoch": 0.14,
"grad_norm": 0.10995943735837355,
"learning_rate": 0.00019615049043274205,
"loss": 0.5108,
"step": 24
},
{
"epoch": 0.15,
"grad_norm": 0.10796742192788925,
"learning_rate": 0.00019558514614057609,
"loss": 0.5215,
"step": 25
},
{
"epoch": 0.15,
"grad_norm": 0.11641740089490231,
"learning_rate": 0.00019498201317277828,
"loss": 0.5012,
"step": 26
},
{
"epoch": 0.16,
"grad_norm": 0.1120175962893241,
"learning_rate": 0.00019434132997221345,
"loss": 0.474,
"step": 27
},
{
"epoch": 0.17,
"grad_norm": 0.1218171278782483,
"learning_rate": 0.0001936633498268728,
"loss": 0.5216,
"step": 28
},
{
"epoch": 0.17,
"grad_norm": 0.11718521115928844,
"learning_rate": 0.0001929483407697387,
"loss": 0.4856,
"step": 29
},
{
"epoch": 0.18,
"grad_norm": 0.12611471038571026,
"learning_rate": 0.00019219658547282067,
"loss": 0.4823,
"step": 30
},
{
"epoch": 0.18,
"grad_norm": 0.11106871615269753,
"learning_rate": 0.00019140838113540346,
"loss": 0.4869,
"step": 31
},
{
"epoch": 0.19,
"grad_norm": 0.1416503230360699,
"learning_rate": 0.00019058403936655233,
"loss": 0.5341,
"step": 32
},
{
"epoch": 0.2,
"grad_norm": 0.10761396399791698,
"learning_rate": 0.00018972388606192125,
"loss": 0.4304,
"step": 33
},
{
"epoch": 0.2,
"grad_norm": 0.10975376180434356,
"learning_rate": 0.0001888282612749132,
"loss": 0.4646,
"step": 34
},
{
"epoch": 0.21,
"grad_norm": 0.12848879670359908,
"learning_rate": 0.00018789751908224338,
"loss": 0.4972,
"step": 35
},
{
"epoch": 0.21,
"grad_norm": 0.11904721819683833,
"learning_rate": 0.00018693202744395827,
"loss": 0.505,
"step": 36
},
{
"epoch": 0.22,
"grad_norm": 0.12249852034224981,
"learning_rate": 0.00018593216805796612,
"loss": 0.5396,
"step": 37
},
{
"epoch": 0.23,
"grad_norm": 0.12453395046646995,
"learning_rate": 0.00018489833620913642,
"loss": 0.4917,
"step": 38
},
{
"epoch": 0.23,
"grad_norm": 0.12585770374422164,
"learning_rate": 0.00018383094061302766,
"loss": 0.5079,
"step": 39
},
{
"epoch": 0.24,
"grad_norm": 0.11095271476322731,
"learning_rate": 0.00018273040325430574,
"loss": 0.4812,
"step": 40
},
{
"epoch": 0.24,
"grad_norm": 0.12968115101635422,
"learning_rate": 0.00018159715921991612,
"loss": 0.5106,
"step": 41
},
{
"epoch": 0.25,
"grad_norm": 0.10933018515590627,
"learning_rate": 0.00018043165652707649,
"loss": 0.4403,
"step": 42
},
{
"epoch": 0.25,
"eval_loss": 0.9767947196960449,
"eval_runtime": 152.0343,
"eval_samples_per_second": 1.638,
"eval_steps_per_second": 0.822,
"step": 42
},
{
"epoch": 0.26,
"grad_norm": 0.11525719626792096,
"learning_rate": 0.00017923435594615744,
"loss": 0.482,
"step": 43
},
{
"epoch": 0.26,
"grad_norm": 0.12962154411778218,
"learning_rate": 0.00017800573081852122,
"loss": 0.5452,
"step": 44
},
{
"epoch": 0.27,
"grad_norm": 0.12555700120045588,
"learning_rate": 0.0001767462668693908,
"loss": 0.5084,
"step": 45
},
{
"epoch": 0.27,
"grad_norm": 0.11427565378293324,
"learning_rate": 0.00017545646201582303,
"loss": 0.5191,
"step": 46
},
{
"epoch": 0.28,
"grad_norm": 0.10974901402857151,
"learning_rate": 0.00017413682616986185,
"loss": 0.4703,
"step": 47
},
{
"epoch": 0.29,
"grad_norm": 0.11781465084480325,
"learning_rate": 0.00017278788103694943,
"loss": 0.4548,
"step": 48
},
{
"epoch": 0.29,
"grad_norm": 0.10781807228559999,
"learning_rate": 0.000171410159909675,
"loss": 0.476,
"step": 49
},
{
"epoch": 0.3,
"grad_norm": 0.12502639462035098,
"learning_rate": 0.00017000420745694254,
"loss": 0.5084,
"step": 50
},
{
"epoch": 0.3,
"grad_norm": 0.10718920826593327,
"learning_rate": 0.00016857057950864132,
"loss": 0.5093,
"step": 51
},
{
"epoch": 0.31,
"grad_norm": 0.10040549880547282,
"learning_rate": 0.0001671098428359037,
"loss": 0.4644,
"step": 52
},
{
"epoch": 0.32,
"grad_norm": 0.11778478994740472,
"learning_rate": 0.00016562257492703757,
"loss": 0.4725,
"step": 53
},
{
"epoch": 0.32,
"grad_norm": 0.1008386031049932,
"learning_rate": 0.000164109363759222,
"loss": 0.5121,
"step": 54
},
{
"epoch": 0.33,
"grad_norm": 0.1170302528140235,
"learning_rate": 0.000162570807566056,
"loss": 0.4766,
"step": 55
},
{
"epoch": 0.33,
"grad_norm": 0.1104526773884303,
"learning_rate": 0.00016100751460105243,
"loss": 0.4886,
"step": 56
},
{
"epoch": 0.34,
"grad_norm": 0.10467920768691032,
"learning_rate": 0.00015942010289717105,
"loss": 0.4703,
"step": 57
},
{
"epoch": 0.34,
"grad_norm": 0.11551406829220555,
"learning_rate": 0.00015780920002248484,
"loss": 0.4837,
"step": 58
},
{
"epoch": 0.35,
"grad_norm": 0.11133818831887894,
"learning_rate": 0.0001561754428320771,
"loss": 0.5148,
"step": 59
},
{
"epoch": 0.36,
"grad_norm": 0.11281448423273216,
"learning_rate": 0.00015451947721626676,
"loss": 0.4561,
"step": 60
},
{
"epoch": 0.36,
"grad_norm": 0.13934126997471205,
"learning_rate": 0.00015284195784526195,
"loss": 0.5069,
"step": 61
},
{
"epoch": 0.37,
"grad_norm": 0.11851655387640142,
"learning_rate": 0.00015114354791034225,
"loss": 0.5094,
"step": 62
},
{
"epoch": 0.37,
"grad_norm": 0.12909148374566123,
"learning_rate": 0.0001494249188616723,
"loss": 0.581,
"step": 63
},
{
"epoch": 0.38,
"grad_norm": 0.11070161341925377,
"learning_rate": 0.00014768675014285062,
"loss": 0.4585,
"step": 64
},
{
"epoch": 0.39,
"grad_norm": 0.13308674882888374,
"learning_rate": 0.00014592972892229778,
"loss": 0.4974,
"step": 65
},
{
"epoch": 0.39,
"grad_norm": 0.12124588853708144,
"learning_rate": 0.0001441545498215912,
"loss": 0.4463,
"step": 66
},
{
"epoch": 0.4,
"grad_norm": 0.1183570515369953,
"learning_rate": 0.00014236191464085286,
"loss": 0.447,
"step": 67
},
{
"epoch": 0.4,
"grad_norm": 0.13520024884417237,
"learning_rate": 0.00014055253208129938,
"loss": 0.5309,
"step": 68
},
{
"epoch": 0.41,
"grad_norm": 0.12184981458813801,
"learning_rate": 0.00013872711746506413,
"loss": 0.4532,
"step": 69
},
{
"epoch": 0.42,
"grad_norm": 0.12449299540645078,
"learning_rate": 0.00013688639245240078,
"loss": 0.5198,
"step": 70
},
{
"epoch": 0.42,
"grad_norm": 0.1383134750490429,
"learning_rate": 0.00013503108475638244,
"loss": 0.5629,
"step": 71
},
{
"epoch": 0.43,
"grad_norm": 0.246237001656926,
"learning_rate": 0.0001331619278552068,
"loss": 0.4869,
"step": 72
},
{
"epoch": 0.43,
"grad_norm": 0.13337703940933632,
"learning_rate": 0.00013127966070222274,
"loss": 0.4792,
"step": 73
},
{
"epoch": 0.44,
"grad_norm": 0.12428922033806454,
"learning_rate": 0.00012938502743379212,
"loss": 0.4825,
"step": 74
},
{
"epoch": 0.45,
"grad_norm": 0.13290774912900208,
"learning_rate": 0.00012747877707510252,
"loss": 0.5138,
"step": 75
},
{
"epoch": 0.45,
"grad_norm": 0.11185975046756892,
"learning_rate": 0.0001255616632440475,
"loss": 0.4815,
"step": 76
},
{
"epoch": 0.46,
"grad_norm": 0.1130592868215497,
"learning_rate": 0.0001236344438532905,
"loss": 0.5046,
"step": 77
},
{
"epoch": 0.46,
"grad_norm": 0.12882943465594857,
"learning_rate": 0.0001216978808106318,
"loss": 0.5091,
"step": 78
},
{
"epoch": 0.47,
"grad_norm": 0.14837896297082676,
"learning_rate": 0.00011975273971779528,
"loss": 0.5158,
"step": 79
},
{
"epoch": 0.48,
"grad_norm": 0.1265223309856292,
"learning_rate": 0.00011779978956775506,
"loss": 0.5068,
"step": 80
},
{
"epoch": 0.48,
"grad_norm": 0.14042502330520407,
"learning_rate": 0.0001158398024407215,
"loss": 0.5061,
"step": 81
},
{
"epoch": 0.49,
"grad_norm": 0.1261526695491767,
"learning_rate": 0.00011387355319890685,
"loss": 0.4691,
"step": 82
},
{
"epoch": 0.49,
"grad_norm": 0.12007305451001854,
"learning_rate": 0.00011190181918019049,
"loss": 0.4753,
"step": 83
},
{
"epoch": 0.5,
"grad_norm": 0.12809956897166885,
"learning_rate": 0.00010992537989080618,
"loss": 0.4417,
"step": 84
},
{
"epoch": 0.5,
"eval_loss": 0.9675251841545105,
"eval_runtime": 152.4793,
"eval_samples_per_second": 1.633,
"eval_steps_per_second": 0.82,
"step": 84
},
{
"epoch": 0.51,
"grad_norm": 0.11858329804793687,
"learning_rate": 0.00010794501669717145,
"loss": 0.4868,
"step": 85
},
{
"epoch": 0.51,
"grad_norm": 0.10984649953887334,
"learning_rate": 0.00010596151251698199,
"loss": 0.4598,
"step": 86
},
{
"epoch": 0.52,
"grad_norm": 0.10927203986256682,
"learning_rate": 0.0001039756515096926,
"loss": 0.4693,
"step": 87
},
{
"epoch": 0.52,
"grad_norm": 0.11205046531522328,
"learning_rate": 0.00010198821876650701,
"loss": 0.4921,
"step": 88
},
{
"epoch": 0.53,
"grad_norm": 0.13232347270009215,
"learning_rate": 0.0001,
"loss": 0.4695,
"step": 89
},
{
"epoch": 0.53,
"grad_norm": 0.12136881873560385,
"learning_rate": 9.801178123349298e-05,
"loss": 0.4859,
"step": 90
},
{
"epoch": 0.54,
"grad_norm": 0.14347476421156694,
"learning_rate": 9.602434849030745e-05,
"loss": 0.4796,
"step": 91
},
{
"epoch": 0.55,
"grad_norm": 0.13956845267055204,
"learning_rate": 9.403848748301802e-05,
"loss": 0.5339,
"step": 92
},
{
"epoch": 0.55,
"grad_norm": 0.12814010903196785,
"learning_rate": 9.205498330282856e-05,
"loss": 0.5267,
"step": 93
},
{
"epoch": 0.56,
"grad_norm": 0.12798850330908082,
"learning_rate": 9.007462010919386e-05,
"loss": 0.4604,
"step": 94
},
{
"epoch": 0.56,
"grad_norm": 0.13673366056605873,
"learning_rate": 8.809818081980953e-05,
"loss": 0.49,
"step": 95
},
{
"epoch": 0.57,
"grad_norm": 0.12607483394599764,
"learning_rate": 8.612644680109319e-05,
"loss": 0.4774,
"step": 96
},
{
"epoch": 0.58,
"grad_norm": 0.1365629261848207,
"learning_rate": 8.416019755927851e-05,
"loss": 0.4827,
"step": 97
},
{
"epoch": 0.58,
"grad_norm": 0.12122559291940836,
"learning_rate": 8.2200210432245e-05,
"loss": 0.5044,
"step": 98
},
{
"epoch": 0.59,
"grad_norm": 0.11655390642565265,
"learning_rate": 8.024726028220474e-05,
"loss": 0.503,
"step": 99
},
{
"epoch": 0.59,
"grad_norm": 0.12394574502796742,
"learning_rate": 7.83021191893682e-05,
"loss": 0.491,
"step": 100
},
{
"epoch": 0.6,
"grad_norm": 0.14922841699852962,
"learning_rate": 7.636555614670953e-05,
"loss": 0.457,
"step": 101
},
{
"epoch": 0.61,
"grad_norm": 0.11076924096187928,
"learning_rate": 7.443833675595255e-05,
"loss": 0.4603,
"step": 102
},
{
"epoch": 0.61,
"grad_norm": 0.1263594611752413,
"learning_rate": 7.252122292489747e-05,
"loss": 0.4859,
"step": 103
},
{
"epoch": 0.62,
"grad_norm": 0.11432575178505003,
"learning_rate": 7.061497256620793e-05,
"loss": 0.4627,
"step": 104
},
{
"epoch": 0.62,
"grad_norm": 0.1078119391965793,
"learning_rate": 6.87203392977773e-05,
"loss": 0.4829,
"step": 105
},
{
"epoch": 0.63,
"grad_norm": 0.12752089816514908,
"learning_rate": 6.683807214479323e-05,
"loss": 0.46,
"step": 106
},
{
"epoch": 0.64,
"grad_norm": 0.11421623043902956,
"learning_rate": 6.496891524361757e-05,
"loss": 0.4429,
"step": 107
},
{
"epoch": 0.64,
"grad_norm": 0.10432253193399477,
"learning_rate": 6.311360754759923e-05,
"loss": 0.402,
"step": 108
},
{
"epoch": 0.65,
"grad_norm": 0.12155248673662734,
"learning_rate": 6.127288253493591e-05,
"loss": 0.5126,
"step": 109
},
{
"epoch": 0.65,
"grad_norm": 0.14266947863559803,
"learning_rate": 5.9447467918700614e-05,
"loss": 0.4821,
"step": 110
},
{
"epoch": 0.66,
"grad_norm": 0.14851250761112514,
"learning_rate": 5.763808535914723e-05,
"loss": 0.4891,
"step": 111
},
{
"epoch": 0.67,
"grad_norm": 0.14264023747361737,
"learning_rate": 5.584545017840885e-05,
"loss": 0.5181,
"step": 112
},
{
"epoch": 0.67,
"grad_norm": 0.12837168363458795,
"learning_rate": 5.407027107770219e-05,
"loss": 0.5599,
"step": 113
},
{
"epoch": 0.68,
"grad_norm": 0.11874709251257598,
"learning_rate": 5.2313249857149414e-05,
"loss": 0.4536,
"step": 114
},
{
"epoch": 0.68,
"grad_norm": 0.12010754957532713,
"learning_rate": 5.0575081138327715e-05,
"loss": 0.5004,
"step": 115
},
{
"epoch": 0.69,
"grad_norm": 0.13464124440677885,
"learning_rate": 4.885645208965779e-05,
"loss": 0.4985,
"step": 116
},
{
"epoch": 0.7,
"grad_norm": 0.13701854261941088,
"learning_rate": 4.715804215473809e-05,
"loss": 0.4709,
"step": 117
},
{
"epoch": 0.7,
"grad_norm": 0.1335483738873249,
"learning_rate": 4.548052278373327e-05,
"loss": 0.4735,
"step": 118
},
{
"epoch": 0.71,
"grad_norm": 0.13603172024059101,
"learning_rate": 4.382455716792291e-05,
"loss": 0.4721,
"step": 119
},
{
"epoch": 0.71,
"grad_norm": 0.13843339239058639,
"learning_rate": 4.219079997751515e-05,
"loss": 0.4954,
"step": 120
},
{
"epoch": 0.72,
"grad_norm": 0.15011169526780793,
"learning_rate": 4.0579897102828966e-05,
"loss": 0.4648,
"step": 121
},
{
"epoch": 0.73,
"grad_norm": 0.13061595453081623,
"learning_rate": 3.899248539894757e-05,
"loss": 0.4801,
"step": 122
},
{
"epoch": 0.73,
"grad_norm": 0.14067787924603412,
"learning_rate": 3.7429192433944014e-05,
"loss": 0.4805,
"step": 123
},
{
"epoch": 0.74,
"grad_norm": 0.13420057703295998,
"learning_rate": 3.589063624077802e-05,
"loss": 0.4446,
"step": 124
},
{
"epoch": 0.74,
"grad_norm": 0.14083737654873127,
"learning_rate": 3.4377425072962465e-05,
"loss": 0.46,
"step": 125
},
{
"epoch": 0.75,
"grad_norm": 0.13231889777376862,
"learning_rate": 3.289015716409631e-05,
"loss": 0.4451,
"step": 126
},
{
"epoch": 0.75,
"eval_loss": 0.9651579260826111,
"eval_runtime": 155.5959,
"eval_samples_per_second": 1.6,
"eval_steps_per_second": 0.803,
"step": 126
}
],
"logging_steps": 1,
"max_steps": 168,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 42,
"total_flos": 2.5549258322041897e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}