{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7488855869242199,
  "eval_steps": 42,
  "global_step": 126,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 0.29289755909939047,
      "learning_rate": 2e-05,
      "loss": 0.6274,
      "step": 1
    },
    {
      "epoch": 0.01,
      "eval_loss": 1.0297596454620361,
      "eval_runtime": 153.0715,
      "eval_samples_per_second": 1.627,
      "eval_steps_per_second": 0.817,
      "step": 1
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.27956410941469395,
      "learning_rate": 4e-05,
      "loss": 0.6362,
      "step": 2
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.3009590515152092,
      "learning_rate": 6e-05,
      "loss": 0.6299,
      "step": 3
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.34354546270515235,
      "learning_rate": 8e-05,
      "loss": 0.6395,
      "step": 4
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.2470961998205002,
      "learning_rate": 0.0001,
      "loss": 0.6068,
      "step": 5
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.2071993792912813,
      "learning_rate": 0.00012,
      "loss": 0.5993,
      "step": 6
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.20237019487582247,
      "learning_rate": 0.00014,
      "loss": 0.6293,
      "step": 7
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.13810925455451734,
      "learning_rate": 0.00016,
      "loss": 0.5101,
      "step": 8
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.22762469698117493,
      "learning_rate": 0.00018,
      "loss": 0.5527,
      "step": 9
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.15718586910417978,
      "learning_rate": 0.0002,
      "loss": 0.5437,
      "step": 10
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.13825435646308676,
      "learning_rate": 0.00019998023297700658,
      "loss": 0.5858,
      "step": 11
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.15072585803075744,
      "learning_rate": 0.00019992093972273018,
      "loss": 0.554,
      "step": 12
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.13616736806741778,
      "learning_rate": 0.00019982214367819328,
      "loss": 0.6199,
      "step": 13
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.15841398198140327,
      "learning_rate": 0.0001996838839014696,
      "loss": 0.5495,
      "step": 14
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.16548492090565414,
      "learning_rate": 0.00019950621505224273,
      "loss": 0.5043,
      "step": 15
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.16965923620151113,
      "learning_rate": 0.00019928920737019733,
      "loss": 0.5083,
      "step": 16
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.1853247768877015,
      "learning_rate": 0.0001990329466472502,
      "loss": 0.6318,
      "step": 17
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.1824469971767483,
      "learning_rate": 0.00019873753419363336,
      "loss": 0.4809,
      "step": 18
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.13288488939703025,
      "learning_rate": 0.00019840308679784207,
      "loss": 0.4974,
      "step": 19
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.18507602579352214,
      "learning_rate": 0.00019802973668046363,
      "loss": 0.5288,
      "step": 20
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.13591012925799634,
      "learning_rate": 0.0001976176314419051,
      "loss": 0.5291,
      "step": 21
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.13704233024715257,
      "learning_rate": 0.000197166934004041,
      "loss": 0.4819,
      "step": 22
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.15362008062157553,
      "learning_rate": 0.00019667782254580374,
      "loss": 0.5408,
      "step": 23
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.11171032733778614,
      "learning_rate": 0.00019615049043274205,
      "loss": 0.5101,
      "step": 24
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.11449051252849428,
      "learning_rate": 0.00019558514614057609,
      "loss": 0.5209,
      "step": 25
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.11611407915744347,
      "learning_rate": 0.00019498201317277828,
      "loss": 0.5005,
      "step": 26
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.12735633641627706,
      "learning_rate": 0.00019434132997221345,
      "loss": 0.4741,
      "step": 27
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.11871518327376328,
      "learning_rate": 0.0001936633498268728,
      "loss": 0.5213,
      "step": 28
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.11403376465541806,
      "learning_rate": 0.0001929483407697387,
      "loss": 0.4842,
      "step": 29
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.11675155934307391,
      "learning_rate": 0.00019219658547282067,
      "loss": 0.4825,
      "step": 30
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.10789338581384152,
      "learning_rate": 0.00019140838113540346,
      "loss": 0.4866,
      "step": 31
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.14336837303756964,
      "learning_rate": 0.00019058403936655233,
      "loss": 0.5325,
      "step": 32
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.10401694793599091,
      "learning_rate": 0.00018972388606192125,
      "loss": 0.4292,
      "step": 33
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.10614245315138679,
      "learning_rate": 0.0001888282612749132,
      "loss": 0.4638,
      "step": 34
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.1250509143492961,
      "learning_rate": 0.00018789751908224338,
      "loss": 0.4963,
      "step": 35
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.12668831423294083,
      "learning_rate": 0.00018693202744395827,
      "loss": 0.5043,
      "step": 36
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.11594924793976216,
      "learning_rate": 0.00018593216805796612,
      "loss": 0.5396,
      "step": 37
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.12365894749489846,
      "learning_rate": 0.00018489833620913642,
      "loss": 0.4899,
      "step": 38
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.13164318970805183,
      "learning_rate": 0.00018383094061302766,
      "loss": 0.5065,
      "step": 39
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.10587870431925453,
      "learning_rate": 0.00018273040325430574,
      "loss": 0.4805,
      "step": 40
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.12134503625595237,
      "learning_rate": 0.00018159715921991612,
      "loss": 0.5103,
      "step": 41
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.10855490758268896,
      "learning_rate": 0.00018043165652707649,
      "loss": 0.44,
      "step": 42
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.9770342707633972,
      "eval_runtime": 154.3933,
      "eval_samples_per_second": 1.613,
      "eval_steps_per_second": 0.81,
      "step": 42
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.11753509152196469,
      "learning_rate": 0.00017923435594615744,
      "loss": 0.4819,
      "step": 43
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.13774603727779988,
      "learning_rate": 0.00017800573081852122,
      "loss": 0.5451,
      "step": 44
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.11994636996852912,
      "learning_rate": 0.0001767462668693908,
      "loss": 0.5079,
      "step": 45
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.11803018063108017,
      "learning_rate": 0.00017545646201582303,
      "loss": 0.5183,
      "step": 46
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.12122026879022209,
      "learning_rate": 0.00017413682616986185,
      "loss": 0.4692,
      "step": 47
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.12840154129375927,
      "learning_rate": 0.00017278788103694943,
      "loss": 0.4538,
      "step": 48
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.10754191454075242,
      "learning_rate": 0.000171410159909675,
      "loss": 0.4745,
      "step": 49
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.10980392972154758,
      "learning_rate": 0.00017000420745694254,
      "loss": 0.5077,
      "step": 50
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.1018314102740997,
      "learning_rate": 0.00016857057950864132,
      "loss": 0.5077,
      "step": 51
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.10572512690181787,
      "learning_rate": 0.0001671098428359037,
      "loss": 0.4637,
      "step": 52
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.13106813432864392,
      "learning_rate": 0.00016562257492703757,
      "loss": 0.4718,
      "step": 53
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.11024484284605006,
      "learning_rate": 0.000164109363759222,
      "loss": 0.5115,
      "step": 54
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.12388815222110366,
      "learning_rate": 0.000162570807566056,
      "loss": 0.4756,
      "step": 55
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.12702188830349206,
      "learning_rate": 0.00016100751460105243,
      "loss": 0.4881,
      "step": 56
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.11086308659932648,
      "learning_rate": 0.00015942010289717105,
      "loss": 0.4701,
      "step": 57
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.11517890306226485,
      "learning_rate": 0.00015780920002248484,
      "loss": 0.4835,
      "step": 58
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.1253541013817757,
      "learning_rate": 0.0001561754428320771,
      "loss": 0.5147,
      "step": 59
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.11139862438389284,
      "learning_rate": 0.00015451947721626676,
      "loss": 0.4552,
      "step": 60
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.1152466912880715,
      "learning_rate": 0.00015284195784526195,
      "loss": 0.5053,
      "step": 61
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.11719477948785388,
      "learning_rate": 0.00015114354791034225,
      "loss": 0.5079,
      "step": 62
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.12730957754598396,
      "learning_rate": 0.0001494249188616723,
      "loss": 0.58,
      "step": 63
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.11907511614241814,
      "learning_rate": 0.00014768675014285062,
      "loss": 0.4579,
      "step": 64
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.13613222195950112,
      "learning_rate": 0.00014592972892229778,
      "loss": 0.4965,
      "step": 65
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.11610538032286319,
      "learning_rate": 0.0001441545498215912,
      "loss": 0.4467,
      "step": 66
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.1046724680527729,
      "learning_rate": 0.00014236191464085286,
      "loss": 0.4463,
      "step": 67
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.11631551012538931,
      "learning_rate": 0.00014055253208129938,
      "loss": 0.5304,
      "step": 68
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.11792788666231199,
      "learning_rate": 0.00013872711746506413,
      "loss": 0.453,
      "step": 69
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.11566019183159004,
      "learning_rate": 0.00013688639245240078,
      "loss": 0.5192,
      "step": 70
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.12967661214418025,
      "learning_rate": 0.00013503108475638244,
      "loss": 0.5625,
      "step": 71
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.10928605499181634,
      "learning_rate": 0.0001331619278552068,
      "loss": 0.4861,
      "step": 72
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.12485741141890881,
      "learning_rate": 0.00013127966070222274,
      "loss": 0.4782,
      "step": 73
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.1271574869759,
      "learning_rate": 0.00012938502743379212,
      "loss": 0.4819,
      "step": 74
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.14528941719728583,
      "learning_rate": 0.00012747877707510252,
      "loss": 0.5132,
      "step": 75
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.12760881412243183,
      "learning_rate": 0.0001255616632440475,
      "loss": 0.4817,
      "step": 76
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.1239773484446177,
      "learning_rate": 0.0001236344438532905,
      "loss": 0.5045,
      "step": 77
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.1307497276864945,
      "learning_rate": 0.0001216978808106318,
      "loss": 0.5091,
      "step": 78
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.1355749101256534,
      "learning_rate": 0.00011975273971779528,
      "loss": 0.5145,
      "step": 79
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.12394948316350356,
      "learning_rate": 0.00011779978956775506,
      "loss": 0.5057,
      "step": 80
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.13424789799433426,
      "learning_rate": 0.0001158398024407215,
      "loss": 0.5058,
      "step": 81
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.13054427599188898,
      "learning_rate": 0.00011387355319890685,
      "loss": 0.4683,
      "step": 82
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.11996704114604598,
      "learning_rate": 0.00011190181918019049,
      "loss": 0.4748,
      "step": 83
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.13071208464837492,
      "learning_rate": 0.00010992537989080618,
      "loss": 0.4406,
      "step": 84
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.9652944803237915,
      "eval_runtime": 153.5377,
      "eval_samples_per_second": 1.622,
      "eval_steps_per_second": 0.814,
      "step": 84
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.11393059638442392,
      "learning_rate": 0.00010794501669717145,
      "loss": 0.4877,
      "step": 85
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.11364636282385192,
      "learning_rate": 0.00010596151251698199,
      "loss": 0.4597,
      "step": 86
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.12548992883216656,
      "learning_rate": 0.0001039756515096926,
      "loss": 0.4685,
      "step": 87
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.12825123946992636,
      "learning_rate": 0.00010198821876650701,
      "loss": 0.4924,
      "step": 88
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.12264617890724591,
      "learning_rate": 0.0001,
      "loss": 0.4678,
      "step": 89
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.1238990144553216,
      "learning_rate": 9.801178123349298e-05,
      "loss": 0.4854,
      "step": 90
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.13533454171482565,
      "learning_rate": 9.602434849030745e-05,
      "loss": 0.4784,
      "step": 91
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.13623990945679543,
      "learning_rate": 9.403848748301802e-05,
      "loss": 0.5322,
      "step": 92
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.16411366119133766,
      "learning_rate": 9.205498330282856e-05,
      "loss": 0.5258,
      "step": 93
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.11646811294381437,
      "learning_rate": 9.007462010919386e-05,
      "loss": 0.4599,
      "step": 94
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.12610649830308363,
      "learning_rate": 8.809818081980953e-05,
      "loss": 0.4891,
      "step": 95
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.12324596375061997,
      "learning_rate": 8.612644680109319e-05,
      "loss": 0.4771,
      "step": 96
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.12390720748290898,
      "learning_rate": 8.416019755927851e-05,
      "loss": 0.4814,
      "step": 97
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.11156155557793686,
      "learning_rate": 8.2200210432245e-05,
      "loss": 0.5041,
      "step": 98
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.11235071151397323,
      "learning_rate": 8.024726028220474e-05,
      "loss": 0.5023,
      "step": 99
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.12382934807374943,
      "learning_rate": 7.83021191893682e-05,
      "loss": 0.4917,
      "step": 100
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.13051565063971357,
      "learning_rate": 7.636555614670953e-05,
      "loss": 0.4567,
      "step": 101
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.11975246976802223,
      "learning_rate": 7.443833675595255e-05,
      "loss": 0.4584,
      "step": 102
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.1390325061190883,
      "learning_rate": 7.252122292489747e-05,
      "loss": 0.4853,
      "step": 103
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.13092284300351015,
      "learning_rate": 7.061497256620793e-05,
      "loss": 0.4623,
      "step": 104
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.1291139691922856,
      "learning_rate": 6.87203392977773e-05,
      "loss": 0.4826,
      "step": 105
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.12630345566649634,
      "learning_rate": 6.683807214479323e-05,
      "loss": 0.4597,
      "step": 106
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.13061642512554117,
      "learning_rate": 6.496891524361757e-05,
      "loss": 0.4415,
      "step": 107
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.11414947835097713,
      "learning_rate": 6.311360754759923e-05,
      "loss": 0.4011,
      "step": 108
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.11453014913188615,
      "learning_rate": 6.127288253493591e-05,
      "loss": 0.5118,
      "step": 109
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.15000233747727326,
      "learning_rate": 5.9447467918700614e-05,
      "loss": 0.482,
      "step": 110
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.14402898946913723,
      "learning_rate": 5.763808535914723e-05,
      "loss": 0.4881,
      "step": 111
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.1371953423024685,
      "learning_rate": 5.584545017840885e-05,
      "loss": 0.5178,
      "step": 112
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.14105581149485616,
      "learning_rate": 5.407027107770219e-05,
      "loss": 0.5584,
      "step": 113
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.13646035299068404,
      "learning_rate": 5.2313249857149414e-05,
      "loss": 0.4535,
      "step": 114
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.14741390311838884,
      "learning_rate": 5.0575081138327715e-05,
      "loss": 0.5,
      "step": 115
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.12313483515418376,
      "learning_rate": 4.885645208965779e-05,
      "loss": 0.4982,
      "step": 116
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.13192008123654636,
      "learning_rate": 4.715804215473809e-05,
      "loss": 0.4698,
      "step": 117
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.11197839688620534,
      "learning_rate": 4.548052278373327e-05,
      "loss": 0.4737,
      "step": 118
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.12574619847347493,
      "learning_rate": 4.382455716792291e-05,
      "loss": 0.472,
      "step": 119
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.11785384512597769,
      "learning_rate": 4.219079997751515e-05,
      "loss": 0.4954,
      "step": 120
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.1371137498771671,
      "learning_rate": 4.0579897102828966e-05,
      "loss": 0.4645,
      "step": 121
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.1562115085397725,
      "learning_rate": 3.899248539894757e-05,
      "loss": 0.4798,
      "step": 122
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.14251919403809987,
      "learning_rate": 3.7429192433944014e-05,
      "loss": 0.4794,
      "step": 123
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.13567896269328303,
      "learning_rate": 3.589063624077802e-05,
      "loss": 0.4441,
      "step": 124
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.14154096161651117,
      "learning_rate": 3.4377425072962465e-05,
      "loss": 0.4583,
      "step": 125
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.12685010104316322,
      "learning_rate": 3.289015716409631e-05,
      "loss": 0.4445,
      "step": 126
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.9644750952720642,
      "eval_runtime": 155.1633,
      "eval_samples_per_second": 1.605,
      "eval_steps_per_second": 0.806,
      "step": 126
    }
  ],
  "logging_steps": 1,
  "max_steps": 168,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 42,
  "total_flos": 2.5549258322041897e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}