{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9991537376586743,
  "eval_steps": 500,
  "global_step": 1329,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022566995768688293,
      "grad_norm": 1.1076090467924995,
      "learning_rate": 5e-06,
      "loss": 0.7438,
      "step": 10
    },
    {
      "epoch": 0.045133991537376586,
      "grad_norm": 0.991735987363898,
      "learning_rate": 5e-06,
      "loss": 0.6802,
      "step": 20
    },
    {
      "epoch": 0.06770098730606489,
      "grad_norm": 0.8721792478756093,
      "learning_rate": 5e-06,
      "loss": 0.6629,
      "step": 30
    },
    {
      "epoch": 0.09026798307475317,
      "grad_norm": 0.5951545166641695,
      "learning_rate": 5e-06,
      "loss": 0.6406,
      "step": 40
    },
    {
      "epoch": 0.11283497884344147,
      "grad_norm": 0.7007845996565958,
      "learning_rate": 5e-06,
      "loss": 0.634,
      "step": 50
    },
    {
      "epoch": 0.13540197461212977,
      "grad_norm": 0.5943811510802003,
      "learning_rate": 5e-06,
      "loss": 0.6332,
      "step": 60
    },
    {
      "epoch": 0.15796897038081806,
      "grad_norm": 0.6353234676535418,
      "learning_rate": 5e-06,
      "loss": 0.629,
      "step": 70
    },
    {
      "epoch": 0.18053596614950634,
      "grad_norm": 0.6006567013207602,
      "learning_rate": 5e-06,
      "loss": 0.6306,
      "step": 80
    },
    {
      "epoch": 0.20310296191819463,
      "grad_norm": 0.7046632924461987,
      "learning_rate": 5e-06,
      "loss": 0.6282,
      "step": 90
    },
    {
      "epoch": 0.22566995768688294,
      "grad_norm": 0.7021240097891023,
      "learning_rate": 5e-06,
      "loss": 0.6272,
      "step": 100
    },
    {
      "epoch": 0.24823695345557123,
      "grad_norm": 0.5910229174924321,
      "learning_rate": 5e-06,
      "loss": 0.618,
      "step": 110
    },
    {
      "epoch": 0.27080394922425954,
      "grad_norm": 0.596959901149353,
      "learning_rate": 5e-06,
      "loss": 0.6173,
      "step": 120
    },
    {
      "epoch": 0.2933709449929478,
      "grad_norm": 0.8993218759485339,
      "learning_rate": 5e-06,
      "loss": 0.6185,
      "step": 130
    },
    {
      "epoch": 0.3159379407616361,
      "grad_norm": 0.6147671660802024,
      "learning_rate": 5e-06,
      "loss": 0.6135,
      "step": 140
    },
    {
      "epoch": 0.3385049365303244,
      "grad_norm": 0.6724362951434409,
      "learning_rate": 5e-06,
      "loss": 0.614,
      "step": 150
    },
    {
      "epoch": 0.3610719322990127,
      "grad_norm": 0.6020059058264652,
      "learning_rate": 5e-06,
      "loss": 0.6157,
      "step": 160
    },
    {
      "epoch": 0.383638928067701,
      "grad_norm": 0.6467062739486626,
      "learning_rate": 5e-06,
      "loss": 0.6118,
      "step": 170
    },
    {
      "epoch": 0.40620592383638926,
      "grad_norm": 0.6337134461733973,
      "learning_rate": 5e-06,
      "loss": 0.6087,
      "step": 180
    },
    {
      "epoch": 0.4287729196050776,
      "grad_norm": 1.187290343191463,
      "learning_rate": 5e-06,
      "loss": 0.6032,
      "step": 190
    },
    {
      "epoch": 0.4513399153737659,
      "grad_norm": 0.6573468292913784,
      "learning_rate": 5e-06,
      "loss": 0.6099,
      "step": 200
    },
    {
      "epoch": 0.47390691114245415,
      "grad_norm": 0.5436088131885063,
      "learning_rate": 5e-06,
      "loss": 0.6063,
      "step": 210
    },
    {
      "epoch": 0.49647390691114246,
      "grad_norm": 0.5714729478422241,
      "learning_rate": 5e-06,
      "loss": 0.598,
      "step": 220
    },
    {
      "epoch": 0.5190409026798307,
      "grad_norm": 0.6369923617172581,
      "learning_rate": 5e-06,
      "loss": 0.6068,
      "step": 230
    },
    {
      "epoch": 0.5416078984485191,
      "grad_norm": 0.6174803589160689,
      "learning_rate": 5e-06,
      "loss": 0.6057,
      "step": 240
    },
    {
      "epoch": 0.5641748942172073,
      "grad_norm": 0.5746679987420106,
      "learning_rate": 5e-06,
      "loss": 0.5998,
      "step": 250
    },
    {
      "epoch": 0.5867418899858956,
      "grad_norm": 0.7158022714822785,
      "learning_rate": 5e-06,
      "loss": 0.6025,
      "step": 260
    },
    {
      "epoch": 0.609308885754584,
      "grad_norm": 0.6550177518697455,
      "learning_rate": 5e-06,
      "loss": 0.5979,
      "step": 270
    },
    {
      "epoch": 0.6318758815232722,
      "grad_norm": 0.5848914985146456,
      "learning_rate": 5e-06,
      "loss": 0.6045,
      "step": 280
    },
    {
      "epoch": 0.6544428772919605,
      "grad_norm": 0.6013598059389905,
      "learning_rate": 5e-06,
      "loss": 0.5979,
      "step": 290
    },
    {
      "epoch": 0.6770098730606487,
      "grad_norm": 0.6694540638792662,
      "learning_rate": 5e-06,
      "loss": 0.604,
      "step": 300
    },
    {
      "epoch": 0.6995768688293371,
      "grad_norm": 0.5419093913651906,
      "learning_rate": 5e-06,
      "loss": 0.5972,
      "step": 310
    },
    {
      "epoch": 0.7221438645980254,
      "grad_norm": 0.5432682430557793,
      "learning_rate": 5e-06,
      "loss": 0.6004,
      "step": 320
    },
    {
      "epoch": 0.7447108603667136,
      "grad_norm": 0.5609209464002083,
      "learning_rate": 5e-06,
      "loss": 0.596,
      "step": 330
    },
    {
      "epoch": 0.767277856135402,
      "grad_norm": 0.5771320670008115,
      "learning_rate": 5e-06,
      "loss": 0.5955,
      "step": 340
    },
    {
      "epoch": 0.7898448519040903,
      "grad_norm": 0.6553502516133991,
      "learning_rate": 5e-06,
      "loss": 0.597,
      "step": 350
    },
    {
      "epoch": 0.8124118476727785,
      "grad_norm": 0.5735610527819333,
      "learning_rate": 5e-06,
      "loss": 0.5959,
      "step": 360
    },
    {
      "epoch": 0.8349788434414669,
      "grad_norm": 0.6196981942326923,
      "learning_rate": 5e-06,
      "loss": 0.603,
      "step": 370
    },
    {
      "epoch": 0.8575458392101551,
      "grad_norm": 0.5892757008227134,
      "learning_rate": 5e-06,
      "loss": 0.5997,
      "step": 380
    },
    {
      "epoch": 0.8801128349788434,
      "grad_norm": 0.6377051547834243,
      "learning_rate": 5e-06,
      "loss": 0.5937,
      "step": 390
    },
    {
      "epoch": 0.9026798307475318,
      "grad_norm": 0.5793619434689786,
      "learning_rate": 5e-06,
      "loss": 0.596,
      "step": 400
    },
    {
      "epoch": 0.92524682651622,
      "grad_norm": 0.5940412617035421,
      "learning_rate": 5e-06,
      "loss": 0.594,
      "step": 410
    },
    {
      "epoch": 0.9478138222849083,
      "grad_norm": 0.5720643197976489,
      "learning_rate": 5e-06,
      "loss": 0.5946,
      "step": 420
    },
    {
      "epoch": 0.9703808180535967,
      "grad_norm": 0.5934993591499655,
      "learning_rate": 5e-06,
      "loss": 0.5886,
      "step": 430
    },
    {
      "epoch": 0.9929478138222849,
      "grad_norm": 0.7147785717960933,
      "learning_rate": 5e-06,
      "loss": 0.5934,
      "step": 440
    },
    {
      "epoch": 0.9997179125528914,
      "eval_loss": 0.5912777781486511,
      "eval_runtime": 691.0827,
      "eval_samples_per_second": 17.279,
      "eval_steps_per_second": 0.541,
      "step": 443
    },
    {
      "epoch": 1.0155148095909732,
      "grad_norm": 0.6953054875114835,
      "learning_rate": 5e-06,
      "loss": 0.6008,
      "step": 450
    },
    {
      "epoch": 1.0380818053596614,
      "grad_norm": 0.5903993048531896,
      "learning_rate": 5e-06,
      "loss": 0.528,
      "step": 460
    },
    {
      "epoch": 1.0606488011283497,
      "grad_norm": 0.6324628267144559,
      "learning_rate": 5e-06,
      "loss": 0.5429,
      "step": 470
    },
    {
      "epoch": 1.0832157968970382,
      "grad_norm": 0.5764061185246054,
      "learning_rate": 5e-06,
      "loss": 0.5308,
      "step": 480
    },
    {
      "epoch": 1.1057827926657264,
      "grad_norm": 0.6012235908025508,
      "learning_rate": 5e-06,
      "loss": 0.5408,
      "step": 490
    },
    {
      "epoch": 1.1283497884344147,
      "grad_norm": 0.5789335395759082,
      "learning_rate": 5e-06,
      "loss": 0.5396,
      "step": 500
    },
    {
      "epoch": 1.150916784203103,
      "grad_norm": 0.6109006953078554,
      "learning_rate": 5e-06,
      "loss": 0.5407,
      "step": 510
    },
    {
      "epoch": 1.1734837799717912,
      "grad_norm": 0.5684452419181554,
      "learning_rate": 5e-06,
      "loss": 0.5333,
      "step": 520
    },
    {
      "epoch": 1.1960507757404795,
      "grad_norm": 0.6963518157721161,
      "learning_rate": 5e-06,
      "loss": 0.5395,
      "step": 530
    },
    {
      "epoch": 1.2186177715091677,
      "grad_norm": 0.6141353364563702,
      "learning_rate": 5e-06,
      "loss": 0.5371,
      "step": 540
    },
    {
      "epoch": 1.2411847672778562,
      "grad_norm": 0.6539340543845323,
      "learning_rate": 5e-06,
      "loss": 0.5419,
      "step": 550
    },
    {
      "epoch": 1.2637517630465445,
      "grad_norm": 0.6168349027651291,
      "learning_rate": 5e-06,
      "loss": 0.5438,
      "step": 560
    },
    {
      "epoch": 1.2863187588152327,
      "grad_norm": 0.581736789269481,
      "learning_rate": 5e-06,
      "loss": 0.541,
      "step": 570
    },
    {
      "epoch": 1.308885754583921,
      "grad_norm": 0.5594406044592434,
      "learning_rate": 5e-06,
      "loss": 0.5409,
      "step": 580
    },
    {
      "epoch": 1.3314527503526092,
      "grad_norm": 0.5620275131748222,
      "learning_rate": 5e-06,
      "loss": 0.5411,
      "step": 590
    },
    {
      "epoch": 1.3540197461212977,
      "grad_norm": 0.6238286913601226,
      "learning_rate": 5e-06,
      "loss": 0.5463,
      "step": 600
    },
    {
      "epoch": 1.376586741889986,
      "grad_norm": 0.5415008064753611,
      "learning_rate": 5e-06,
      "loss": 0.5391,
      "step": 610
    },
    {
      "epoch": 1.3991537376586742,
      "grad_norm": 0.5610560374369729,
      "learning_rate": 5e-06,
      "loss": 0.5433,
      "step": 620
    },
    {
      "epoch": 1.4217207334273625,
      "grad_norm": 0.550463676620283,
      "learning_rate": 5e-06,
      "loss": 0.5413,
      "step": 630
    },
    {
      "epoch": 1.4442877291960508,
      "grad_norm": 0.639666171350872,
      "learning_rate": 5e-06,
      "loss": 0.5409,
      "step": 640
    },
    {
      "epoch": 1.466854724964739,
      "grad_norm": 0.5585935410492179,
      "learning_rate": 5e-06,
      "loss": 0.5418,
      "step": 650
    },
    {
      "epoch": 1.4894217207334273,
      "grad_norm": 0.6134871404266905,
      "learning_rate": 5e-06,
      "loss": 0.5456,
      "step": 660
    },
    {
      "epoch": 1.5119887165021155,
      "grad_norm": 0.5682459714599422,
      "learning_rate": 5e-06,
      "loss": 0.5403,
      "step": 670
    },
    {
      "epoch": 1.5345557122708038,
      "grad_norm": 0.5522175532246568,
      "learning_rate": 5e-06,
      "loss": 0.5344,
      "step": 680
    },
    {
      "epoch": 1.5571227080394923,
      "grad_norm": 0.5925815010766872,
      "learning_rate": 5e-06,
      "loss": 0.5366,
      "step": 690
    },
    {
      "epoch": 1.5796897038081805,
      "grad_norm": 0.6578651642561647,
      "learning_rate": 5e-06,
      "loss": 0.5408,
      "step": 700
    },
    {
      "epoch": 1.6022566995768688,
      "grad_norm": 0.5408548916824767,
      "learning_rate": 5e-06,
      "loss": 0.5435,
      "step": 710
    },
    {
      "epoch": 1.6248236953455573,
      "grad_norm": 0.5988342872882225,
      "learning_rate": 5e-06,
      "loss": 0.5518,
      "step": 720
    },
    {
      "epoch": 1.6473906911142455,
      "grad_norm": 0.5384009297069935,
      "learning_rate": 5e-06,
      "loss": 0.535,
      "step": 730
    },
    {
      "epoch": 1.6699576868829338,
      "grad_norm": 0.7024106203910366,
      "learning_rate": 5e-06,
      "loss": 0.5508,
      "step": 740
    },
    {
      "epoch": 1.692524682651622,
      "grad_norm": 0.6637725967890132,
      "learning_rate": 5e-06,
      "loss": 0.5422,
      "step": 750
    },
    {
      "epoch": 1.7150916784203103,
      "grad_norm": 0.6071900872105588,
      "learning_rate": 5e-06,
      "loss": 0.541,
      "step": 760
    },
    {
      "epoch": 1.7376586741889986,
      "grad_norm": 0.6565272249072508,
      "learning_rate": 5e-06,
      "loss": 0.5417,
      "step": 770
    },
    {
      "epoch": 1.7602256699576868,
      "grad_norm": 0.5523582207595168,
      "learning_rate": 5e-06,
      "loss": 0.5375,
      "step": 780
    },
    {
      "epoch": 1.782792665726375,
      "grad_norm": 0.5699161494100724,
      "learning_rate": 5e-06,
      "loss": 0.541,
      "step": 790
    },
    {
      "epoch": 1.8053596614950633,
      "grad_norm": 0.5993412920248554,
      "learning_rate": 5e-06,
      "loss": 0.536,
      "step": 800
    },
    {
      "epoch": 1.8279266572637518,
      "grad_norm": 0.7661220656377966,
      "learning_rate": 5e-06,
      "loss": 0.5468,
      "step": 810
    },
    {
      "epoch": 1.85049365303244,
      "grad_norm": 0.6803479582333226,
      "learning_rate": 5e-06,
      "loss": 0.5341,
      "step": 820
    },
    {
      "epoch": 1.8730606488011283,
      "grad_norm": 0.5645527605654231,
      "learning_rate": 5e-06,
      "loss": 0.5462,
      "step": 830
    },
    {
      "epoch": 1.8956276445698168,
      "grad_norm": 0.6297088212997327,
      "learning_rate": 5e-06,
      "loss": 0.5461,
      "step": 840
    },
    {
      "epoch": 1.918194640338505,
      "grad_norm": 0.555460659706579,
      "learning_rate": 5e-06,
      "loss": 0.534,
      "step": 850
    },
    {
      "epoch": 1.9407616361071933,
      "grad_norm": 0.6152170831362264,
      "learning_rate": 5e-06,
      "loss": 0.539,
      "step": 860
    },
    {
      "epoch": 1.9633286318758816,
      "grad_norm": 0.8147767450293179,
      "learning_rate": 5e-06,
      "loss": 0.5456,
      "step": 870
    },
    {
      "epoch": 1.9858956276445698,
      "grad_norm": 0.5537465355114557,
      "learning_rate": 5e-06,
      "loss": 0.5439,
      "step": 880
    },
    {
      "epoch": 1.9994358251057829,
      "eval_loss": 0.5872690677642822,
      "eval_runtime": 693.4307,
      "eval_samples_per_second": 17.22,
      "eval_steps_per_second": 0.539,
      "step": 886
    },
    {
      "epoch": 2.008462623413258,
      "grad_norm": 1.1153368590085961,
      "learning_rate": 5e-06,
      "loss": 0.5671,
      "step": 890
    },
    {
      "epoch": 2.0310296191819464,
      "grad_norm": 0.7190180926473527,
      "learning_rate": 5e-06,
      "loss": 0.4778,
      "step": 900
    },
    {
      "epoch": 2.0535966149506346,
      "grad_norm": 0.9191132830124585,
      "learning_rate": 5e-06,
      "loss": 0.4796,
      "step": 910
    },
    {
      "epoch": 2.076163610719323,
      "grad_norm": 0.6036730274846656,
      "learning_rate": 5e-06,
      "loss": 0.4721,
      "step": 920
    },
    {
      "epoch": 2.098730606488011,
      "grad_norm": 0.6101496201859484,
      "learning_rate": 5e-06,
      "loss": 0.4795,
      "step": 930
    },
    {
      "epoch": 2.1212976022566994,
      "grad_norm": 0.585050989650914,
      "learning_rate": 5e-06,
      "loss": 0.4837,
      "step": 940
    },
    {
      "epoch": 2.143864598025388,
      "grad_norm": 0.6277895846288643,
      "learning_rate": 5e-06,
      "loss": 0.4853,
      "step": 950
    },
    {
      "epoch": 2.1664315937940763,
      "grad_norm": 0.6008146434474768,
      "learning_rate": 5e-06,
      "loss": 0.4816,
      "step": 960
    },
    {
      "epoch": 2.1889985895627646,
      "grad_norm": 0.7050816006704801,
      "learning_rate": 5e-06,
      "loss": 0.4798,
      "step": 970
    },
    {
      "epoch": 2.211565585331453,
      "grad_norm": 0.6039233304716536,
      "learning_rate": 5e-06,
      "loss": 0.4748,
      "step": 980
    },
    {
      "epoch": 2.234132581100141,
      "grad_norm": 0.567509568979696,
      "learning_rate": 5e-06,
      "loss": 0.4747,
      "step": 990
    },
    {
      "epoch": 2.2566995768688294,
      "grad_norm": 0.5935331317563618,
      "learning_rate": 5e-06,
      "loss": 0.4836,
      "step": 1000
    },
    {
      "epoch": 2.2792665726375176,
      "grad_norm": 0.6059487518831281,
      "learning_rate": 5e-06,
      "loss": 0.4899,
      "step": 1010
    },
    {
      "epoch": 2.301833568406206,
      "grad_norm": 0.7227979366745806,
      "learning_rate": 5e-06,
      "loss": 0.4843,
      "step": 1020
    },
    {
      "epoch": 2.324400564174894,
      "grad_norm": 0.6568599565188047,
      "learning_rate": 5e-06,
      "loss": 0.4887,
      "step": 1030
    },
    {
      "epoch": 2.3469675599435824,
      "grad_norm": 0.6397579402229872,
      "learning_rate": 5e-06,
      "loss": 0.4865,
      "step": 1040
    },
    {
      "epoch": 2.3695345557122707,
      "grad_norm": 0.6245354910262706,
      "learning_rate": 5e-06,
      "loss": 0.4844,
      "step": 1050
    },
    {
      "epoch": 2.392101551480959,
      "grad_norm": 0.7052327086747691,
      "learning_rate": 5e-06,
      "loss": 0.4837,
      "step": 1060
    },
    {
      "epoch": 2.414668547249647,
      "grad_norm": 0.7931151763067521,
      "learning_rate": 5e-06,
      "loss": 0.4849,
      "step": 1070
    },
    {
      "epoch": 2.4372355430183354,
      "grad_norm": 0.7417762688685384,
      "learning_rate": 5e-06,
      "loss": 0.4883,
      "step": 1080
    },
    {
      "epoch": 2.459802538787024,
      "grad_norm": 0.5727539550326386,
      "learning_rate": 5e-06,
      "loss": 0.4866,
      "step": 1090
    },
    {
      "epoch": 2.4823695345557124,
      "grad_norm": 0.6064535411598891,
      "learning_rate": 5e-06,
      "loss": 0.4897,
      "step": 1100
    },
    {
      "epoch": 2.5049365303244007,
      "grad_norm": 0.7529141319606983,
      "learning_rate": 5e-06,
      "loss": 0.4928,
      "step": 1110
    },
    {
      "epoch": 2.527503526093089,
      "grad_norm": 0.6477529656503626,
      "learning_rate": 5e-06,
      "loss": 0.4834,
      "step": 1120
    },
    {
      "epoch": 2.550070521861777,
      "grad_norm": 0.6001686437129918,
      "learning_rate": 5e-06,
      "loss": 0.4884,
      "step": 1130
    },
    {
      "epoch": 2.5726375176304654,
      "grad_norm": 0.5780283875472694,
      "learning_rate": 5e-06,
      "loss": 0.4867,
      "step": 1140
    },
    {
      "epoch": 2.5952045133991537,
      "grad_norm": 0.6869772073867023,
      "learning_rate": 5e-06,
      "loss": 0.4875,
      "step": 1150
    },
    {
      "epoch": 2.617771509167842,
      "grad_norm": 0.6343021260253895,
      "learning_rate": 5e-06,
      "loss": 0.4911,
      "step": 1160
    },
    {
      "epoch": 2.64033850493653,
      "grad_norm": 0.6420034867026322,
      "learning_rate": 5e-06,
      "loss": 0.4803,
      "step": 1170
    },
    {
      "epoch": 2.6629055007052185,
      "grad_norm": 0.6187250288318752,
      "learning_rate": 5e-06,
      "loss": 0.491,
      "step": 1180
    },
    {
      "epoch": 2.685472496473907,
      "grad_norm": 0.5729742442845362,
      "learning_rate": 5e-06,
      "loss": 0.4917,
      "step": 1190
    },
    {
      "epoch": 2.7080394922425954,
      "grad_norm": 0.6175661543737463,
      "learning_rate": 5e-06,
      "loss": 0.4903,
      "step": 1200
    },
    {
      "epoch": 2.7306064880112837,
      "grad_norm": 0.5877150613568881,
      "learning_rate": 5e-06,
      "loss": 0.4916,
      "step": 1210
    },
    {
      "epoch": 2.753173483779972,
      "grad_norm": 0.5503620579944024,
      "learning_rate": 5e-06,
      "loss": 0.4882,
      "step": 1220
    },
    {
      "epoch": 2.77574047954866,
      "grad_norm": 0.5737020950553914,
      "learning_rate": 5e-06,
      "loss": 0.4947,
      "step": 1230
    },
    {
      "epoch": 2.7983074753173485,
      "grad_norm": 0.6207907502188522,
      "learning_rate": 5e-06,
      "loss": 0.4929,
      "step": 1240
    },
    {
      "epoch": 2.8208744710860367,
      "grad_norm": 0.5646776454664503,
      "learning_rate": 5e-06,
      "loss": 0.4931,
      "step": 1250
    },
    {
      "epoch": 2.843441466854725,
      "grad_norm": 0.5759605937513818,
      "learning_rate": 5e-06,
      "loss": 0.4949,
      "step": 1260
    },
    {
      "epoch": 2.8660084626234132,
      "grad_norm": 0.596436862143904,
      "learning_rate": 5e-06,
      "loss": 0.4918,
      "step": 1270
    },
    {
      "epoch": 2.8885754583921015,
      "grad_norm": 0.5729014229985798,
      "learning_rate": 5e-06,
      "loss": 0.4899,
      "step": 1280
    },
    {
      "epoch": 2.9111424541607898,
      "grad_norm": 0.6263762817305824,
      "learning_rate": 5e-06,
      "loss": 0.4937,
      "step": 1290
    },
    {
      "epoch": 2.933709449929478,
      "grad_norm": 0.608514072546159,
      "learning_rate": 5e-06,
      "loss": 0.4893,
      "step": 1300
    },
    {
      "epoch": 2.9562764456981663,
      "grad_norm": 0.6572286286670663,
      "learning_rate": 5e-06,
      "loss": 0.4932,
      "step": 1310
    },
    {
      "epoch": 2.9788434414668545,
      "grad_norm": 0.5821839995459226,
      "learning_rate": 5e-06,
      "loss": 0.4896,
      "step": 1320
    },
    {
      "epoch": 2.9991537376586743,
      "eval_loss": 0.6003954410552979,
      "eval_runtime": 693.5123,
      "eval_samples_per_second": 17.218,
      "eval_steps_per_second": 0.539,
      "step": 1329
    },
    {
      "epoch": 2.9991537376586743,
      "step": 1329,
      "total_flos": 5064195066298368.0,
      "train_loss": 0.5477321235823218,
      "train_runtime": 121628.759,
      "train_samples_per_second": 5.596,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 10,
  "max_steps": 1329,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5064195066298368.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}