{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 978,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 71.33833396142957,
      "learning_rate": 2.040816326530612e-06,
      "loss": 16.3689,
      "step": 1
    },
    {
      "epoch": 0.02,
      "grad_norm": 71.89278720209892,
      "learning_rate": 1.0204081632653061e-05,
      "loss": 16.1499,
      "step": 5
    },
    {
      "epoch": 0.03,
      "grad_norm": 65.72662945467931,
      "learning_rate": 2.0408163265306123e-05,
      "loss": 15.7133,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 53.593107939489194,
      "learning_rate": 3.061224489795919e-05,
      "loss": 13.3181,
      "step": 15
    },
    {
      "epoch": 0.06,
      "grad_norm": 44.297380165268656,
      "learning_rate": 4.0816326530612245e-05,
      "loss": 9.8593,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 19.985681179385026,
      "learning_rate": 5.102040816326531e-05,
      "loss": 6.3007,
      "step": 25
    },
    {
      "epoch": 0.09,
      "grad_norm": 12.900325144421931,
      "learning_rate": 6.122448979591838e-05,
      "loss": 4.5204,
      "step": 30
    },
    {
      "epoch": 0.11,
      "grad_norm": 9.486242109363502,
      "learning_rate": 7.142857142857143e-05,
      "loss": 3.3761,
      "step": 35
    },
    {
      "epoch": 0.12,
      "grad_norm": 7.1186109989022945,
      "learning_rate": 8.163265306122449e-05,
      "loss": 2.3811,
      "step": 40
    },
    {
      "epoch": 0.14,
      "grad_norm": 5.023418237589821,
      "learning_rate": 9.183673469387756e-05,
      "loss": 1.851,
      "step": 45
    },
    {
      "epoch": 0.15,
      "grad_norm": 4.554190050004734,
      "learning_rate": 0.00010204081632653062,
      "loss": 1.6038,
      "step": 50
    },
    {
      "epoch": 0.17,
      "grad_norm": 2.8804678557363896,
      "learning_rate": 0.00011224489795918367,
      "loss": 1.4206,
      "step": 55
    },
    {
      "epoch": 0.18,
      "grad_norm": 3.9529502182289398,
      "learning_rate": 0.00012244897959183676,
      "loss": 1.3664,
      "step": 60
    },
    {
      "epoch": 0.2,
      "grad_norm": 2.248441902031195,
      "learning_rate": 0.0001326530612244898,
      "loss": 1.274,
      "step": 65
    },
    {
      "epoch": 0.21,
      "grad_norm": 1.4413020771523257,
      "learning_rate": 0.00014285714285714287,
      "loss": 1.2439,
      "step": 70
    },
    {
      "epoch": 0.23,
      "grad_norm": 1.493793739973276,
      "learning_rate": 0.0001530612244897959,
      "loss": 1.1924,
      "step": 75
    },
    {
      "epoch": 0.25,
      "grad_norm": 1.458616609136318,
      "learning_rate": 0.00016326530612244898,
      "loss": 1.0841,
      "step": 80
    },
    {
      "epoch": 0.26,
      "grad_norm": 1.6382650720922636,
      "learning_rate": 0.00017346938775510205,
      "loss": 1.1002,
      "step": 85
    },
    {
      "epoch": 0.28,
      "grad_norm": 1.326596598600597,
      "learning_rate": 0.00018367346938775512,
      "loss": 1.0797,
      "step": 90
    },
    {
      "epoch": 0.29,
      "grad_norm": 1.2754617583755954,
      "learning_rate": 0.00019387755102040816,
      "loss": 1.1313,
      "step": 95
    },
    {
      "epoch": 0.31,
      "grad_norm": 1.399995470448795,
      "learning_rate": 0.00019999745104274993,
      "loss": 1.0745,
      "step": 100
    },
    {
      "epoch": 0.32,
      "grad_norm": 1.5754181414386559,
      "learning_rate": 0.00019996877676598733,
      "loss": 1.0513,
      "step": 105
    },
    {
      "epoch": 0.34,
      "grad_norm": 1.9062455011264745,
      "learning_rate": 0.00019990825118233957,
      "loss": 1.0973,
      "step": 110
    },
    {
      "epoch": 0.35,
      "grad_norm": 1.2370608192620602,
      "learning_rate": 0.00019981589357601727,
      "loss": 1.0174,
      "step": 115
    },
    {
      "epoch": 0.37,
      "grad_norm": 1.4957785022540953,
      "learning_rate": 0.0001996917333733128,
      "loss": 1.0495,
      "step": 120
    },
    {
      "epoch": 0.38,
      "grad_norm": 2.0317868737729614,
      "learning_rate": 0.00019953581013322502,
      "loss": 1.0631,
      "step": 125
    },
    {
      "epoch": 0.4,
      "grad_norm": 2.3640165600919016,
      "learning_rate": 0.00019934817353485501,
      "loss": 1.0597,
      "step": 130
    },
    {
      "epoch": 0.41,
      "grad_norm": 1.4377618154920253,
      "learning_rate": 0.00019912888336157792,
      "loss": 1.032,
      "step": 135
    },
    {
      "epoch": 0.43,
      "grad_norm": 1.6672769945555381,
      "learning_rate": 0.00019887800948199496,
      "loss": 1.0587,
      "step": 140
    },
    {
      "epoch": 0.44,
      "grad_norm": 1.7175054641214251,
      "learning_rate": 0.00019859563182767268,
      "loss": 0.9808,
      "step": 145
    },
    {
      "epoch": 0.46,
      "grad_norm": 2.055095058889153,
      "learning_rate": 0.00019828184036767556,
      "loss": 1.0301,
      "step": 150
    },
    {
      "epoch": 0.48,
      "grad_norm": 1.1983943009571292,
      "learning_rate": 0.00019793673507990088,
      "loss": 0.9844,
      "step": 155
    },
    {
      "epoch": 0.49,
      "grad_norm": 1.3919505852905427,
      "learning_rate": 0.00019756042591922434,
      "loss": 1.0325,
      "step": 160
    },
    {
      "epoch": 0.51,
      "grad_norm": 1.0559448337045312,
      "learning_rate": 0.00019715303278246724,
      "loss": 0.9838,
      "step": 165
    },
    {
      "epoch": 0.52,
      "grad_norm": 1.2948165594267842,
      "learning_rate": 0.00019671468547019573,
      "loss": 1.0184,
      "step": 170
    },
    {
      "epoch": 0.54,
      "grad_norm": 1.757117551491023,
      "learning_rate": 0.00019624552364536473,
      "loss": 0.9951,
      "step": 175
    },
    {
      "epoch": 0.55,
      "grad_norm": 1.0463120070964065,
      "learning_rate": 0.00019574569678881964,
      "loss": 0.9886,
      "step": 180
    },
    {
      "epoch": 0.57,
      "grad_norm": 1.3316373554522543,
      "learning_rate": 0.00019521536415166978,
      "loss": 1.029,
      "step": 185
    },
    {
      "epoch": 0.58,
      "grad_norm": 1.206556314151547,
      "learning_rate": 0.000194654694704549,
      "loss": 1.0185,
      "step": 190
    },
    {
      "epoch": 0.6,
      "grad_norm": 1.4497941420805274,
      "learning_rate": 0.00019406386708377955,
      "loss": 0.9952,
      "step": 195
    },
    {
      "epoch": 0.61,
      "grad_norm": 1.5111306426761502,
      "learning_rate": 0.00019344306953445633,
      "loss": 0.9533,
      "step": 200
    },
    {
      "epoch": 0.63,
      "grad_norm": 1.5977185668213372,
      "learning_rate": 0.00019279249985046948,
      "loss": 0.9654,
      "step": 205
    },
    {
      "epoch": 0.64,
      "grad_norm": 1.4919134437495485,
      "learning_rate": 0.000192112365311485,
      "loss": 0.9859,
      "step": 210
    },
    {
      "epoch": 0.66,
      "grad_norm": 1.5403119826226057,
      "learning_rate": 0.00019140288261690276,
      "loss": 1.0116,
      "step": 215
    },
    {
      "epoch": 0.67,
      "grad_norm": 1.3349640842488235,
      "learning_rate": 0.00019066427781681315,
      "loss": 0.9787,
      "step": 220
    },
    {
      "epoch": 0.69,
      "grad_norm": 1.3368327515154408,
      "learning_rate": 0.00018989678623997503,
      "loss": 0.9729,
      "step": 225
    },
    {
      "epoch": 0.71,
      "grad_norm": 1.3225697881784793,
      "learning_rate": 0.0001891006524188368,
      "loss": 0.9581,
      "step": 230
    },
    {
      "epoch": 0.72,
      "grad_norm": 1.2111399093084592,
      "learning_rate": 0.00018827613001162532,
      "loss": 0.9993,
      "step": 235
    },
    {
      "epoch": 0.74,
      "grad_norm": 1.4099026636155603,
      "learning_rate": 0.00018742348172152726,
      "loss": 0.9647,
      "step": 240
    },
    {
      "epoch": 0.75,
      "grad_norm": 1.2230878261166291,
      "learning_rate": 0.00018654297921298863,
      "loss": 0.9514,
      "step": 245
    },
    {
      "epoch": 0.77,
      "grad_norm": 1.376607259051782,
      "learning_rate": 0.0001856349030251589,
      "loss": 1.0106,
      "step": 250
    },
    {
      "epoch": 0.78,
      "grad_norm": 1.5573343816575618,
      "learning_rate": 0.0001846995424825079,
      "loss": 0.9622,
      "step": 255
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.0889975412225616,
      "learning_rate": 0.00018373719560264327,
      "loss": 1.0253,
      "step": 260
    },
    {
      "epoch": 0.81,
      "grad_norm": 1.2977152056359567,
      "learning_rate": 0.0001827481690013584,
      "loss": 1.0099,
      "step": 265
    },
    {
      "epoch": 0.83,
      "grad_norm": 1.3195763372373182,
      "learning_rate": 0.0001817327777949407,
      "loss": 0.9993,
      "step": 270
    },
    {
      "epoch": 0.84,
      "grad_norm": 1.2289773734368,
      "learning_rate": 0.00018069134549977172,
      "loss": 0.9763,
      "step": 275
    },
    {
      "epoch": 0.86,
      "grad_norm": 1.2177961464095397,
      "learning_rate": 0.00017962420392925066,
      "loss": 0.9791,
      "step": 280
    },
    {
      "epoch": 0.87,
      "grad_norm": 1.1607055291626518,
      "learning_rate": 0.00017853169308807448,
      "loss": 0.9801,
      "step": 285
    },
    {
      "epoch": 0.89,
      "grad_norm": 1.2705107901850918,
      "learning_rate": 0.00017741416106390826,
      "loss": 0.9328,
      "step": 290
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.3432344503611726,
      "learning_rate": 0.00017627196391647982,
      "loss": 0.955,
      "step": 295
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.9736443478187509,
      "learning_rate": 0.00017510546556413498,
      "loss": 1.0068,
      "step": 300
    },
    {
      "epoch": 0.94,
      "grad_norm": 1.2298616794502824,
      "learning_rate": 0.00017391503766788828,
      "loss": 0.9365,
      "step": 305
    },
    {
      "epoch": 0.95,
      "grad_norm": 1.2782580681474314,
      "learning_rate": 0.00017270105951300738,
      "loss": 0.9398,
      "step": 310
    },
    {
      "epoch": 0.97,
      "grad_norm": 1.043708677429849,
      "learning_rate": 0.0001714639178881678,
      "loss": 1.0062,
      "step": 315
    },
    {
      "epoch": 0.98,
      "grad_norm": 1.042074622226514,
      "learning_rate": 0.00017020400696221737,
      "loss": 0.9572,
      "step": 320
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.238677723678181,
      "learning_rate": 0.00016892172815858894,
      "loss": 0.9772,
      "step": 325
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.159922480583191,
      "eval_runtime": 156.0746,
      "eval_samples_per_second": 14.801,
      "eval_steps_per_second": 0.468,
      "step": 326
    },
    {
      "epoch": 1.01,
      "grad_norm": 1.0217319894407395,
      "learning_rate": 0.00016761749002740193,
      "loss": 0.946,
      "step": 330
    },
    {
      "epoch": 1.03,
      "grad_norm": 1.1246569533018882,
      "learning_rate": 0.00016629170811529318,
      "loss": 0.918,
      "step": 335
    },
    {
      "epoch": 1.04,
      "grad_norm": 1.002004590448949,
      "learning_rate": 0.00016494480483301836,
      "loss": 0.9497,
      "step": 340
    },
    {
      "epoch": 1.06,
      "grad_norm": 1.212522253836943,
      "learning_rate": 0.00016357720932086688,
      "loss": 0.8865,
      "step": 345
    },
    {
      "epoch": 1.07,
      "grad_norm": 1.1180679319761555,
      "learning_rate": 0.00016218935731193224,
      "loss": 0.9137,
      "step": 350
    },
    {
      "epoch": 1.09,
      "grad_norm": 1.243690740596817,
      "learning_rate": 0.00016078169099328197,
      "loss": 0.928,
      "step": 355
    },
    {
      "epoch": 1.1,
      "grad_norm": 1.0059412266802719,
      "learning_rate": 0.00015935465886507142,
      "loss": 0.9132,
      "step": 360
    },
    {
      "epoch": 1.12,
      "grad_norm": 1.449853291817741,
      "learning_rate": 0.0001579087155976459,
      "loss": 0.9236,
      "step": 365
    },
    {
      "epoch": 1.13,
      "grad_norm": 1.200969298535005,
      "learning_rate": 0.00015644432188667695,
      "loss": 0.9191,
      "step": 370
    },
    {
      "epoch": 1.15,
      "grad_norm": 1.3541813597090206,
      "learning_rate": 0.00015496194430637903,
      "loss": 0.9882,
      "step": 375
    },
    {
      "epoch": 1.17,
      "grad_norm": 1.2265813596883026,
      "learning_rate": 0.00015346205516085306,
      "loss": 0.9468,
      "step": 380
    },
    {
      "epoch": 1.18,
      "grad_norm": 1.4522657029483004,
      "learning_rate": 0.0001519451323336044,
      "loss": 0.8801,
      "step": 385
    },
    {
      "epoch": 1.2,
      "grad_norm": 1.0083988226706067,
      "learning_rate": 0.0001504116591352832,
      "loss": 0.9001,
      "step": 390
    },
    {
      "epoch": 1.21,
      "grad_norm": 1.2286558458574148,
      "learning_rate": 0.00014886212414969553,
      "loss": 0.9405,
      "step": 395
    },
    {
      "epoch": 1.23,
      "grad_norm": 1.269365799457322,
      "learning_rate": 0.00014729702107813436,
      "loss": 1.0354,
      "step": 400
    },
    {
      "epoch": 1.24,
      "grad_norm": 1.5775962375858563,
      "learning_rate": 0.00014571684858208044,
      "loss": 0.9391,
      "step": 405
    },
    {
      "epoch": 1.26,
      "grad_norm": 1.5593272986635398,
      "learning_rate": 0.00014412211012432212,
      "loss": 0.8933,
      "step": 410
    },
    {
      "epoch": 1.27,
      "grad_norm": 2.0000926705831166,
      "learning_rate": 0.00014251331380854603,
      "loss": 0.9003,
      "step": 415
    },
    {
      "epoch": 1.29,
      "grad_norm": 1.3265098276554033,
      "learning_rate": 0.00014089097221744868,
      "loss": 0.9548,
      "step": 420
    },
    {
      "epoch": 1.3,
      "grad_norm": 0.9964611399554132,
      "learning_rate": 0.00013925560224942144,
      "loss": 0.9493,
      "step": 425
    },
    {
      "epoch": 1.32,
      "grad_norm": 1.0573997683696912,
      "learning_rate": 0.00013760772495385998,
      "loss": 0.9212,
      "step": 430
    },
    {
      "epoch": 1.33,
      "grad_norm": 1.066450746302158,
      "learning_rate": 0.00013594786536515153,
      "loss": 0.9229,
      "step": 435
    },
    {
      "epoch": 1.35,
      "grad_norm": 1.030770134357563,
      "learning_rate": 0.00013427655233539228,
      "loss": 0.9116,
      "step": 440
    },
    {
      "epoch": 1.37,
      "grad_norm": 1.0183082642006178,
      "learning_rate": 0.00013259431836588843,
      "loss": 0.9292,
      "step": 445
    },
    {
      "epoch": 1.38,
      "grad_norm": 1.132995542647628,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.9155,
      "step": 450
    },
    {
      "epoch": 1.4,
      "grad_norm": 1.0945930214994195,
      "learning_rate": 0.00012919923483984414,
      "loss": 0.9098,
      "step": 455
    },
    {
      "epoch": 1.41,
      "grad_norm": 0.7625306665500079,
      "learning_rate": 0.00012748746699952338,
      "loss": 0.8722,
      "step": 460
    },
    {
      "epoch": 1.43,
      "grad_norm": 1.2062480398980977,
      "learning_rate": 0.00012576694130724905,
      "loss": 0.8827,
      "step": 465
    },
    {
      "epoch": 1.44,
      "grad_norm": 1.0298307072907715,
      "learning_rate": 0.00012403820594409924,
      "loss": 0.9001,
      "step": 470
    },
    {
      "epoch": 1.46,
      "grad_norm": 1.1672438274286052,
      "learning_rate": 0.00012230181170685636,
      "loss": 0.9191,
      "step": 475
    },
    {
      "epoch": 1.47,
      "grad_norm": 0.9217837301990276,
      "learning_rate": 0.00012055831183251607,
      "loss": 0.9126,
      "step": 480
    },
    {
      "epoch": 1.49,
      "grad_norm": 0.9859571731260532,
      "learning_rate": 0.00011880826182201926,
      "loss": 0.8884,
      "step": 485
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.9295473746329976,
      "learning_rate": 0.0001170522192632624,
      "loss": 0.9002,
      "step": 490
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.9211214138607775,
      "learning_rate": 0.00011529074365344301,
      "loss": 0.8813,
      "step": 495
    },
    {
      "epoch": 1.53,
      "grad_norm": 0.9277063802264193,
      "learning_rate": 0.00011352439622079689,
      "loss": 0.9304,
      "step": 500
    },
    {
      "epoch": 1.55,
      "grad_norm": 0.9836858148590184,
      "learning_rate": 0.00011175373974578378,
      "loss": 0.9336,
      "step": 505
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.9201365394724025,
      "learning_rate": 0.00010997933838177827,
      "loss": 0.9007,
      "step": 510
    },
    {
      "epoch": 1.58,
      "grad_norm": 1.0294642137453307,
      "learning_rate": 0.00010820175747532373,
      "loss": 0.9182,
      "step": 515
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.9815519204802536,
      "learning_rate": 0.00010642156338600551,
      "loss": 0.95,
      "step": 520
    },
    {
      "epoch": 1.61,
      "grad_norm": 1.00497732793873,
      "learning_rate": 0.00010463932330600196,
      "loss": 0.905,
      "step": 525
    },
    {
      "epoch": 1.63,
      "grad_norm": 1.0176550178508887,
      "learning_rate": 0.00010285560507936961,
      "loss": 0.9369,
      "step": 530
    },
    {
      "epoch": 1.64,
      "grad_norm": 0.9691655097650015,
      "learning_rate": 0.0001010709770211212,
      "loss": 0.9151,
      "step": 535
    },
    {
      "epoch": 1.66,
      "grad_norm": 1.043651874834748,
      "learning_rate": 9.928600773615307e-05,
      "loss": 0.9027,
      "step": 540
    },
    {
      "epoch": 1.67,
      "grad_norm": 1.008769921338687,
      "learning_rate": 9.750126593808082e-05,
      "loss": 0.8884,
      "step": 545
    },
    {
      "epoch": 1.69,
      "grad_norm": 1.0140163426042983,
      "learning_rate": 9.571732026803977e-05,
      "loss": 0.9461,
      "step": 550
    },
    {
      "epoch": 1.7,
      "grad_norm": 0.8868172626319482,
      "learning_rate": 9.393473911350893e-05,
      "loss": 0.9176,
      "step": 555
    },
    {
      "epoch": 1.72,
      "grad_norm": 0.9481462007119397,
      "learning_rate": 9.215409042721552e-05,
      "loss": 0.8978,
      "step": 560
    },
    {
      "epoch": 1.73,
      "grad_norm": 0.976465698662806,
      "learning_rate": 9.037594154617812e-05,
      "loss": 0.9459,
      "step": 565
    },
    {
      "epoch": 1.75,
      "grad_norm": 1.0361077003461703,
      "learning_rate": 8.860085901094595e-05,
      "loss": 0.8965,
      "step": 570
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.9744756453463735,
      "learning_rate": 8.682940838509207e-05,
      "loss": 0.9142,
      "step": 575
    },
    {
      "epoch": 1.78,
      "grad_norm": 1.1336224000575972,
      "learning_rate": 8.50621540750175e-05,
      "loss": 0.8745,
      "step": 580
    },
    {
      "epoch": 1.79,
      "grad_norm": 0.9837911952285221,
      "learning_rate": 8.329965915012451e-05,
      "loss": 0.9572,
      "step": 585
    },
    {
      "epoch": 1.81,
      "grad_norm": 1.0097764972768413,
      "learning_rate": 8.154248516341548e-05,
      "loss": 0.9534,
      "step": 590
    },
    {
      "epoch": 1.83,
      "grad_norm": 0.9690942378398057,
      "learning_rate": 7.979119197257505e-05,
      "loss": 0.9227,
      "step": 595
    },
    {
      "epoch": 1.84,
      "grad_norm": 1.0127982484169298,
      "learning_rate": 7.804633756159259e-05,
      "loss": 0.9084,
      "step": 600
    },
    {
      "epoch": 1.86,
      "grad_norm": 0.7943207513052626,
      "learning_rate": 7.63084778629813e-05,
      "loss": 0.9248,
      "step": 605
    },
    {
      "epoch": 1.87,
      "grad_norm": 0.9647056317342992,
      "learning_rate": 7.457816658065134e-05,
      "loss": 0.9431,
      "step": 610
    },
    {
      "epoch": 1.89,
      "grad_norm": 1.1403897570938433,
      "learning_rate": 7.285595501349258e-05,
      "loss": 0.9263,
      "step": 615
    },
    {
      "epoch": 1.9,
      "grad_norm": 1.0404617304378672,
      "learning_rate": 7.114239187972416e-05,
      "loss": 0.9103,
      "step": 620
    },
    {
      "epoch": 1.92,
      "grad_norm": 1.128471625008042,
      "learning_rate": 6.94380231420656e-05,
      "loss": 0.9093,
      "step": 625
    },
    {
      "epoch": 1.93,
      "grad_norm": 0.9916742401674975,
      "learning_rate": 6.774339183378663e-05,
      "loss": 0.9331,
      "step": 630
    },
    {
      "epoch": 1.95,
      "grad_norm": 0.8859012774344858,
      "learning_rate": 6.60590378856896e-05,
      "loss": 0.8951,
      "step": 635
    },
    {
      "epoch": 1.96,
      "grad_norm": 0.9040358847526676,
      "learning_rate": 6.438549795408106e-05,
      "loss": 0.9024,
      "step": 640
    },
    {
      "epoch": 1.98,
      "grad_norm": 0.9311904528713643,
      "learning_rate": 6.272330524978613e-05,
      "loss": 0.9135,
      "step": 645
    },
    {
      "epoch": 1.99,
      "grad_norm": 0.9264414330433429,
      "learning_rate": 6.107298936826086e-05,
      "loss": 0.9241,
      "step": 650
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.123016595840454,
      "eval_runtime": 155.2791,
      "eval_samples_per_second": 14.876,
      "eval_steps_per_second": 0.47,
      "step": 652
    },
    {
      "epoch": 2.01,
      "grad_norm": 2.0610368602503826,
      "learning_rate": 5.9435076120856616e-05,
      "loss": 0.8977,
      "step": 655
    },
    {
      "epoch": 2.02,
      "grad_norm": 1.1812695122093289,
      "learning_rate": 5.7810087367289744e-05,
      "loss": 0.8808,
      "step": 660
    },
    {
      "epoch": 2.04,
      "grad_norm": 1.1330049331313394,
      "learning_rate": 5.619854084937085e-05,
      "loss": 0.8697,
      "step": 665
    },
    {
      "epoch": 2.06,
      "grad_norm": 1.2350886571017046,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.8605,
      "step": 670
    },
    {
      "epoch": 2.07,
      "grad_norm": 1.0239529925811819,
      "learning_rate": 5.301782390979929e-05,
      "loss": 0.9032,
      "step": 675
    },
    {
      "epoch": 2.09,
      "grad_norm": 1.1559449179497776,
      "learning_rate": 5.1449666904481585e-05,
      "loss": 0.8576,
      "step": 680
    },
    {
      "epoch": 2.1,
      "grad_norm": 1.0036439705356706,
      "learning_rate": 4.989697864459452e-05,
      "loss": 0.8811,
      "step": 685
    },
    {
      "epoch": 2.12,
      "grad_norm": 0.9058518668863318,
      "learning_rate": 4.836025383610382e-05,
      "loss": 0.8795,
      "step": 690
    },
    {
      "epoch": 2.13,
      "grad_norm": 1.098710487990691,
      "learning_rate": 4.683998209881943e-05,
      "loss": 0.8806,
      "step": 695
    },
    {
      "epoch": 2.15,
      "grad_norm": 1.0781974748771004,
      "learning_rate": 4.5336647810396215e-05,
      "loss": 0.8598,
      "step": 700
    },
    {
      "epoch": 2.16,
      "grad_norm": 1.0706338013924643,
      "learning_rate": 4.385072995200532e-05,
      "loss": 0.8635,
      "step": 705
    },
    {
      "epoch": 2.18,
      "grad_norm": 0.9548080567529033,
      "learning_rate": 4.238270195572472e-05,
      "loss": 0.8757,
      "step": 710
    },
    {
      "epoch": 2.19,
      "grad_norm": 1.001721295347845,
      "learning_rate": 4.093303155369771e-05,
      "loss": 0.8919,
      "step": 715
    },
    {
      "epoch": 2.21,
      "grad_norm": 1.0089807158142916,
      "learning_rate": 3.9502180629107756e-05,
      "loss": 0.8483,
      "step": 720
    },
    {
      "epoch": 2.22,
      "grad_norm": 0.903535471960529,
      "learning_rate": 3.8090605069016595e-05,
      "loss": 0.9141,
      "step": 725
    },
    {
      "epoch": 2.24,
      "grad_norm": 0.9151685871279929,
      "learning_rate": 3.669875461911297e-05,
      "loss": 0.8872,
      "step": 730
    },
    {
      "epoch": 2.25,
      "grad_norm": 1.1222577897364008,
      "learning_rate": 3.53270727404179e-05,
      "loss": 0.8424,
      "step": 735
    },
    {
      "epoch": 2.27,
      "grad_norm": 0.9362543386209857,
      "learning_rate": 3.397599646799256e-05,
      "loss": 0.9175,
      "step": 740
    },
    {
      "epoch": 2.29,
      "grad_norm": 1.0462736025526818,
      "learning_rate": 3.2645956271693257e-05,
      "loss": 0.8897,
      "step": 745
    },
    {
      "epoch": 2.3,
      "grad_norm": 0.9259326236334475,
      "learning_rate": 3.133737591901864e-05,
      "loss": 0.8671,
      "step": 750
    },
    {
      "epoch": 2.32,
      "grad_norm": 0.9219118148042651,
      "learning_rate": 3.0050672340091725e-05,
      "loss": 0.9011,
      "step": 755
    },
    {
      "epoch": 2.33,
      "grad_norm": 0.8476861060157034,
      "learning_rate": 2.8786255494820835e-05,
      "loss": 0.9027,
      "step": 760
    },
    {
      "epoch": 2.35,
      "grad_norm": 0.847331969421794,
      "learning_rate": 2.754452824228132e-05,
      "loss": 0.8339,
      "step": 765
    },
    {
      "epoch": 2.36,
      "grad_norm": 0.9426248637438077,
      "learning_rate": 2.6325886212359498e-05,
      "loss": 0.9129,
      "step": 770
    },
    {
      "epoch": 2.38,
      "grad_norm": 1.0772706903168978,
      "learning_rate": 2.51307176797001e-05,
      "loss": 0.9141,
      "step": 775
    },
    {
      "epoch": 2.39,
      "grad_norm": 0.921621759803798,
      "learning_rate": 2.3959403439996907e-05,
      "loss": 0.8866,
      "step": 780
    },
    {
      "epoch": 2.41,
      "grad_norm": 1.0228032129160294,
      "learning_rate": 2.2812316688666737e-05,
      "loss": 0.8411,
      "step": 785
    },
    {
      "epoch": 2.42,
      "grad_norm": 0.9794026584875383,
      "learning_rate": 2.1689822901944457e-05,
      "loss": 0.8148,
      "step": 790
    },
    {
      "epoch": 2.44,
      "grad_norm": 1.1116773627967562,
      "learning_rate": 2.0592279720437858e-05,
      "loss": 0.9003,
      "step": 795
    },
    {
      "epoch": 2.45,
      "grad_norm": 0.9563546052669486,
      "learning_rate": 1.9520036835178668e-05,
      "loss": 0.8883,
      "step": 800
    },
    {
      "epoch": 2.47,
      "grad_norm": 0.9044636934616683,
      "learning_rate": 1.847343587620679e-05,
      "loss": 0.8769,
      "step": 805
    },
    {
      "epoch": 2.48,
      "grad_norm": 0.9221646859533941,
      "learning_rate": 1.74528103037226e-05,
      "loss": 0.8033,
      "step": 810
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.0009290473276315,
      "learning_rate": 1.645848530184233e-05,
      "loss": 0.8381,
      "step": 815
    },
    {
      "epoch": 2.52,
      "grad_norm": 0.9671119344154833,
      "learning_rate": 1.5490777674990376e-05,
      "loss": 0.8645,
      "step": 820
    },
    {
      "epoch": 2.53,
      "grad_norm": 0.9041738041322639,
      "learning_rate": 1.4549995746961332e-05,
      "loss": 0.8624,
      "step": 825
    },
    {
      "epoch": 2.55,
      "grad_norm": 0.9947155263280221,
      "learning_rate": 1.3636439262684298e-05,
      "loss": 0.8862,
      "step": 830
    },
    {
      "epoch": 2.56,
      "grad_norm": 1.1355384323430027,
      "learning_rate": 1.2750399292720283e-05,
      "loss": 0.8914,
      "step": 835
    },
    {
      "epoch": 2.58,
      "grad_norm": 0.9481996675285544,
      "learning_rate": 1.1892158140523546e-05,
      "loss": 0.8962,
      "step": 840
    },
    {
      "epoch": 2.59,
      "grad_norm": 0.963461677617234,
      "learning_rate": 1.1061989252496053e-05,
      "loss": 0.8738,
      "step": 845
    },
    {
      "epoch": 2.61,
      "grad_norm": 0.9731419353271179,
      "learning_rate": 1.026015713086418e-05,
      "loss": 0.8955,
      "step": 850
    },
    {
      "epoch": 2.62,
      "grad_norm": 0.9744266823325466,
      "learning_rate": 9.486917249404815e-06,
      "loss": 0.9313,
      "step": 855
    },
    {
      "epoch": 2.64,
      "grad_norm": 0.8667111998887408,
      "learning_rate": 8.742515972048404e-06,
      "loss": 0.83,
      "step": 860
    },
    {
      "epoch": 2.65,
      "grad_norm": 1.007522159578116,
      "learning_rate": 8.027190474384127e-06,
      "loss": 0.9093,
      "step": 865
    },
    {
      "epoch": 2.67,
      "grad_norm": 0.9497316726005834,
      "learning_rate": 7.341168668092857e-06,
      "loss": 0.8619,
      "step": 870
    },
    {
      "epoch": 2.68,
      "grad_norm": 0.9890888660929842,
      "learning_rate": 6.684669128331655e-06,
      "loss": 0.8994,
      "step": 875
    },
    {
      "epoch": 2.7,
      "grad_norm": 0.9637211127886939,
      "learning_rate": 6.057901024092949e-06,
      "loss": 0.8815,
      "step": 880
    },
    {
      "epoch": 2.71,
      "grad_norm": 0.9454153835142218,
      "learning_rate": 5.461064051560705e-06,
      "loss": 0.8748,
      "step": 885
    },
    {
      "epoch": 2.73,
      "grad_norm": 0.9656325292151272,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.9164,
      "step": 890
    },
    {
      "epoch": 2.75,
      "grad_norm": 1.167721756716136,
      "learning_rate": 4.357934543593045e-06,
      "loss": 0.8695,
      "step": 895
    },
    {
      "epoch": 2.76,
      "grad_norm": 1.0842862872661083,
      "learning_rate": 3.851993479063154e-06,
      "loss": 0.864,
      "step": 900
    },
    {
      "epoch": 2.78,
      "grad_norm": 0.9136231470411893,
      "learning_rate": 3.376686376067695e-06,
      "loss": 0.8723,
      "step": 905
    },
    {
      "epoch": 2.79,
      "grad_norm": 0.8333735277456533,
      "learning_rate": 2.9321646734147502e-06,
      "loss": 0.8476,
      "step": 910
    },
    {
      "epoch": 2.81,
      "grad_norm": 0.9013760974476497,
      "learning_rate": 2.51857000129756e-06,
      "loss": 0.8046,
      "step": 915
    },
    {
      "epoch": 2.82,
      "grad_norm": 0.9502325737857031,
      "learning_rate": 2.1360341361692517e-06,
      "loss": 0.8794,
      "step": 920
    },
    {
      "epoch": 2.84,
      "grad_norm": 1.0665384348019042,
      "learning_rate": 1.784678958757291e-06,
      "loss": 0.8471,
      "step": 925
    },
    {
      "epoch": 2.85,
      "grad_norm": 0.867152754297921,
      "learning_rate": 1.4646164152307018e-06,
      "loss": 0.8471,
      "step": 930
    },
    {
      "epoch": 2.87,
      "grad_norm": 0.8765872693616653,
      "learning_rate": 1.1759484815326294e-06,
      "loss": 0.8778,
      "step": 935
    },
    {
      "epoch": 2.88,
      "grad_norm": 0.9977478387159145,
      "learning_rate": 9.187671308895418e-07,
      "loss": 0.8814,
      "step": 940
    },
    {
      "epoch": 2.9,
      "grad_norm": 1.0928634095287688,
      "learning_rate": 6.931543045073708e-07,
      "loss": 0.8518,
      "step": 945
    },
    {
      "epoch": 2.91,
      "grad_norm": 0.9615275585544613,
      "learning_rate": 4.991818854640395e-07,
      "loss": 0.8465,
      "step": 950
    },
    {
      "epoch": 2.93,
      "grad_norm": 0.946638261056933,
      "learning_rate": 3.369116758066171e-07,
      "loss": 0.8591,
      "step": 955
    },
    {
      "epoch": 2.94,
      "grad_norm": 0.9273052910302554,
      "learning_rate": 2.0639537686037991e-07,
      "loss": 0.8652,
      "step": 960
    },
    {
      "epoch": 2.96,
      "grad_norm": 0.9396149477675896,
      "learning_rate": 1.0767457275615567e-07,
      "loss": 0.8919,
      "step": 965
    },
    {
      "epoch": 2.98,
      "grad_norm": 0.9064011249970909,
      "learning_rate": 4.078071718107701e-08,
      "loss": 0.8436,
      "step": 970
    },
    {
      "epoch": 2.99,
      "grad_norm": 0.9696335526663976,
      "learning_rate": 5.735123357042404e-09,
      "loss": 0.8687,
      "step": 975
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.123045563697815,
      "eval_runtime": 155.2182,
      "eval_samples_per_second": 14.882,
      "eval_steps_per_second": 0.47,
      "step": 978
    },
    {
      "epoch": 3.0,
      "step": 978,
      "total_flos": 2513689986990080.0,
      "train_loss": 1.2729851507214194,
      "train_runtime": 11657.2618,
      "train_samples_per_second": 5.364,
      "train_steps_per_second": 0.084
    }
  ],
  "logging_steps": 5,
  "max_steps": 978,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 2513689986990080.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}