{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.06830601092896176, "eval_steps": 25, "global_step": 75, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0009107468123861566, "grad_norm": 2.0517091751098633, "learning_rate": 1.6666666666666667e-05, "loss": 1.0497, "step": 1 }, { "epoch": 0.0009107468123861566, "eval_loss": 3.038726806640625, "eval_runtime": 37.7451, "eval_samples_per_second": 12.266, "eval_steps_per_second": 6.146, "step": 1 }, { "epoch": 0.0018214936247723133, "grad_norm": 2.4123470783233643, "learning_rate": 3.3333333333333335e-05, "loss": 1.1416, "step": 2 }, { "epoch": 0.00273224043715847, "grad_norm": 2.62890887260437, "learning_rate": 5e-05, "loss": 1.2335, "step": 3 }, { "epoch": 0.0036429872495446266, "grad_norm": 1.9658619165420532, "learning_rate": 4.997620553954645e-05, "loss": 1.3257, "step": 4 }, { "epoch": 0.004553734061930784, "grad_norm": 2.432122230529785, "learning_rate": 4.990486745229364e-05, "loss": 1.3111, "step": 5 }, { "epoch": 0.00546448087431694, "grad_norm": 2.159250020980835, "learning_rate": 4.9786121534345265e-05, "loss": 1.2293, "step": 6 }, { "epoch": 0.006375227686703097, "grad_norm": 2.4166738986968994, "learning_rate": 4.962019382530521e-05, "loss": 1.2834, "step": 7 }, { "epoch": 0.007285974499089253, "grad_norm": 1.835946798324585, "learning_rate": 4.940740017799833e-05, "loss": 1.3864, "step": 8 }, { "epoch": 0.00819672131147541, "grad_norm": 1.3662729263305664, "learning_rate": 4.914814565722671e-05, "loss": 0.9429, "step": 9 }, { "epoch": 0.009107468123861567, "grad_norm": 1.5126519203186035, "learning_rate": 4.884292376870567e-05, "loss": 1.1026, "step": 10 }, { "epoch": 0.010018214936247723, "grad_norm": 1.6885071992874146, "learning_rate": 4.849231551964771e-05, "loss": 1.1962, "step": 11 }, { "epoch": 0.01092896174863388, "grad_norm": 1.6031856536865234, "learning_rate": 4.8096988312782174e-05, "loss": 1.2799, "step": 12 }, { "epoch": 0.011839708561020037, "grad_norm": 1.7740166187286377, "learning_rate": 4.765769467591625e-05, "loss": 1.3683, "step": 13 }, { "epoch": 0.012750455373406194, "grad_norm": 1.8252534866333008, "learning_rate": 4.717527082945554e-05, "loss": 1.3839, "step": 14 }, { "epoch": 0.01366120218579235, "grad_norm": 2.1907145977020264, "learning_rate": 4.665063509461097e-05, "loss": 1.5108, "step": 15 }, { "epoch": 0.014571948998178506, "grad_norm": 2.0855190753936768, "learning_rate": 4.608478614532215e-05, "loss": 1.2246, "step": 16 }, { "epoch": 0.015482695810564663, "grad_norm": 2.3677632808685303, "learning_rate": 4.54788011072248e-05, "loss": 1.6782, "step": 17 }, { "epoch": 0.01639344262295082, "grad_norm": 2.1821441650390625, "learning_rate": 4.4833833507280884e-05, "loss": 1.367, "step": 18 }, { "epoch": 0.017304189435336976, "grad_norm": 2.4834225177764893, "learning_rate": 4.415111107797445e-05, "loss": 1.4051, "step": 19 }, { "epoch": 0.018214936247723135, "grad_norm": 2.456498861312866, "learning_rate": 4.34319334202531e-05, "loss": 1.361, "step": 20 }, { "epoch": 0.01912568306010929, "grad_norm": 2.4951350688934326, "learning_rate": 4.267766952966369e-05, "loss": 1.2409, "step": 21 }, { "epoch": 0.020036429872495445, "grad_norm": 2.790161371231079, "learning_rate": 4.188975519039151e-05, "loss": 1.3919, "step": 22 }, { "epoch": 0.020947176684881604, "grad_norm": 3.7281408309936523, "learning_rate": 4.1069690242163484e-05, "loss": 1.5667, "step": 23 }, { "epoch": 0.02185792349726776, "grad_norm": 
3.095154047012329, "learning_rate": 4.021903572521802e-05, "loss": 1.3851, "step": 24 }, { "epoch": 0.022768670309653915, "grad_norm": 3.579331159591675, "learning_rate": 3.933941090877615e-05, "loss": 1.547, "step": 25 }, { "epoch": 0.022768670309653915, "eval_loss": 1.554518461227417, "eval_runtime": 37.7497, "eval_samples_per_second": 12.265, "eval_steps_per_second": 6.146, "step": 25 }, { "epoch": 0.023679417122040074, "grad_norm": 3.8335983753204346, "learning_rate": 3.84324902086706e-05, "loss": 1.6246, "step": 26 }, { "epoch": 0.02459016393442623, "grad_norm": 4.2963151931762695, "learning_rate": 3.7500000000000003e-05, "loss": 1.8721, "step": 27 }, { "epoch": 0.025500910746812388, "grad_norm": 4.064708709716797, "learning_rate": 3.654371533087586e-05, "loss": 1.6337, "step": 28 }, { "epoch": 0.026411657559198543, "grad_norm": 3.6003475189208984, "learning_rate": 3.556545654351749e-05, "loss": 1.1948, "step": 29 }, { "epoch": 0.0273224043715847, "grad_norm": 4.95332145690918, "learning_rate": 3.456708580912725e-05, "loss": 1.7435, "step": 30 }, { "epoch": 0.028233151183970857, "grad_norm": 5.380415439605713, "learning_rate": 3.355050358314172e-05, "loss": 1.3499, "step": 31 }, { "epoch": 0.029143897996357013, "grad_norm": 4.800806522369385, "learning_rate": 3.251764498760683e-05, "loss": 1.5518, "step": 32 }, { "epoch": 0.030054644808743168, "grad_norm": 4.674807071685791, "learning_rate": 3.147047612756302e-05, "loss": 1.5142, "step": 33 }, { "epoch": 0.030965391621129327, "grad_norm": 4.52117919921875, "learning_rate": 3.0410990348452573e-05, "loss": 1.4617, "step": 34 }, { "epoch": 0.031876138433515486, "grad_norm": 5.159020900726318, "learning_rate": 2.9341204441673266e-05, "loss": 1.3345, "step": 35 }, { "epoch": 0.03278688524590164, "grad_norm": 5.662403583526611, "learning_rate": 2.8263154805501297e-05, "loss": 1.4164, "step": 36 }, { "epoch": 0.033697632058287796, "grad_norm": 5.426482677459717, "learning_rate": 2.717889356869146e-05, "loss": 1.2144, "step": 37 }, { "epoch": 0.03460837887067395, "grad_norm": 5.797895908355713, "learning_rate": 2.6090484684133404e-05, "loss": 1.3768, "step": 38 }, { "epoch": 0.03551912568306011, "grad_norm": 6.419762134552002, "learning_rate": 2.5e-05, "loss": 1.4736, "step": 39 }, { "epoch": 0.03642987249544627, "grad_norm": 5.924313545227051, "learning_rate": 2.3909515315866605e-05, "loss": 1.2825, "step": 40 }, { "epoch": 0.037340619307832425, "grad_norm": 6.173450469970703, "learning_rate": 2.2821106431308544e-05, "loss": 1.3105, "step": 41 }, { "epoch": 0.03825136612021858, "grad_norm": 8.242639541625977, "learning_rate": 2.173684519449872e-05, "loss": 1.3073, "step": 42 }, { "epoch": 0.039162112932604735, "grad_norm": 8.869377136230469, "learning_rate": 2.0658795558326743e-05, "loss": 1.4705, "step": 43 }, { "epoch": 0.04007285974499089, "grad_norm": 8.427241325378418, "learning_rate": 1.958900965154743e-05, "loss": 1.6696, "step": 44 }, { "epoch": 0.040983606557377046, "grad_norm": 9.052663803100586, "learning_rate": 1.852952387243698e-05, "loss": 1.2254, "step": 45 }, { "epoch": 0.04189435336976321, "grad_norm": 7.14196252822876, "learning_rate": 1.7482355012393177e-05, "loss": 1.314, "step": 46 }, { "epoch": 0.042805100182149364, "grad_norm": 10.285764694213867, "learning_rate": 1.6449496416858284e-05, "loss": 1.9592, "step": 47 }, { "epoch": 0.04371584699453552, "grad_norm": 11.626080513000488, "learning_rate": 1.5432914190872757e-05, "loss": 1.7412, "step": 48 }, { "epoch": 0.044626593806921674, "grad_norm": 16.58131217956543, 
"learning_rate": 1.443454345648252e-05, "loss": 2.785, "step": 49 }, { "epoch": 0.04553734061930783, "grad_norm": 47.45877456665039, "learning_rate": 1.3456284669124158e-05, "loss": 4.9434, "step": 50 }, { "epoch": 0.04553734061930783, "eval_loss": 1.322178840637207, "eval_runtime": 37.7906, "eval_samples_per_second": 12.252, "eval_steps_per_second": 6.139, "step": 50 }, { "epoch": 0.04644808743169399, "grad_norm": 1.0157305002212524, "learning_rate": 1.2500000000000006e-05, "loss": 0.7862, "step": 51 }, { "epoch": 0.04735883424408015, "grad_norm": 1.3376152515411377, "learning_rate": 1.1567509791329401e-05, "loss": 1.1578, "step": 52 }, { "epoch": 0.0482695810564663, "grad_norm": 1.4561330080032349, "learning_rate": 1.0660589091223855e-05, "loss": 1.0865, "step": 53 }, { "epoch": 0.04918032786885246, "grad_norm": 1.1816233396530151, "learning_rate": 9.780964274781984e-06, "loss": 0.9107, "step": 54 }, { "epoch": 0.05009107468123861, "grad_norm": 1.1890616416931152, "learning_rate": 8.930309757836517e-06, "loss": 0.8108, "step": 55 }, { "epoch": 0.051001821493624776, "grad_norm": 1.186021089553833, "learning_rate": 8.110244809608495e-06, "loss": 0.8508, "step": 56 }, { "epoch": 0.05191256830601093, "grad_norm": 1.1916978359222412, "learning_rate": 7.3223304703363135e-06, "loss": 0.8236, "step": 57 }, { "epoch": 0.052823315118397086, "grad_norm": 1.3242887258529663, "learning_rate": 6.568066579746901e-06, "loss": 0.9529, "step": 58 }, { "epoch": 0.05373406193078324, "grad_norm": 1.2892563343048096, "learning_rate": 5.848888922025553e-06, "loss": 0.8467, "step": 59 }, { "epoch": 0.0546448087431694, "grad_norm": 1.4728500843048096, "learning_rate": 5.166166492719124e-06, "loss": 0.9437, "step": 60 }, { "epoch": 0.05555555555555555, "grad_norm": 1.306962490081787, "learning_rate": 4.521198892775203e-06, "loss": 0.7965, "step": 61 }, { "epoch": 0.056466302367941715, "grad_norm": 1.478961706161499, "learning_rate": 3.9152138546778625e-06, "loss": 0.9545, "step": 62 }, { "epoch": 0.05737704918032787, "grad_norm": 1.672711730003357, "learning_rate": 3.3493649053890326e-06, "loss": 1.1629, "step": 63 }, { "epoch": 0.058287795992714025, "grad_norm": 1.666373610496521, "learning_rate": 2.8247291705444575e-06, "loss": 1.1792, "step": 64 }, { "epoch": 0.05919854280510018, "grad_norm": 1.9274945259094238, "learning_rate": 2.3423053240837515e-06, "loss": 1.1452, "step": 65 }, { "epoch": 0.060109289617486336, "grad_norm": 2.060487747192383, "learning_rate": 1.9030116872178316e-06, "loss": 1.2432, "step": 66 }, { "epoch": 0.0610200364298725, "grad_norm": 2.072204828262329, "learning_rate": 1.5076844803522922e-06, "loss": 1.161, "step": 67 }, { "epoch": 0.061930783242258654, "grad_norm": 2.055114984512329, "learning_rate": 1.1570762312943295e-06, "loss": 1.0735, "step": 68 }, { "epoch": 0.06284153005464481, "grad_norm": 2.482335329055786, "learning_rate": 8.51854342773295e-07, "loss": 1.1558, "step": 69 }, { "epoch": 0.06375227686703097, "grad_norm": 2.711244821548462, "learning_rate": 5.925998220016659e-07, "loss": 1.2527, "step": 70 }, { "epoch": 0.06466302367941712, "grad_norm": 2.62081241607666, "learning_rate": 3.7980617469479953e-07, "loss": 1.2273, "step": 71 }, { "epoch": 0.06557377049180328, "grad_norm": 2.8418915271759033, "learning_rate": 2.1387846565474045e-07, "loss": 1.451, "step": 72 }, { "epoch": 0.06648451730418943, "grad_norm": 2.660665988922119, "learning_rate": 9.513254770636137e-08, "loss": 1.3604, "step": 73 }, { "epoch": 0.06739526411657559, "grad_norm": 2.826326847076416, 
"learning_rate": 2.3794460453555047e-08, "loss": 1.0152, "step": 74 }, { "epoch": 0.06830601092896176, "grad_norm": 3.0708837509155273, "learning_rate": 0.0, "loss": 1.0459, "step": 75 }, { "epoch": 0.06830601092896176, "eval_loss": 1.2876240015029907, "eval_runtime": 37.5565, "eval_samples_per_second": 12.328, "eval_steps_per_second": 6.177, "step": 75 } ], "logging_steps": 1, "max_steps": 75, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.5638785450573824e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }