{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.8021265092809515, "eval_steps": 500, "global_step": 20000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.04505316273202379, "grad_norm": 4470.23388671875, "learning_rate": 1.25e-05, "loss": 17941.426, "step": 500 }, { "epoch": 0.09010632546404758, "grad_norm": 2147.42724609375, "learning_rate": 2.5e-05, "loss": 684.2933, "step": 1000 }, { "epoch": 0.13515948819607138, "grad_norm": 5162.5986328125, "learning_rate": 3.7500000000000003e-05, "loss": 536.4867, "step": 1500 }, { "epoch": 0.18021265092809516, "grad_norm": 3121.87646484375, "learning_rate": 5e-05, "loss": 434.1654, "step": 2000 }, { "epoch": 0.22526581366011894, "grad_norm": 3676.3232421875, "learning_rate": 4.990486745229364e-05, "loss": 386.8585, "step": 2500 }, { "epoch": 0.27031897639214275, "grad_norm": 7765.45458984375, "learning_rate": 4.962019382530521e-05, "loss": 335.3436, "step": 3000 }, { "epoch": 0.3153721391241665, "grad_norm": 2876.93701171875, "learning_rate": 4.914814565722671e-05, "loss": 322.3182, "step": 3500 }, { "epoch": 0.3604253018561903, "grad_norm": 0.0, "learning_rate": 4.849231551964771e-05, "loss": 293.0514, "step": 4000 }, { "epoch": 0.40547846458821407, "grad_norm": 2518.416259765625, "learning_rate": 4.765769467591625e-05, "loss": 269.1285, "step": 4500 }, { "epoch": 0.4505316273202379, "grad_norm": 3044.99853515625, "learning_rate": 4.665063509461097e-05, "loss": 277.6437, "step": 5000 }, { "epoch": 0.4955847900522617, "grad_norm": 2915.5595703125, "learning_rate": 4.54788011072248e-05, "loss": 258.6959, "step": 5500 }, { "epoch": 0.5406379527842855, "grad_norm": 0.0, "learning_rate": 4.415111107797445e-05, "loss": 260.6136, "step": 6000 }, { "epoch": 0.5856911155163093, "grad_norm": 3373.0654296875, "learning_rate": 4.267766952966369e-05, "loss": 250.7846, "step": 6500 }, { "epoch": 0.630744278248333, "grad_norm": 2416.063232421875, "learning_rate": 4.1069690242163484e-05, "loss": 240.0419, "step": 7000 }, { "epoch": 0.6757974409803568, "grad_norm": 3405.637451171875, "learning_rate": 3.933941090877615e-05, "loss": 242.7395, "step": 7500 }, { "epoch": 0.7208506037123806, "grad_norm": 0.0, "learning_rate": 3.7500000000000003e-05, "loss": 236.5273, "step": 8000 }, { "epoch": 0.7659037664444044, "grad_norm": 2412.057861328125, "learning_rate": 3.556545654351749e-05, "loss": 225.7031, "step": 8500 }, { "epoch": 0.8109569291764281, "grad_norm": 8866.654296875, "learning_rate": 3.355050358314172e-05, "loss": 232.8072, "step": 9000 }, { "epoch": 0.856010091908452, "grad_norm": 2159.58642578125, "learning_rate": 3.147047612756302e-05, "loss": 228.5174, "step": 9500 }, { "epoch": 0.9010632546404758, "grad_norm": 3101.438720703125, "learning_rate": 2.9341204441673266e-05, "loss": 226.6102, "step": 10000 }, { "epoch": 0.9461164173724995, "grad_norm": 2370.216552734375, "learning_rate": 2.717889356869146e-05, "loss": 221.0378, "step": 10500 }, { "epoch": 0.9911695801045234, "grad_norm": 1607.97900390625, "learning_rate": 2.5e-05, "loss": 208.4827, "step": 11000 }, { "epoch": 1.0, "eval_loss": 237.83326721191406, "eval_runtime": 242.6406, "eval_samples_per_second": 40.657, "eval_steps_per_second": 5.086, "step": 11098 }, { "epoch": 1.0362227428365471, "grad_norm": 1659.910400390625, "learning_rate": 2.2821106431308544e-05, "loss": 202.1607, "step": 11500 }, { "epoch": 1.081275905568571, "grad_norm": 1811.052001953125, "learning_rate": 2.0658795558326743e-05, "loss": 
198.4596, "step": 12000 }, { "epoch": 1.1263290683005946, "grad_norm": 4330.6376953125, "learning_rate": 1.852952387243698e-05, "loss": 194.4083, "step": 12500 }, { "epoch": 1.1713822310326185, "grad_norm": 2148.2275390625, "learning_rate": 1.6449496416858284e-05, "loss": 196.8616, "step": 13000 }, { "epoch": 1.2164353937646424, "grad_norm": 2007.6566162109375, "learning_rate": 1.443454345648252e-05, "loss": 193.8067, "step": 13500 }, { "epoch": 1.261488556496666, "grad_norm": 1964.06884765625, "learning_rate": 1.2500000000000006e-05, "loss": 189.0563, "step": 14000 }, { "epoch": 1.30654171922869, "grad_norm": 2146.515625, "learning_rate": 1.0660589091223855e-05, "loss": 187.8685, "step": 14500 }, { "epoch": 1.3515948819607138, "grad_norm": 0.0, "learning_rate": 8.930309757836517e-06, "loss": 191.7554, "step": 15000 }, { "epoch": 1.3966480446927374, "grad_norm": 2082.637451171875, "learning_rate": 7.3223304703363135e-06, "loss": 186.9572, "step": 15500 }, { "epoch": 1.4417012074247613, "grad_norm": 3713.958984375, "learning_rate": 5.848888922025553e-06, "loss": 187.3405, "step": 16000 }, { "epoch": 1.4867543701567851, "grad_norm": 3464.310302734375, "learning_rate": 4.521198892775203e-06, "loss": 184.6902, "step": 16500 }, { "epoch": 1.5318075328888088, "grad_norm": 2287.31494140625, "learning_rate": 3.3493649053890326e-06, "loss": 185.3727, "step": 17000 }, { "epoch": 1.5768606956208324, "grad_norm": 2169.33544921875, "learning_rate": 2.3423053240837515e-06, "loss": 189.5521, "step": 17500 }, { "epoch": 1.6219138583528565, "grad_norm": 4489.81787109375, "learning_rate": 1.5076844803522922e-06, "loss": 186.5204, "step": 18000 }, { "epoch": 1.6669670210848802, "grad_norm": 2658.7236328125, "learning_rate": 8.51854342773295e-07, "loss": 178.6977, "step": 18500 }, { "epoch": 1.7120201838169038, "grad_norm": 2208.738037109375, "learning_rate": 3.7980617469479953e-07, "loss": 179.4614, "step": 19000 }, { "epoch": 1.7570733465489279, "grad_norm": 5405.3544921875, "learning_rate": 9.513254770636137e-08, "loss": 179.7739, "step": 19500 }, { "epoch": 1.8021265092809515, "grad_norm": 2117.037353515625, "learning_rate": 0.0, "loss": 178.2009, "step": 20000 } ], "logging_steps": 500, "max_steps": 20000, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 5000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 0.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }