{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.9946403385049365, "eval_steps": 500, "global_step": 663, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.045133991537376586, "grad_norm": 1.0502111381705257, "learning_rate": 5e-06, "loss": 0.7438, "step": 10 }, { "epoch": 0.09026798307475317, "grad_norm": 0.7055928208506984, "learning_rate": 5e-06, "loss": 0.6709, "step": 20 }, { "epoch": 0.13540197461212977, "grad_norm": 0.5033134209699918, "learning_rate": 5e-06, "loss": 0.6463, "step": 30 }, { "epoch": 0.18053596614950634, "grad_norm": 0.618156041678536, "learning_rate": 5e-06, "loss": 0.6389, "step": 40 }, { "epoch": 0.22566995768688294, "grad_norm": 0.49695878808904403, "learning_rate": 5e-06, "loss": 0.6349, "step": 50 }, { "epoch": 0.27080394922425954, "grad_norm": 0.5844890023067665, "learning_rate": 5e-06, "loss": 0.6247, "step": 60 }, { "epoch": 0.3159379407616361, "grad_norm": 0.6341705695305015, "learning_rate": 5e-06, "loss": 0.6227, "step": 70 }, { "epoch": 0.3610719322990127, "grad_norm": 0.9851855606862004, "learning_rate": 5e-06, "loss": 0.6225, "step": 80 }, { "epoch": 0.40620592383638926, "grad_norm": 0.6200620013182269, "learning_rate": 5e-06, "loss": 0.6161, "step": 90 }, { "epoch": 0.4513399153737659, "grad_norm": 0.4866582707983752, "learning_rate": 5e-06, "loss": 0.6118, "step": 100 }, { "epoch": 0.49647390691114246, "grad_norm": 0.4846006073335934, "learning_rate": 5e-06, "loss": 0.6071, "step": 110 }, { "epoch": 0.5416078984485191, "grad_norm": 0.4640299470151304, "learning_rate": 5e-06, "loss": 0.6108, "step": 120 }, { "epoch": 0.5867418899858956, "grad_norm": 0.5199888815602103, "learning_rate": 5e-06, "loss": 0.6056, "step": 130 }, { "epoch": 0.6318758815232722, "grad_norm": 0.5702031853447341, "learning_rate": 5e-06, "loss": 0.6056, "step": 140 }, { "epoch": 0.6770098730606487, "grad_norm": 0.8001760897422481, "learning_rate": 5e-06, "loss": 0.6054, "step": 150 }, { "epoch": 0.7221438645980254, "grad_norm": 0.5587666551476193, "learning_rate": 5e-06, "loss": 0.6031, "step": 160 }, { "epoch": 0.767277856135402, "grad_norm": 0.43674759924584844, "learning_rate": 5e-06, "loss": 0.5996, "step": 170 }, { "epoch": 0.8124118476727785, "grad_norm": 0.5559456367161821, "learning_rate": 5e-06, "loss": 0.6003, "step": 180 }, { "epoch": 0.8575458392101551, "grad_norm": 0.44306696302984344, "learning_rate": 5e-06, "loss": 0.6049, "step": 190 }, { "epoch": 0.9026798307475318, "grad_norm": 0.5092942073450014, "learning_rate": 5e-06, "loss": 0.5985, "step": 200 }, { "epoch": 0.9478138222849083, "grad_norm": 0.47841352538603515, "learning_rate": 5e-06, "loss": 0.5975, "step": 210 }, { "epoch": 0.9929478138222849, "grad_norm": 0.757057117339616, "learning_rate": 5e-06, "loss": 0.5944, "step": 220 }, { "epoch": 0.9974612129760225, "eval_loss": 0.5952667593955994, "eval_runtime": 355.1756, "eval_samples_per_second": 33.62, "eval_steps_per_second": 0.527, "step": 221 }, { "epoch": 1.039210155148096, "grad_norm": 0.6288422422717695, "learning_rate": 5e-06, "loss": 0.5957, "step": 230 }, { "epoch": 1.0843441466854724, "grad_norm": 0.5368840934195099, "learning_rate": 5e-06, "loss": 0.5546, "step": 240 }, { "epoch": 1.1294781382228491, "grad_norm": 0.5562178733269311, "learning_rate": 5e-06, "loss": 0.5576, "step": 250 }, { "epoch": 1.1746121297602257, "grad_norm": 0.44566707316498066, "learning_rate": 5e-06, "loss": 0.5531, "step": 260 }, { "epoch": 1.2197461212976022, "grad_norm": 
0.5774903613750582, "learning_rate": 5e-06, "loss": 0.5549, "step": 270 }, { "epoch": 1.264880112834979, "grad_norm": 0.429832871209753, "learning_rate": 5e-06, "loss": 0.5572, "step": 280 }, { "epoch": 1.3100141043723554, "grad_norm": 0.5244854621545229, "learning_rate": 5e-06, "loss": 0.5565, "step": 290 }, { "epoch": 1.355148095909732, "grad_norm": 0.50009636557672, "learning_rate": 5e-06, "loss": 0.5587, "step": 300 }, { "epoch": 1.4002820874471085, "grad_norm": 0.46468247492051845, "learning_rate": 5e-06, "loss": 0.5557, "step": 310 }, { "epoch": 1.4454160789844852, "grad_norm": 0.6185616791546158, "learning_rate": 5e-06, "loss": 0.555, "step": 320 }, { "epoch": 1.4905500705218617, "grad_norm": 0.5522113108694092, "learning_rate": 5e-06, "loss": 0.5578, "step": 330 }, { "epoch": 1.5356840620592385, "grad_norm": 0.5687883176073543, "learning_rate": 5e-06, "loss": 0.5519, "step": 340 }, { "epoch": 1.580818053596615, "grad_norm": 0.4317341125326038, "learning_rate": 5e-06, "loss": 0.5523, "step": 350 }, { "epoch": 1.6259520451339915, "grad_norm": 0.4415467428367944, "learning_rate": 5e-06, "loss": 0.561, "step": 360 }, { "epoch": 1.671086036671368, "grad_norm": 0.5437873630019581, "learning_rate": 5e-06, "loss": 0.5562, "step": 370 }, { "epoch": 1.7162200282087448, "grad_norm": 0.46394331744324036, "learning_rate": 5e-06, "loss": 0.5552, "step": 380 }, { "epoch": 1.7613540197461213, "grad_norm": 0.4475505593561043, "learning_rate": 5e-06, "loss": 0.5528, "step": 390 }, { "epoch": 1.806488011283498, "grad_norm": 0.5295023528850353, "learning_rate": 5e-06, "loss": 0.5517, "step": 400 }, { "epoch": 1.8516220028208745, "grad_norm": 0.5025367324019494, "learning_rate": 5e-06, "loss": 0.5543, "step": 410 }, { "epoch": 1.896755994358251, "grad_norm": 0.46331161327776976, "learning_rate": 5e-06, "loss": 0.5578, "step": 420 }, { "epoch": 1.9418899858956276, "grad_norm": 0.5483627384227747, "learning_rate": 5e-06, "loss": 0.5486, "step": 430 }, { "epoch": 1.987023977433004, "grad_norm": 0.4673744718040457, "learning_rate": 5e-06, "loss": 0.5591, "step": 440 }, { "epoch": 1.9960507757404795, "eval_loss": 0.5866958498954773, "eval_runtime": 355.7934, "eval_samples_per_second": 33.562, "eval_steps_per_second": 0.526, "step": 442 }, { "epoch": 2.0332863187588153, "grad_norm": 0.7455137586802161, "learning_rate": 5e-06, "loss": 0.5574, "step": 450 }, { "epoch": 2.078420310296192, "grad_norm": 0.7317716271008468, "learning_rate": 5e-06, "loss": 0.5064, "step": 460 }, { "epoch": 2.1235543018335683, "grad_norm": 0.7140712178378773, "learning_rate": 5e-06, "loss": 0.5109, "step": 470 }, { "epoch": 2.168688293370945, "grad_norm": 0.5219136342087098, "learning_rate": 5e-06, "loss": 0.5119, "step": 480 }, { "epoch": 2.213822284908322, "grad_norm": 0.6942766993148072, "learning_rate": 5e-06, "loss": 0.5045, "step": 490 }, { "epoch": 2.2589562764456983, "grad_norm": 0.4751025500905717, "learning_rate": 5e-06, "loss": 0.5058, "step": 500 }, { "epoch": 2.304090267983075, "grad_norm": 0.5526788063335546, "learning_rate": 5e-06, "loss": 0.5134, "step": 510 }, { "epoch": 2.3492242595204513, "grad_norm": 0.5471049911581812, "learning_rate": 5e-06, "loss": 0.5135, "step": 520 }, { "epoch": 2.394358251057828, "grad_norm": 0.5542894654716121, "learning_rate": 5e-06, "loss": 0.5115, "step": 530 }, { "epoch": 2.4394922425952044, "grad_norm": 0.5478659351590376, "learning_rate": 5e-06, "loss": 0.5102, "step": 540 }, { "epoch": 2.4846262341325813, "grad_norm": 0.5494840585563637, "learning_rate": 5e-06, "loss": 0.5163, 
"step": 550 }, { "epoch": 2.529760225669958, "grad_norm": 0.49237680882767904, "learning_rate": 5e-06, "loss": 0.5115, "step": 560 }, { "epoch": 2.5748942172073344, "grad_norm": 0.47486464629996683, "learning_rate": 5e-06, "loss": 0.5129, "step": 570 }, { "epoch": 2.620028208744711, "grad_norm": 0.5165043118582577, "learning_rate": 5e-06, "loss": 0.5121, "step": 580 }, { "epoch": 2.6651622002820874, "grad_norm": 0.5794476922681516, "learning_rate": 5e-06, "loss": 0.5117, "step": 590 }, { "epoch": 2.710296191819464, "grad_norm": 0.45233116281434377, "learning_rate": 5e-06, "loss": 0.5162, "step": 600 }, { "epoch": 2.7554301833568404, "grad_norm": 0.5214525369766981, "learning_rate": 5e-06, "loss": 0.5132, "step": 610 }, { "epoch": 2.800564174894217, "grad_norm": 0.5938791442315001, "learning_rate": 5e-06, "loss": 0.518, "step": 620 }, { "epoch": 2.845698166431594, "grad_norm": 0.5796881742506971, "learning_rate": 5e-06, "loss": 0.5195, "step": 630 }, { "epoch": 2.8908321579689704, "grad_norm": 0.47397608879546316, "learning_rate": 5e-06, "loss": 0.5135, "step": 640 }, { "epoch": 2.935966149506347, "grad_norm": 0.471420002781148, "learning_rate": 5e-06, "loss": 0.5154, "step": 650 }, { "epoch": 2.9811001410437235, "grad_norm": 0.45834788024298295, "learning_rate": 5e-06, "loss": 0.5153, "step": 660 }, { "epoch": 2.9946403385049365, "eval_loss": 0.5917297005653381, "eval_runtime": 355.5387, "eval_samples_per_second": 33.586, "eval_steps_per_second": 0.526, "step": 663 }, { "epoch": 2.9946403385049365, "step": 663, "total_flos": 5052524767739904.0, "train_loss": 0.5641303292586612, "train_runtime": 61317.9429, "train_samples_per_second": 11.1, "train_steps_per_second": 0.011 } ], "logging_steps": 10, "max_steps": 663, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5052524767739904.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }