{ "best_metric": null, "best_model_checkpoint": null, "epoch": 4.9504950495049505, "eval_steps": 500, "global_step": 1000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.25, "grad_norm": 0.27145588397979736, "learning_rate": 0.00019876883405951377, "loss": 1.4266, "step": 50 }, { "epoch": 0.5, "grad_norm": 0.269089013338089, "learning_rate": 0.00019510565162951537, "loss": 0.8104, "step": 100 }, { "epoch": 0.74, "grad_norm": 0.296158105134964, "learning_rate": 0.0001891006524188368, "loss": 0.7768, "step": 150 }, { "epoch": 0.99, "grad_norm": 0.3121841847896576, "learning_rate": 0.00018090169943749476, "loss": 0.7581, "step": 200 }, { "epoch": 1.24, "grad_norm": 0.3815813362598419, "learning_rate": 0.00017071067811865476, "loss": 0.7181, "step": 250 }, { "epoch": 1.49, "grad_norm": 0.395882248878479, "learning_rate": 0.00015877852522924732, "loss": 0.7148, "step": 300 }, { "epoch": 1.73, "grad_norm": 0.3826078772544861, "learning_rate": 0.00014539904997395468, "loss": 0.7116, "step": 350 }, { "epoch": 1.98, "grad_norm": 0.42428573966026306, "learning_rate": 0.00013090169943749476, "loss": 0.7189, "step": 400 }, { "epoch": 2.23, "grad_norm": 0.45111292600631714, "learning_rate": 0.0001156434465040231, "loss": 0.6538, "step": 450 }, { "epoch": 2.48, "grad_norm": 0.519710898399353, "learning_rate": 0.0001, "loss": 0.6567, "step": 500 }, { "epoch": 2.72, "grad_norm": 0.5772411823272705, "learning_rate": 8.435655349597689e-05, "loss": 0.6599, "step": 550 }, { "epoch": 2.97, "grad_norm": 0.5229880809783936, "learning_rate": 6.909830056250527e-05, "loss": 0.6554, "step": 600 }, { "epoch": 3.22, "grad_norm": 0.5204843282699585, "learning_rate": 5.4600950026045326e-05, "loss": 0.6174, "step": 650 }, { "epoch": 3.47, "grad_norm": 0.5765883326530457, "learning_rate": 4.12214747707527e-05, "loss": 0.6014, "step": 700 }, { "epoch": 3.71, "grad_norm": 0.6551887392997742, "learning_rate": 2.9289321881345254e-05, "loss": 0.6136, "step": 750 }, { "epoch": 3.96, "grad_norm": 0.6366761922836304, "learning_rate": 1.9098300562505266e-05, "loss": 0.6126, "step": 800 }, { "epoch": 4.21, "grad_norm": 0.6372557878494263, "learning_rate": 1.0899347581163221e-05, "loss": 0.5769, "step": 850 }, { "epoch": 4.46, "grad_norm": 0.6376144886016846, "learning_rate": 4.8943483704846475e-06, "loss": 0.5865, "step": 900 }, { "epoch": 4.7, "grad_norm": 0.6038933396339417, "learning_rate": 1.231165940486234e-06, "loss": 0.5776, "step": 950 }, { "epoch": 4.95, "grad_norm": 0.6120941042900085, "learning_rate": 0.0, "loss": 0.5869, "step": 1000 } ], "logging_steps": 50, "max_steps": 1000, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 2.0899255999524864e+16, "train_batch_size": 7, "trial_name": null, "trial_params": null }