{ "best_metric": null, "best_model_checkpoint": null, "epoch": 4.0, "eval_steps": 500, "global_step": 864, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11574074074074074, "grad_norm": 0.8536216020584106, "learning_rate": 5.6818181818181825e-05, "loss": 1.711, "step": 25 }, { "epoch": 0.23148148148148148, "grad_norm": 0.9026548862457275, "learning_rate": 9.926829268292683e-05, "loss": 1.4077, "step": 50 }, { "epoch": 0.3472222222222222, "grad_norm": 0.7126691341400146, "learning_rate": 9.621951219512195e-05, "loss": 1.2191, "step": 75 }, { "epoch": 0.46296296296296297, "grad_norm": 0.8097984194755554, "learning_rate": 9.317073170731708e-05, "loss": 1.1951, "step": 100 }, { "epoch": 0.5787037037037037, "grad_norm": 0.6823762655258179, "learning_rate": 9.01219512195122e-05, "loss": 1.2015, "step": 125 }, { "epoch": 0.6944444444444444, "grad_norm": 0.7754731178283691, "learning_rate": 8.707317073170732e-05, "loss": 1.1927, "step": 150 }, { "epoch": 0.8101851851851852, "grad_norm": 0.7315147519111633, "learning_rate": 8.402439024390244e-05, "loss": 1.1953, "step": 175 }, { "epoch": 0.9259259259259259, "grad_norm": 0.6208141446113586, "learning_rate": 8.097560975609757e-05, "loss": 1.1844, "step": 200 }, { "epoch": 1.0416666666666667, "grad_norm": 0.6663596630096436, "learning_rate": 7.792682926829269e-05, "loss": 1.2056, "step": 225 }, { "epoch": 1.1574074074074074, "grad_norm": 0.6997539401054382, "learning_rate": 7.487804878048781e-05, "loss": 1.1872, "step": 250 }, { "epoch": 1.2731481481481481, "grad_norm": 0.7418563365936279, "learning_rate": 7.182926829268293e-05, "loss": 1.1479, "step": 275 }, { "epoch": 1.3888888888888888, "grad_norm": 0.7975765466690063, "learning_rate": 6.878048780487805e-05, "loss": 1.1444, "step": 300 }, { "epoch": 1.5046296296296298, "grad_norm": 0.759464681148529, "learning_rate": 6.573170731707318e-05, "loss": 1.1614, "step": 325 }, { "epoch": 1.6203703703703702, "grad_norm": 0.7860772609710693, "learning_rate": 6.26829268292683e-05, "loss": 1.1572, "step": 350 }, { "epoch": 1.7361111111111112, "grad_norm": 0.6840630769729614, "learning_rate": 5.9634146341463414e-05, "loss": 1.1765, "step": 375 }, { "epoch": 1.8518518518518519, "grad_norm": 0.6867539882659912, "learning_rate": 5.6585365853658533e-05, "loss": 1.1464, "step": 400 }, { "epoch": 1.9675925925925926, "grad_norm": 0.6985242366790771, "learning_rate": 5.3536585365853666e-05, "loss": 1.1647, "step": 425 }, { "epoch": 2.0833333333333335, "grad_norm": 0.8595041036605835, "learning_rate": 5.0487804878048785e-05, "loss": 1.1289, "step": 450 }, { "epoch": 2.199074074074074, "grad_norm": 0.7451562285423279, "learning_rate": 4.7439024390243905e-05, "loss": 1.1422, "step": 475 }, { "epoch": 2.314814814814815, "grad_norm": 0.8289864659309387, "learning_rate": 4.4390243902439024e-05, "loss": 1.1268, "step": 500 }, { "epoch": 2.4305555555555554, "grad_norm": 0.8567008972167969, "learning_rate": 4.134146341463414e-05, "loss": 1.1591, "step": 525 }, { "epoch": 2.5462962962962963, "grad_norm": 0.8465055823326111, "learning_rate": 3.829268292682927e-05, "loss": 1.1542, "step": 550 }, { "epoch": 2.662037037037037, "grad_norm": 0.8150965571403503, "learning_rate": 3.524390243902439e-05, "loss": 1.1287, "step": 575 }, { "epoch": 2.7777777777777777, "grad_norm": 0.8683028221130371, "learning_rate": 3.2195121951219514e-05, "loss": 1.1429, "step": 600 }, { "epoch": 2.8935185185185186, "grad_norm": 0.8814287781715393, "learning_rate": 
2.9146341463414634e-05, "loss": 1.1416, "step": 625 }, { "epoch": 3.009259259259259, "grad_norm": 0.7834553122520447, "learning_rate": 2.609756097560976e-05, "loss": 1.1372, "step": 650 }, { "epoch": 3.125, "grad_norm": 0.9108631014823914, "learning_rate": 2.304878048780488e-05, "loss": 1.14, "step": 675 }, { "epoch": 3.240740740740741, "grad_norm": 0.8761746287345886, "learning_rate": 2e-05, "loss": 1.123, "step": 700 }, { "epoch": 3.3564814814814814, "grad_norm": 0.8887725472450256, "learning_rate": 1.6951219512195124e-05, "loss": 1.1083, "step": 725 }, { "epoch": 3.4722222222222223, "grad_norm": 0.857607364654541, "learning_rate": 1.3902439024390245e-05, "loss": 1.1211, "step": 750 }, { "epoch": 3.587962962962963, "grad_norm": 0.9724395871162415, "learning_rate": 1.0853658536585366e-05, "loss": 1.1038, "step": 775 }, { "epoch": 3.7037037037037037, "grad_norm": 0.9074528217315674, "learning_rate": 7.804878048780489e-06, "loss": 1.1196, "step": 800 }, { "epoch": 3.8194444444444446, "grad_norm": 0.9636090397834778, "learning_rate": 4.75609756097561e-06, "loss": 1.134, "step": 825 }, { "epoch": 3.935185185185185, "grad_norm": 0.8954392075538635, "learning_rate": 1.707317073170732e-06, "loss": 1.1378, "step": 850 } ], "logging_steps": 25, "max_steps": 864, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 500, "total_flos": 1.5105772588577587e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }