{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.7391304347826086, "eval_steps": 9, "global_step": 100, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.017391304347826087, "eval_loss": 1.59055495262146, "eval_runtime": 2.6995, "eval_samples_per_second": 35.932, "eval_steps_per_second": 4.816, "step": 1 }, { "epoch": 0.05217391304347826, "grad_norm": 0.5041409134864807, "learning_rate": 1.5e-05, "loss": 1.7739, "step": 3 }, { "epoch": 0.10434782608695652, "grad_norm": 0.5414333343505859, "learning_rate": 3e-05, "loss": 1.402, "step": 6 }, { "epoch": 0.1565217391304348, "grad_norm": 0.3529357612133026, "learning_rate": 4.5e-05, "loss": 1.6939, "step": 9 }, { "epoch": 0.1565217391304348, "eval_loss": 1.5777380466461182, "eval_runtime": 2.6751, "eval_samples_per_second": 36.26, "eval_steps_per_second": 4.86, "step": 9 }, { "epoch": 0.20869565217391303, "grad_norm": 0.3947656452655792, "learning_rate": 4.993910125649561e-05, "loss": 1.7722, "step": 12 }, { "epoch": 0.2608695652173913, "grad_norm": 0.4697956442832947, "learning_rate": 4.962019382530521e-05, "loss": 1.6984, "step": 15 }, { "epoch": 0.3130434782608696, "grad_norm": 0.34927043318748474, "learning_rate": 4.9031542398457974e-05, "loss": 1.7339, "step": 18 }, { "epoch": 0.3130434782608696, "eval_loss": 1.4486348628997803, "eval_runtime": 2.683, "eval_samples_per_second": 36.153, "eval_steps_per_second": 4.845, "step": 18 }, { "epoch": 0.3652173913043478, "grad_norm": 0.4831562042236328, "learning_rate": 4.817959636416969e-05, "loss": 1.7264, "step": 21 }, { "epoch": 0.41739130434782606, "grad_norm": 0.56829833984375, "learning_rate": 4.707368982147318e-05, "loss": 1.7197, "step": 24 }, { "epoch": 0.46956521739130436, "grad_norm": 0.5362353920936584, "learning_rate": 4.572593931387604e-05, "loss": 1.5556, "step": 27 }, { "epoch": 0.46956521739130436, "eval_loss": 1.2610658407211304, "eval_runtime": 2.6707, "eval_samples_per_second": 36.32, "eval_steps_per_second": 4.868, "step": 27 }, { "epoch": 0.5217391304347826, "grad_norm": 0.5250989198684692, "learning_rate": 4.415111107797445e-05, "loss": 1.5176, "step": 30 }, { "epoch": 0.5739130434782609, "grad_norm": 0.4496021866798401, "learning_rate": 4.2366459261474933e-05, "loss": 1.2318, "step": 33 }, { "epoch": 0.6260869565217392, "grad_norm": 0.5903450846672058, "learning_rate": 4.039153688314145e-05, "loss": 1.5067, "step": 36 }, { "epoch": 0.6260869565217392, "eval_loss": 1.09618079662323, "eval_runtime": 2.705, "eval_samples_per_second": 35.859, "eval_steps_per_second": 4.806, "step": 36 }, { "epoch": 0.6782608695652174, "grad_norm": 0.5637620687484741, "learning_rate": 3.824798160583012e-05, "loss": 1.2329, "step": 39 }, { "epoch": 0.7304347826086957, "grad_norm": 0.5909444689750671, "learning_rate": 3.5959278669726935e-05, "loss": 1.1362, "step": 42 }, { "epoch": 0.782608695652174, "grad_norm": 0.4471411406993866, "learning_rate": 3.355050358314172e-05, "loss": 1.3341, "step": 45 }, { "epoch": 0.782608695652174, "eval_loss": 1.0226554870605469, "eval_runtime": 2.6701, "eval_samples_per_second": 36.328, "eval_steps_per_second": 4.869, "step": 45 }, { "epoch": 0.8347826086956521, "grad_norm": 0.4761001169681549, "learning_rate": 3.104804738999169e-05, "loss": 1.0345, "step": 48 }, { "epoch": 0.8869565217391304, "grad_norm": 0.4806910753250122, "learning_rate": 2.8479327524001636e-05, "loss": 0.9969, "step": 51 }, { "epoch": 0.9391304347826087, "grad_norm": 0.6003803014755249, "learning_rate": 
2.587248741756253e-05, "loss": 1.2715, "step": 54 }, { "epoch": 0.9391304347826087, "eval_loss": 0.9930722117424011, "eval_runtime": 2.6757, "eval_samples_per_second": 36.252, "eval_steps_per_second": 4.859, "step": 54 }, { "epoch": 0.991304347826087, "grad_norm": 0.5368942618370056, "learning_rate": 2.3256088156396868e-05, "loss": 1.1923, "step": 57 }, { "epoch": 1.0434782608695652, "grad_norm": 0.5342758893966675, "learning_rate": 2.0658795558326743e-05, "loss": 1.0875, "step": 60 }, { "epoch": 1.0956521739130434, "grad_norm": 0.4160667359828949, "learning_rate": 1.8109066104575023e-05, "loss": 1.3013, "step": 63 }, { "epoch": 1.0956521739130434, "eval_loss": 0.9771125912666321, "eval_runtime": 2.6789, "eval_samples_per_second": 36.209, "eval_steps_per_second": 4.853, "step": 63 }, { "epoch": 1.1478260869565218, "grad_norm": 0.38854989409446716, "learning_rate": 1.56348351646022e-05, "loss": 1.4154, "step": 66 }, { "epoch": 1.2, "grad_norm": 0.5519515872001648, "learning_rate": 1.3263210930352737e-05, "loss": 1.1699, "step": 69 }, { "epoch": 1.2521739130434781, "grad_norm": 0.3825100362300873, "learning_rate": 1.1020177413231334e-05, "loss": 1.188, "step": 72 }, { "epoch": 1.2521739130434781, "eval_loss": 0.9695695638656616, "eval_runtime": 2.6795, "eval_samples_per_second": 36.201, "eval_steps_per_second": 4.852, "step": 72 }, { "epoch": 1.3043478260869565, "grad_norm": 0.47509416937828064, "learning_rate": 8.930309757836517e-06, "loss": 1.2668, "step": 75 }, { "epoch": 1.3565217391304347, "grad_norm": 0.36375659704208374, "learning_rate": 7.016504991533726e-06, "loss": 1.3312, "step": 78 }, { "epoch": 1.4086956521739131, "grad_norm": 0.5056440830230713, "learning_rate": 5.299731159831953e-06, "loss": 1.2621, "step": 81 }, { "epoch": 1.4086956521739131, "eval_loss": 0.9655399918556213, "eval_runtime": 2.671, "eval_samples_per_second": 36.316, "eval_steps_per_second": 4.867, "step": 81 }, { "epoch": 1.4608695652173913, "grad_norm": 0.48886847496032715, "learning_rate": 3.798797596089351e-06, "loss": 0.968, "step": 84 }, { "epoch": 1.5130434782608697, "grad_norm": 0.44092217087745667, "learning_rate": 2.5301488425208296e-06, "loss": 1.467, "step": 87 }, { "epoch": 1.5652173913043477, "grad_norm": 0.426158607006073, "learning_rate": 1.5076844803522922e-06, "loss": 1.3827, "step": 90 }, { "epoch": 1.5652173913043477, "eval_loss": 0.9642850756645203, "eval_runtime": 2.6781, "eval_samples_per_second": 36.22, "eval_steps_per_second": 4.854, "step": 90 }, { "epoch": 1.617391304347826, "grad_norm": 0.4980301558971405, "learning_rate": 7.426068431000882e-07, "loss": 1.164, "step": 93 }, { "epoch": 1.6695652173913045, "grad_norm": 0.4769861102104187, "learning_rate": 2.4329828146074095e-07, "loss": 0.7116, "step": 96 }, { "epoch": 1.7217391304347827, "grad_norm": 0.4263245463371277, "learning_rate": 1.522932452260595e-08, "loss": 1.272, "step": 99 }, { "epoch": 1.7217391304347827, "eval_loss": 0.9631911516189575, "eval_runtime": 2.7065, "eval_samples_per_second": 35.84, "eval_steps_per_second": 4.803, "step": 99 } ], "logging_steps": 3, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 9, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 2.6378466323595264e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }
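This file has the shape of a `trainer_state.json` written by the Hugging Face Transformers `Trainer`: a run capped at 100 steps (stopping at epoch ~1.74 of 2), logging the training loss every 3 steps and evaluating and saving every 9, with the eval loss improving from roughly 1.59 at step 1 to roughly 0.96 at step 99. Below is a minimal sketch of how such a state file can be loaded and its loss curves extracted from `log_history`; the checkpoint path and output file name are assumptions for illustration, not part of this state.

```python
# Minimal sketch: load a trainer_state.json like the one above and plot the
# train/eval loss curves recorded in log_history.
import json

import matplotlib.pyplot as plt

# Hypothetical checkpoint location; adjust to the actual output directory.
with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Training records carry "loss"; evaluation records carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_log], [e["loss"] for e in train_log], label="train loss")
plt.plot([e["step"] for e in eval_log], [e["eval_loss"] for e in eval_log], label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")
```

The learning-rate trace (ramping up to about 5e-05 over the first steps, then decaying toward zero by step 100) is consistent with a short warmup followed by cosine decay, but the scheduler itself is not recorded in this file.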