{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.002784658850283977, "eval_steps": 8, "global_step": 30, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 9.282196167613258e-05, "grad_norm": 0.014820126816630363, "learning_rate": 1e-05, "loss": 11.9313, "step": 1 }, { "epoch": 9.282196167613258e-05, "eval_loss": 11.928583145141602, "eval_runtime": 139.8719, "eval_samples_per_second": 129.726, "eval_steps_per_second": 64.866, "step": 1 }, { "epoch": 0.00018564392335226516, "grad_norm": 0.01485100481659174, "learning_rate": 2e-05, "loss": 11.9297, "step": 2 }, { "epoch": 0.0002784658850283977, "grad_norm": 0.015872972086071968, "learning_rate": 3e-05, "loss": 11.9286, "step": 3 }, { "epoch": 0.0003712878467045303, "grad_norm": 0.01684979908168316, "learning_rate": 4e-05, "loss": 11.9287, "step": 4 }, { "epoch": 0.00046410980838066285, "grad_norm": 0.01272808201611042, "learning_rate": 5e-05, "loss": 11.9293, "step": 5 }, { "epoch": 0.0005569317700567954, "grad_norm": 0.018846917897462845, "learning_rate": 6e-05, "loss": 11.9299, "step": 6 }, { "epoch": 0.000649753731732928, "grad_norm": 0.013215523213148117, "learning_rate": 7e-05, "loss": 11.9287, "step": 7 }, { "epoch": 0.0007425756934090606, "grad_norm": 0.016623178496956825, "learning_rate": 8e-05, "loss": 11.9304, "step": 8 }, { "epoch": 0.0007425756934090606, "eval_loss": 11.928462982177734, "eval_runtime": 115.0141, "eval_samples_per_second": 157.763, "eval_steps_per_second": 78.886, "step": 8 }, { "epoch": 0.0008353976550851931, "grad_norm": 0.012540486641228199, "learning_rate": 9e-05, "loss": 11.9294, "step": 9 }, { "epoch": 0.0009282196167613257, "grad_norm": 0.016163840889930725, "learning_rate": 0.0001, "loss": 11.9286, "step": 10 }, { "epoch": 0.0010210415784374584, "grad_norm": 0.018371539190411568, "learning_rate": 9.938441702975689e-05, "loss": 11.9273, "step": 11 }, { "epoch": 0.0011138635401135909, "grad_norm": 0.016105543822050095, "learning_rate": 9.755282581475769e-05, "loss": 11.9294, "step": 12 }, { "epoch": 0.0012066855017897234, "grad_norm": 0.012495412491261959, "learning_rate": 9.45503262094184e-05, "loss": 11.929, "step": 13 }, { "epoch": 0.001299507463465856, "grad_norm": 0.014130703173577785, "learning_rate": 9.045084971874738e-05, "loss": 11.9288, "step": 14 }, { "epoch": 0.0013923294251419885, "grad_norm": 0.011071882210671902, "learning_rate": 8.535533905932738e-05, "loss": 11.9295, "step": 15 }, { "epoch": 0.0014851513868181212, "grad_norm": 0.015319784171879292, "learning_rate": 7.938926261462366e-05, "loss": 11.93, "step": 16 }, { "epoch": 0.0014851513868181212, "eval_loss": 11.928154945373535, "eval_runtime": 115.181, "eval_samples_per_second": 157.535, "eval_steps_per_second": 78.772, "step": 16 }, { "epoch": 0.0015779733484942537, "grad_norm": 0.012164338491857052, "learning_rate": 7.269952498697734e-05, "loss": 11.9297, "step": 17 }, { "epoch": 0.0016707953101703862, "grad_norm": 0.017325541004538536, "learning_rate": 6.545084971874738e-05, "loss": 11.9274, "step": 18 }, { "epoch": 0.001763617271846519, "grad_norm": 0.01545996405184269, "learning_rate": 5.782172325201155e-05, "loss": 11.9279, "step": 19 }, { "epoch": 0.0018564392335226514, "grad_norm": 0.012894785962998867, "learning_rate": 5e-05, "loss": 11.9308, "step": 20 }, { "epoch": 0.001949261195198784, "grad_norm": 0.01422932744026184, "learning_rate": 4.2178276747988446e-05, "loss": 11.929, "step": 21 }, { "epoch": 0.002042083156874917, "grad_norm": 
0.011517844162881374, "learning_rate": 3.4549150281252636e-05, "loss": 11.9292, "step": 22 }, { "epoch": 0.002134905118551049, "grad_norm": 0.0101248100399971, "learning_rate": 2.7300475013022663e-05, "loss": 11.9274, "step": 23 }, { "epoch": 0.0022277270802271818, "grad_norm": 0.015307254157960415, "learning_rate": 2.061073738537635e-05, "loss": 11.9269, "step": 24 }, { "epoch": 0.0022277270802271818, "eval_loss": 11.927979469299316, "eval_runtime": 115.0963, "eval_samples_per_second": 157.651, "eval_steps_per_second": 78.83, "step": 24 }, { "epoch": 0.0023205490419033145, "grad_norm": 0.011979633010923862, "learning_rate": 1.4644660940672627e-05, "loss": 11.9303, "step": 25 }, { "epoch": 0.0024133710035794467, "grad_norm": 0.011392545886337757, "learning_rate": 9.549150281252633e-06, "loss": 11.9293, "step": 26 }, { "epoch": 0.0025061929652555794, "grad_norm": 0.012941389344632626, "learning_rate": 5.449673790581611e-06, "loss": 11.9301, "step": 27 }, { "epoch": 0.002599014926931712, "grad_norm": 0.01663537509739399, "learning_rate": 2.4471741852423237e-06, "loss": 11.9283, "step": 28 }, { "epoch": 0.002691836888607845, "grad_norm": 0.016010815277695656, "learning_rate": 6.15582970243117e-07, "loss": 11.929, "step": 29 }, { "epoch": 0.002784658850283977, "grad_norm": 0.01459334883838892, "learning_rate": 0.0, "loss": 11.9295, "step": 30 } ], "logging_steps": 1, "max_steps": 30, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 8, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 183994810368.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }
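
For context, a minimal sketch of how one might summarize the log above, assuming this JSON is saved as "trainer_state.json" (the file name the Hugging Face Trainer writes into each checkpoint directory); it uses only the Python standard library:

# Summarize trainer_state.json (assumed file name) using only the standard library.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss" and "learning_rate"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

best_eval = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"steps trained   : {state['global_step']} / {state['max_steps']}")
print(f"final train loss: {train_logs[-1]['loss']}")
print(f"best eval loss  : {best_eval['eval_loss']} at step {best_eval['step']}")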