{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.0326530612244898, "eval_steps": 5, "global_step": 20, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0016326530612244899, "grad_norm": 3.9275944232940674, "learning_rate": 1e-05, "loss": 15.4062, "step": 1 }, { "epoch": 0.0016326530612244899, "eval_loss": 1.0398951768875122, "eval_runtime": 113.6849, "eval_samples_per_second": 9.078, "eval_steps_per_second": 4.539, "step": 1 }, { "epoch": 0.0032653061224489797, "grad_norm": 3.9902234077453613, "learning_rate": 2e-05, "loss": 16.2773, "step": 2 }, { "epoch": 0.004897959183673469, "grad_norm": 4.447629928588867, "learning_rate": 3e-05, "loss": 16.8359, "step": 3 }, { "epoch": 0.006530612244897959, "grad_norm": 3.6420984268188477, "learning_rate": 4e-05, "loss": 15.5117, "step": 4 }, { "epoch": 0.00816326530612245, "grad_norm": 4.099905967712402, "learning_rate": 5e-05, "loss": 16.7422, "step": 5 }, { "epoch": 0.00816326530612245, "eval_loss": 1.0193041563034058, "eval_runtime": 36.7376, "eval_samples_per_second": 28.091, "eval_steps_per_second": 14.046, "step": 5 }, { "epoch": 0.009795918367346938, "grad_norm": 4.01804256439209, "learning_rate": 6e-05, "loss": 15.8086, "step": 6 }, { "epoch": 0.011428571428571429, "grad_norm": 4.213345050811768, "learning_rate": 7e-05, "loss": 16.1016, "step": 7 }, { "epoch": 0.013061224489795919, "grad_norm": 4.3585381507873535, "learning_rate": 8e-05, "loss": 15.5273, "step": 8 }, { "epoch": 0.014693877551020407, "grad_norm": 4.254825592041016, "learning_rate": 9e-05, "loss": 15.5898, "step": 9 }, { "epoch": 0.0163265306122449, "grad_norm": 4.836613655090332, "learning_rate": 0.0001, "loss": 15.2812, "step": 10 }, { "epoch": 0.0163265306122449, "eval_loss": 0.9403388500213623, "eval_runtime": 37.0488, "eval_samples_per_second": 27.855, "eval_steps_per_second": 13.928, "step": 10 }, { "epoch": 0.017959183673469388, "grad_norm": 3.9569554328918457, "learning_rate": 9.755282581475769e-05, "loss": 13.3281, "step": 11 }, { "epoch": 0.019591836734693877, "grad_norm": 4.2396979331970215, "learning_rate": 9.045084971874738e-05, "loss": 14.9336, "step": 12 }, { "epoch": 0.02122448979591837, "grad_norm": 3.776480197906494, "learning_rate": 7.938926261462366e-05, "loss": 13.7148, "step": 13 }, { "epoch": 0.022857142857142857, "grad_norm": 3.915137767791748, "learning_rate": 6.545084971874738e-05, "loss": 13.293, "step": 14 }, { "epoch": 0.024489795918367346, "grad_norm": 5.005204677581787, "learning_rate": 5e-05, "loss": 15.0547, "step": 15 }, { "epoch": 0.024489795918367346, "eval_loss": 0.8632168769836426, "eval_runtime": 36.7318, "eval_samples_per_second": 28.096, "eval_steps_per_second": 14.048, "step": 15 }, { "epoch": 0.026122448979591838, "grad_norm": 4.279781341552734, "learning_rate": 3.4549150281252636e-05, "loss": 14.0078, "step": 16 }, { "epoch": 0.027755102040816326, "grad_norm": 4.18499231338501, "learning_rate": 2.061073738537635e-05, "loss": 12.4355, "step": 17 }, { "epoch": 0.029387755102040815, "grad_norm": 4.630531311035156, "learning_rate": 9.549150281252633e-06, "loss": 12.957, "step": 18 }, { "epoch": 0.031020408163265307, "grad_norm": 4.716670513153076, "learning_rate": 2.4471741852423237e-06, "loss": 13.0, "step": 19 }, { "epoch": 0.0326530612244898, "grad_norm": 5.805507183074951, "learning_rate": 0.0, "loss": 14.8984, "step": 20 }, { "epoch": 0.0326530612244898, "eval_loss": 0.8439657688140869, "eval_runtime": 36.6541, "eval_samples_per_second": 
28.155, "eval_steps_per_second": 14.078, "step": 20 } ], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 2.802881264615424e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }