{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.0017534246575342466, "eval_steps": 5, "global_step": 20, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 8.767123287671233e-05, "grad_norm": 17.826961517333984, "learning_rate": 1e-05, "loss": 4.9034, "step": 1 }, { "epoch": 8.767123287671233e-05, "eval_loss": 5.864974498748779, "eval_runtime": 4180.0734, "eval_samples_per_second": 9.192, "eval_steps_per_second": 4.596, "step": 1 }, { "epoch": 0.00017534246575342467, "grad_norm": 15.42636489868164, "learning_rate": 2e-05, "loss": 4.5406, "step": 2 }, { "epoch": 0.00026301369863013696, "grad_norm": 15.912243843078613, "learning_rate": 3e-05, "loss": 4.8244, "step": 3 }, { "epoch": 0.00035068493150684934, "grad_norm": 15.158151626586914, "learning_rate": 4e-05, "loss": 4.8005, "step": 4 }, { "epoch": 0.00043835616438356166, "grad_norm": 14.64368724822998, "learning_rate": 5e-05, "loss": 4.4149, "step": 5 }, { "epoch": 0.00043835616438356166, "eval_loss": 4.748353958129883, "eval_runtime": 4179.9748, "eval_samples_per_second": 9.192, "eval_steps_per_second": 4.596, "step": 5 }, { "epoch": 0.0005260273972602739, "grad_norm": 12.26352310180664, "learning_rate": 6e-05, "loss": 4.1608, "step": 6 }, { "epoch": 0.0006136986301369863, "grad_norm": 8.713861465454102, "learning_rate": 7e-05, "loss": 3.7209, "step": 7 }, { "epoch": 0.0007013698630136987, "grad_norm": 8.520092964172363, "learning_rate": 8e-05, "loss": 3.3179, "step": 8 }, { "epoch": 0.0007890410958904109, "grad_norm": 8.435450553894043, "learning_rate": 9e-05, "loss": 2.7645, "step": 9 }, { "epoch": 0.0008767123287671233, "grad_norm": 7.701727390289307, "learning_rate": 0.0001, "loss": 2.4773, "step": 10 }, { "epoch": 0.0008767123287671233, "eval_loss": 2.0398104190826416, "eval_runtime": 4178.7123, "eval_samples_per_second": 9.195, "eval_steps_per_second": 4.597, "step": 10 }, { "epoch": 0.0009643835616438356, "grad_norm": 9.178277969360352, "learning_rate": 9.755282581475769e-05, "loss": 2.2672, "step": 11 }, { "epoch": 0.0010520547945205478, "grad_norm": 6.568050861358643, "learning_rate": 9.045084971874738e-05, "loss": 2.0439, "step": 12 }, { "epoch": 0.0011397260273972602, "grad_norm": 4.214171409606934, "learning_rate": 7.938926261462366e-05, "loss": 1.3993, "step": 13 }, { "epoch": 0.0012273972602739726, "grad_norm": 3.7734668254852295, "learning_rate": 6.545084971874738e-05, "loss": 1.5691, "step": 14 }, { "epoch": 0.001315068493150685, "grad_norm": 3.103121280670166, "learning_rate": 5e-05, "loss": 1.4274, "step": 15 }, { "epoch": 0.001315068493150685, "eval_loss": 1.1212583780288696, "eval_runtime": 4181.543, "eval_samples_per_second": 9.188, "eval_steps_per_second": 4.594, "step": 15 }, { "epoch": 0.0014027397260273973, "grad_norm": 2.817159414291382, "learning_rate": 3.4549150281252636e-05, "loss": 1.3368, "step": 16 }, { "epoch": 0.0014904109589041095, "grad_norm": 3.2762200832366943, "learning_rate": 2.061073738537635e-05, "loss": 1.6302, "step": 17 }, { "epoch": 0.0015780821917808219, "grad_norm": 3.8624162673950195, "learning_rate": 9.549150281252633e-06, "loss": 1.3912, "step": 18 }, { "epoch": 0.0016657534246575342, "grad_norm": 3.024617910385132, "learning_rate": 2.4471741852423237e-06, "loss": 1.4863, "step": 19 }, { "epoch": 0.0017534246575342466, "grad_norm": 2.6845686435699463, "learning_rate": 0.0, "loss": 1.4299, "step": 20 }, { "epoch": 0.0017534246575342466, "eval_loss": 1.0315130949020386, "eval_runtime": 4189.8673, 
"eval_samples_per_second": 9.17, "eval_steps_per_second": 4.585, "step": 20 } ], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.15352688132096e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }