{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.00487685930260912, "eval_steps": 5, "global_step": 15, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0003251239535072746, "grad_norm": 0.024652473628520966, "learning_rate": 1e-05, "loss": 11.7645, "step": 1 }, { "epoch": 0.0003251239535072746, "eval_loss": 11.76447582244873, "eval_runtime": 276.626, "eval_samples_per_second": 37.455, "eval_steps_per_second": 18.729, "step": 1 }, { "epoch": 0.0006502479070145493, "grad_norm": 0.027672940865159035, "learning_rate": 2e-05, "loss": 11.7658, "step": 2 }, { "epoch": 0.000975371860521824, "grad_norm": 0.025643622502684593, "learning_rate": 3e-05, "loss": 11.7649, "step": 3 }, { "epoch": 0.0013004958140290985, "grad_norm": 0.025678519159555435, "learning_rate": 4e-05, "loss": 11.7648, "step": 4 }, { "epoch": 0.0016256197675363732, "grad_norm": 0.02572811394929886, "learning_rate": 5e-05, "loss": 11.7649, "step": 5 }, { "epoch": 0.0016256197675363732, "eval_loss": 11.764395713806152, "eval_runtime": 276.5749, "eval_samples_per_second": 37.462, "eval_steps_per_second": 18.733, "step": 5 }, { "epoch": 0.001950743721043648, "grad_norm": 0.026358768343925476, "learning_rate": 6e-05, "loss": 11.7634, "step": 6 }, { "epoch": 0.0022758676745509225, "grad_norm": 0.026500564068555832, "learning_rate": 7e-05, "loss": 11.7654, "step": 7 }, { "epoch": 0.002600991628058197, "grad_norm": 0.02642131596803665, "learning_rate": 8e-05, "loss": 11.7644, "step": 8 }, { "epoch": 0.002926115581565472, "grad_norm": 0.02585265040397644, "learning_rate": 9e-05, "loss": 11.7658, "step": 9 }, { "epoch": 0.0032512395350727465, "grad_norm": 0.024854907765984535, "learning_rate": 0.0001, "loss": 11.7641, "step": 10 }, { "epoch": 0.0032512395350727465, "eval_loss": 11.76411247253418, "eval_runtime": 276.5048, "eval_samples_per_second": 37.471, "eval_steps_per_second": 18.737, "step": 10 }, { "epoch": 0.003576363488580021, "grad_norm": 0.02757527120411396, "learning_rate": 9.755282581475769e-05, "loss": 11.7652, "step": 11 }, { "epoch": 0.003901487442087296, "grad_norm": 0.028088003396987915, "learning_rate": 9.045084971874738e-05, "loss": 11.7641, "step": 12 }, { "epoch": 0.00422661139559457, "grad_norm": 0.02876427210867405, "learning_rate": 7.938926261462366e-05, "loss": 11.7642, "step": 13 }, { "epoch": 0.004551735349101845, "grad_norm": 0.027249282225966454, "learning_rate": 6.545084971874738e-05, "loss": 11.7638, "step": 14 }, { "epoch": 0.00487685930260912, "grad_norm": 0.025040265172719955, "learning_rate": 5e-05, "loss": 11.763, "step": 15 }, { "epoch": 0.00487685930260912, "eval_loss": 11.763741493225098, "eval_runtime": 276.3837, "eval_samples_per_second": 37.488, "eval_steps_per_second": 18.746, "step": 15 } ], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 48888010506240.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }