{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.02536354413256679, "eval_steps": 6, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0005072708826513358, "grad_norm": NaN, "learning_rate": 1e-05, "loss": 0.0, "step": 1 }, { "epoch": 0.0005072708826513358, "eval_loss": NaN, "eval_runtime": 264.9243, "eval_samples_per_second": 9.403, "eval_steps_per_second": 2.352, "step": 1 }, { "epoch": 0.0010145417653026716, "grad_norm": NaN, "learning_rate": 2e-05, "loss": 0.0, "step": 2 }, { "epoch": 0.0015218126479540075, "grad_norm": NaN, "learning_rate": 3e-05, "loss": 0.0, "step": 3 }, { "epoch": 0.002029083530605343, "grad_norm": NaN, "learning_rate": 4e-05, "loss": 0.0, "step": 4 }, { "epoch": 0.002536354413256679, "grad_norm": NaN, "learning_rate": 5e-05, "loss": 0.0, "step": 5 }, { "epoch": 0.003043625295908015, "grad_norm": NaN, "learning_rate": 6e-05, "loss": 0.0, "step": 6 }, { "epoch": 0.003043625295908015, "eval_loss": NaN, "eval_runtime": 264.9935, "eval_samples_per_second": 9.4, "eval_steps_per_second": 2.351, "step": 6 }, { "epoch": 0.003550896178559351, "grad_norm": NaN, "learning_rate": 7e-05, "loss": 0.0, "step": 7 }, { "epoch": 0.004058167061210686, "grad_norm": NaN, "learning_rate": 8e-05, "loss": 0.0, "step": 8 }, { "epoch": 0.004565437943862023, "grad_norm": NaN, "learning_rate": 9e-05, "loss": 0.0, "step": 9 }, { "epoch": 0.005072708826513358, "grad_norm": NaN, "learning_rate": 0.0001, "loss": 0.0, "step": 10 }, { "epoch": 0.005579979709164694, "grad_norm": NaN, "learning_rate": 9.98458666866564e-05, "loss": 0.0, "step": 11 }, { "epoch": 0.00608725059181603, "grad_norm": NaN, "learning_rate": 9.938441702975689e-05, "loss": 0.0, "step": 12 }, { "epoch": 0.00608725059181603, "eval_loss": NaN, "eval_runtime": 264.2404, "eval_samples_per_second": 9.427, "eval_steps_per_second": 2.358, "step": 12 }, { "epoch": 0.0065945214744673655, "grad_norm": NaN, "learning_rate": 9.861849601988383e-05, "loss": 0.0, "step": 13 }, { "epoch": 0.007101792357118702, "grad_norm": NaN, "learning_rate": 9.755282581475769e-05, "loss": 0.0, "step": 14 }, { "epoch": 0.007609063239770037, "grad_norm": NaN, "learning_rate": 9.619397662556435e-05, "loss": 0.0, "step": 15 }, { "epoch": 0.008116334122421373, "grad_norm": NaN, "learning_rate": 9.45503262094184e-05, "loss": 0.0, "step": 16 }, { "epoch": 0.008623605005072709, "grad_norm": NaN, "learning_rate": 9.263200821770461e-05, "loss": 0.0, "step": 17 }, { "epoch": 0.009130875887724045, "grad_norm": NaN, "learning_rate": 9.045084971874738e-05, "loss": 0.0, "step": 18 }, { "epoch": 0.009130875887724045, "eval_loss": NaN, "eval_runtime": 263.7037, "eval_samples_per_second": 9.446, "eval_steps_per_second": 2.363, "step": 18 }, { "epoch": 0.00963814677037538, "grad_norm": NaN, "learning_rate": 8.802029828000156e-05, "loss": 0.0, "step": 19 }, { "epoch": 0.010145417653026716, "grad_norm": NaN, "learning_rate": 8.535533905932738e-05, "loss": 0.0, "step": 20 }, { "epoch": 0.010652688535678053, "grad_norm": NaN, "learning_rate": 8.247240241650918e-05, "loss": 0.0, "step": 21 }, { "epoch": 0.011159959418329387, "grad_norm": NaN, "learning_rate": 7.938926261462366e-05, "loss": 0.0, "step": 22 }, { "epoch": 0.011667230300980724, "grad_norm": NaN, "learning_rate": 7.612492823579745e-05, "loss": 0.0, "step": 23 }, { "epoch": 0.01217450118363206, "grad_norm": NaN, "learning_rate": 7.269952498697734e-05, "loss": 0.0, "step": 24 }, { "epoch": 0.01217450118363206, 
"eval_loss": NaN, "eval_runtime": 264.6687, "eval_samples_per_second": 9.412, "eval_steps_per_second": 2.354, "step": 24 }, { "epoch": 0.012681772066283395, "grad_norm": NaN, "learning_rate": 6.91341716182545e-05, "loss": 0.0, "step": 25 }, { "epoch": 0.013189042948934731, "grad_norm": NaN, "learning_rate": 6.545084971874738e-05, "loss": 0.0, "step": 26 }, { "epoch": 0.013696313831586067, "grad_norm": NaN, "learning_rate": 6.167226819279528e-05, "loss": 0.0, "step": 27 }, { "epoch": 0.014203584714237404, "grad_norm": NaN, "learning_rate": 5.782172325201155e-05, "loss": 0.0, "step": 28 }, { "epoch": 0.014710855596888738, "grad_norm": NaN, "learning_rate": 5.392295478639225e-05, "loss": 0.0, "step": 29 }, { "epoch": 0.015218126479540075, "grad_norm": NaN, "learning_rate": 5e-05, "loss": 0.0, "step": 30 }, { "epoch": 0.015218126479540075, "eval_loss": NaN, "eval_runtime": 264.1256, "eval_samples_per_second": 9.431, "eval_steps_per_second": 2.359, "step": 30 }, { "epoch": 0.01572539736219141, "grad_norm": NaN, "learning_rate": 4.607704521360776e-05, "loss": 0.0, "step": 31 }, { "epoch": 0.016232668244842745, "grad_norm": NaN, "learning_rate": 4.2178276747988446e-05, "loss": 0.0, "step": 32 }, { "epoch": 0.016739939127494084, "grad_norm": NaN, "learning_rate": 3.832773180720475e-05, "loss": 0.0, "step": 33 }, { "epoch": 0.017247210010145418, "grad_norm": NaN, "learning_rate": 3.4549150281252636e-05, "loss": 0.0, "step": 34 }, { "epoch": 0.017754480892796753, "grad_norm": NaN, "learning_rate": 3.086582838174551e-05, "loss": 0.0, "step": 35 }, { "epoch": 0.01826175177544809, "grad_norm": NaN, "learning_rate": 2.7300475013022663e-05, "loss": 0.0, "step": 36 }, { "epoch": 0.01826175177544809, "eval_loss": NaN, "eval_runtime": 264.4171, "eval_samples_per_second": 9.421, "eval_steps_per_second": 2.356, "step": 36 }, { "epoch": 0.018769022658099425, "grad_norm": NaN, "learning_rate": 2.3875071764202563e-05, "loss": 0.0, "step": 37 }, { "epoch": 0.01927629354075076, "grad_norm": NaN, "learning_rate": 2.061073738537635e-05, "loss": 0.0, "step": 38 }, { "epoch": 0.019783564423402098, "grad_norm": NaN, "learning_rate": 1.7527597583490822e-05, "loss": 0.0, "step": 39 }, { "epoch": 0.020290835306053433, "grad_norm": NaN, "learning_rate": 1.4644660940672627e-05, "loss": 0.0, "step": 40 }, { "epoch": 0.020798106188704767, "grad_norm": NaN, "learning_rate": 1.1979701719998453e-05, "loss": 0.0, "step": 41 }, { "epoch": 0.021305377071356105, "grad_norm": NaN, "learning_rate": 9.549150281252633e-06, "loss": 0.0, "step": 42 }, { "epoch": 0.021305377071356105, "eval_loss": NaN, "eval_runtime": 264.6238, "eval_samples_per_second": 9.413, "eval_steps_per_second": 2.354, "step": 42 }, { "epoch": 0.02181264795400744, "grad_norm": NaN, "learning_rate": 7.367991782295391e-06, "loss": 0.0, "step": 43 }, { "epoch": 0.022319918836658775, "grad_norm": NaN, "learning_rate": 5.449673790581611e-06, "loss": 0.0, "step": 44 }, { "epoch": 0.022827189719310113, "grad_norm": NaN, "learning_rate": 3.8060233744356633e-06, "loss": 0.0, "step": 45 }, { "epoch": 0.023334460601961447, "grad_norm": NaN, "learning_rate": 2.4471741852423237e-06, "loss": 0.0, "step": 46 }, { "epoch": 0.023841731484612782, "grad_norm": NaN, "learning_rate": 1.3815039801161721e-06, "loss": 0.0, "step": 47 }, { "epoch": 0.02434900236726412, "grad_norm": NaN, "learning_rate": 6.15582970243117e-07, "loss": 0.0, "step": 48 }, { "epoch": 0.02434900236726412, "eval_loss": NaN, "eval_runtime": 264.7641, "eval_samples_per_second": 9.408, "eval_steps_per_second": 2.353, 
"step": 48 }, { "epoch": 0.024856273249915455, "grad_norm": NaN, "learning_rate": 1.5413331334360182e-07, "loss": 0.0, "step": 49 }, { "epoch": 0.02536354413256679, "grad_norm": NaN, "learning_rate": 0.0, "loss": 0.0, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.119015678246912e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }