{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.413265306122449, "eval_steps": 9, "global_step": 81, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00510204081632653, "eval_loss": 6.939616680145264, "eval_runtime": 1.0293, "eval_samples_per_second": 160.296, "eval_steps_per_second": 40.803, "step": 1 }, { "epoch": 0.015306122448979591, "grad_norm": 0.4716786742210388, "learning_rate": 3e-05, "loss": 6.9394, "step": 3 }, { "epoch": 0.030612244897959183, "grad_norm": 0.49812349677085876, "learning_rate": 6e-05, "loss": 6.9356, "step": 6 }, { "epoch": 0.04591836734693878, "grad_norm": 0.5289084911346436, "learning_rate": 9e-05, "loss": 6.933, "step": 9 }, { "epoch": 0.04591836734693878, "eval_loss": 6.927786350250244, "eval_runtime": 0.2695, "eval_samples_per_second": 612.282, "eval_steps_per_second": 155.854, "step": 9 }, { "epoch": 0.061224489795918366, "grad_norm": 0.4804115891456604, "learning_rate": 9.987820251299122e-05, "loss": 6.9266, "step": 12 }, { "epoch": 0.07653061224489796, "grad_norm": 0.5177415013313293, "learning_rate": 9.924038765061042e-05, "loss": 6.9163, "step": 15 }, { "epoch": 0.09183673469387756, "grad_norm": 0.5697883367538452, "learning_rate": 9.806308479691595e-05, "loss": 6.9044, "step": 18 }, { "epoch": 0.09183673469387756, "eval_loss": 6.897478103637695, "eval_runtime": 0.271, "eval_samples_per_second": 608.897, "eval_steps_per_second": 154.992, "step": 18 }, { "epoch": 0.10714285714285714, "grad_norm": 0.5388941168785095, "learning_rate": 9.635919272833938e-05, "loss": 6.8956, "step": 21 }, { "epoch": 0.12244897959183673, "grad_norm": 0.5204723477363586, "learning_rate": 9.414737964294636e-05, "loss": 6.885, "step": 24 }, { "epoch": 0.1377551020408163, "grad_norm": 0.4605754613876343, "learning_rate": 9.145187862775209e-05, "loss": 6.8733, "step": 27 }, { "epoch": 0.1377551020408163, "eval_loss": 6.866656303405762, "eval_runtime": 0.2715, "eval_samples_per_second": 607.839, "eval_steps_per_second": 154.723, "step": 27 }, { "epoch": 0.15306122448979592, "grad_norm": 0.4325270652770996, "learning_rate": 8.83022221559489e-05, "loss": 6.8621, "step": 30 }, { "epoch": 0.1683673469387755, "grad_norm": 0.3710213303565979, "learning_rate": 8.473291852294987e-05, "loss": 6.8547, "step": 33 }, { "epoch": 0.1836734693877551, "grad_norm": 0.321429967880249, "learning_rate": 8.07830737662829e-05, "loss": 6.8474, "step": 36 }, { "epoch": 0.1836734693877551, "eval_loss": 6.846508979797363, "eval_runtime": 0.2698, "eval_samples_per_second": 611.451, "eval_steps_per_second": 155.642, "step": 36 }, { "epoch": 0.1989795918367347, "grad_norm": 0.28444114327430725, "learning_rate": 7.649596321166024e-05, "loss": 6.8474, "step": 39 }, { "epoch": 0.21428571428571427, "grad_norm": 0.28362318873405457, "learning_rate": 7.191855733945387e-05, "loss": 6.8428, "step": 42 }, { "epoch": 0.22959183673469388, "grad_norm": 0.23609378933906555, "learning_rate": 6.710100716628344e-05, "loss": 6.8412, "step": 45 }, { "epoch": 0.22959183673469388, "eval_loss": 6.8371405601501465, "eval_runtime": 0.2731, "eval_samples_per_second": 604.065, "eval_steps_per_second": 153.762, "step": 45 }, { "epoch": 0.24489795918367346, "grad_norm": 0.25101956725120544, "learning_rate": 6.209609477998338e-05, "loss": 6.8369, "step": 48 }, { "epoch": 0.2602040816326531, "grad_norm": 0.21246355772018433, "learning_rate": 5.695865504800327e-05, "loss": 6.8376, "step": 51 }, { "epoch": 0.2755102040816326, "grad_norm": 
0.20616650581359863, "learning_rate": 5.174497483512506e-05, "loss": 6.8328, "step": 54 }, { "epoch": 0.2755102040816326, "eval_loss": 6.832831859588623, "eval_runtime": 0.2703, "eval_samples_per_second": 610.543, "eval_steps_per_second": 155.411, "step": 54 }, { "epoch": 0.29081632653061223, "grad_norm": 0.22721412777900696, "learning_rate": 4.6512176312793736e-05, "loss": 6.8316, "step": 57 }, { "epoch": 0.30612244897959184, "grad_norm": 0.2554455101490021, "learning_rate": 4.131759111665349e-05, "loss": 6.8269, "step": 60 }, { "epoch": 0.32142857142857145, "grad_norm": 0.1824101209640503, "learning_rate": 3.6218132209150045e-05, "loss": 6.8329, "step": 63 }, { "epoch": 0.32142857142857145, "eval_loss": 6.830348014831543, "eval_runtime": 0.2768, "eval_samples_per_second": 596.042, "eval_steps_per_second": 151.72, "step": 63 }, { "epoch": 0.336734693877551, "grad_norm": 0.21916939318180084, "learning_rate": 3.12696703292044e-05, "loss": 6.829, "step": 66 }, { "epoch": 0.3520408163265306, "grad_norm": 0.23701028525829315, "learning_rate": 2.6526421860705473e-05, "loss": 6.8313, "step": 69 }, { "epoch": 0.3673469387755102, "grad_norm": 0.18295414745807648, "learning_rate": 2.2040354826462668e-05, "loss": 6.8278, "step": 72 }, { "epoch": 0.3673469387755102, "eval_loss": 6.82876443862915, "eval_runtime": 0.2784, "eval_samples_per_second": 592.608, "eval_steps_per_second": 150.846, "step": 72 }, { "epoch": 0.3826530612244898, "grad_norm": 0.20278629660606384, "learning_rate": 1.7860619515673033e-05, "loss": 6.8285, "step": 75 }, { "epoch": 0.3979591836734694, "grad_norm": 0.19427503645420074, "learning_rate": 1.4033009983067452e-05, "loss": 6.8339, "step": 78 }, { "epoch": 0.413265306122449, "grad_norm": 0.2242947667837143, "learning_rate": 1.0599462319663905e-05, "loss": 6.8312, "step": 81 }, { "epoch": 0.413265306122449, "eval_loss": 6.827848434448242, "eval_runtime": 0.2684, "eval_samples_per_second": 614.721, "eval_steps_per_second": 156.474, "step": 81 } ], "logging_steps": 3, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 9, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 778513711104.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }