{ "best_metric": 0.533891499042511, "best_model_checkpoint": "./output_v2/7b_cluster09_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_09/checkpoint-200", "epoch": 0.2466091245376079, "global_step": 200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01, "learning_rate": 0.0002, "loss": 0.597, "step": 10 }, { "epoch": 0.02, "learning_rate": 0.0002, "loss": 0.5778, "step": 20 }, { "epoch": 0.04, "learning_rate": 0.0002, "loss": 0.5677, "step": 30 }, { "epoch": 0.05, "learning_rate": 0.0002, "loss": 0.5528, "step": 40 }, { "epoch": 0.06, "learning_rate": 0.0002, "loss": 0.5558, "step": 50 }, { "epoch": 0.07, "learning_rate": 0.0002, "loss": 0.5571, "step": 60 }, { "epoch": 0.09, "learning_rate": 0.0002, "loss": 0.5499, "step": 70 }, { "epoch": 0.1, "learning_rate": 0.0002, "loss": 0.5491, "step": 80 }, { "epoch": 0.11, "learning_rate": 0.0002, "loss": 0.5407, "step": 90 }, { "epoch": 0.12, "learning_rate": 0.0002, "loss": 0.5492, "step": 100 }, { "epoch": 0.14, "learning_rate": 0.0002, "loss": 0.5258, "step": 110 }, { "epoch": 0.15, "learning_rate": 0.0002, "loss": 0.5217, "step": 120 }, { "epoch": 0.16, "learning_rate": 0.0002, "loss": 0.538, "step": 130 }, { "epoch": 0.17, "learning_rate": 0.0002, "loss": 0.5265, "step": 140 }, { "epoch": 0.18, "learning_rate": 0.0002, "loss": 0.5344, "step": 150 }, { "epoch": 0.2, "learning_rate": 0.0002, "loss": 0.5361, "step": 160 }, { "epoch": 0.21, "learning_rate": 0.0002, "loss": 0.5186, "step": 170 }, { "epoch": 0.22, "learning_rate": 0.0002, "loss": 0.5312, "step": 180 }, { "epoch": 0.23, "learning_rate": 0.0002, "loss": 0.5395, "step": 190 }, { "epoch": 0.25, "learning_rate": 0.0002, "loss": 0.5399, "step": 200 }, { "epoch": 0.25, "eval_loss": 0.533891499042511, "eval_runtime": 249.6236, "eval_samples_per_second": 4.006, "eval_steps_per_second": 2.003, "step": 200 }, { "epoch": 0.25, "mmlu_eval_accuracy": 0.46207163729626294, "mmlu_eval_accuracy_abstract_algebra": 0.09090909090909091, "mmlu_eval_accuracy_anatomy": 0.6428571428571429, "mmlu_eval_accuracy_astronomy": 0.375, "mmlu_eval_accuracy_business_ethics": 0.5454545454545454, "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655, "mmlu_eval_accuracy_college_biology": 0.4375, "mmlu_eval_accuracy_college_chemistry": 0.125, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182, "mmlu_eval_accuracy_college_medicine": 0.36363636363636365, "mmlu_eval_accuracy_college_physics": 0.45454545454545453, "mmlu_eval_accuracy_computer_security": 0.36363636363636365, "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464, "mmlu_eval_accuracy_econometrics": 0.16666666666666666, "mmlu_eval_accuracy_electrical_engineering": 0.375, "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536, "mmlu_eval_accuracy_formal_logic": 0.2857142857142857, "mmlu_eval_accuracy_global_facts": 0.5, "mmlu_eval_accuracy_high_school_biology": 0.34375, "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453, "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666, "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112, "mmlu_eval_accuracy_high_school_geography": 0.6363636363636364, "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666, "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723, "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724, 
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231, "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354, "mmlu_eval_accuracy_high_school_psychology": 0.7333333333333333, "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913, "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273, "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384, "mmlu_eval_accuracy_human_aging": 0.6956521739130435, "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667, "mmlu_eval_accuracy_international_law": 0.6923076923076923, "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453, "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.2727272727272727, "mmlu_eval_accuracy_management": 0.7272727272727273, "mmlu_eval_accuracy_marketing": 0.72, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628, "mmlu_eval_accuracy_moral_disputes": 0.5, "mmlu_eval_accuracy_moral_scenarios": 0.24, "mmlu_eval_accuracy_nutrition": 0.6060606060606061, "mmlu_eval_accuracy_philosophy": 0.47058823529411764, "mmlu_eval_accuracy_prehistory": 0.4857142857142857, "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903, "mmlu_eval_accuracy_professional_law": 0.34705882352941175, "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744, "mmlu_eval_accuracy_professional_psychology": 0.37681159420289856, "mmlu_eval_accuracy_public_relations": 0.4166666666666667, "mmlu_eval_accuracy_security_studies": 0.48148148148148145, "mmlu_eval_accuracy_sociology": 0.6363636363636364, "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454, "mmlu_eval_accuracy_virology": 0.3888888888888889, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 1.267449478023046, "step": 200 } ], "max_steps": 5000, "num_train_epochs": 7, "total_flos": 5.415259545826099e+16, "trial_name": null, "trial_params": null }