Farouk committed
Commit b34e90f · 1 Parent(s): 0b1f583

Training in progress, step 5200

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:40afea012205ce6a110a13ee18b7e55ebd54322919f9009c2663c81244eb8897
+ oid sha256:e3480b87b6488421f8a0621b9b69ebbb90cf04f55e5c24a276d7b3371a4d4bd9
  size 319977229
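
The changed binary files in this commit are Git LFS pointers: each records the LFS spec version, the sha256 oid of the stored blob, and its size in bytes. As a minimal sketch (assuming the adapter weights have already been downloaded to a local file named `adapter_model.bin`, a hypothetical path), the new oid from the pointer above could be verified like this:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex-encoded SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid taken from the new LFS pointer in this commit.
expected = "e3480b87b6488421f8a0621b9b69ebbb90cf04f55e5c24a276d7b3371a4d4bd9"
actual = sha256_of("adapter_model.bin")  # hypothetical local download path
print("match" if actual == expected else f"mismatch: {actual}")
```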
checkpoint-3000/adapter_model/adapter_model/README.md CHANGED
@@ -103,6 +103,17 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_use_double_quant: True
  - bnb_4bit_compute_dtype: bfloat16
 
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+
  The following `bitsandbytes` quantization config was used during training:
  - load_in_8bit: False
  - load_in_4bit: True
@@ -124,5 +135,6 @@ The following `bitsandbytes` quantization config was used during training:
  - PEFT 0.4.0
  - PEFT 0.4.0
  - PEFT 0.4.0
+ - PEFT 0.4.0
 
  - PEFT 0.4.0
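
The README hunk above appends another copy of the `bitsandbytes` quantization settings recorded for this run. As an illustrative sketch only (the actual training script is not part of this commit), the same values could be expressed with `transformers.BitsAndBytesConfig` roughly like this:

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the values listed in the README diff above; this is an assumed
# reconstruction, not code taken from the original training setup.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```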
checkpoint-3000/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a72c1b300d77dc7ad31caec2658ed53028ff9cb977e4448c7749a2dd0b3c1b41
+ oid sha256:40afea012205ce6a110a13ee18b7e55ebd54322919f9009c2663c81244eb8897
  size 319977229
{checkpoint-3200 → checkpoint-5200}/README.md RENAMED
File without changes
{checkpoint-3200 → checkpoint-5200}/adapter_config.json RENAMED
File without changes
{checkpoint-3200 → checkpoint-5200}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e0844c1552367d8cd58ff885430bedcf906de72eb83c89824d6f95ed3ec5893a
+ oid sha256:e3480b87b6488421f8a0621b9b69ebbb90cf04f55e5c24a276d7b3371a4d4bd9
  size 319977229
{checkpoint-3200 → checkpoint-5200}/added_tokens.json RENAMED
File without changes
{checkpoint-3200 → checkpoint-5200}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:526fbbfdf6531c23543834b00f2b1ca24307908435ac1a39f65d96f339aa6136
+ oid sha256:42049281eb2a1bdac1029fe71ff0feccae14324df0eeee02f8b9f1f45a6615f5
  size 1279539973
{checkpoint-3200 → checkpoint-5200}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d755f5160f7821ddabb461b471df881a8597ba14fd384388cef3ecb8011a6250
+ oid sha256:7f4c779014b11e05adaaa2455e19f98472adaead685d5f64a937f43f73a7460e
  size 14511
{checkpoint-3200 → checkpoint-5200}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:50b57a34df83b700e2c13775ff734b4569b74ce7e20da3479db76577bb4e906e
+ oid sha256:bfeaf01effb5693db43ee13e167cd0d73532fb4b4a2468aeb23c02cf097773a8
  size 627
{checkpoint-3200 → checkpoint-5200}/special_tokens_map.json RENAMED
File without changes
{checkpoint-3200 → checkpoint-5200}/tokenizer.model RENAMED
File without changes
{checkpoint-3200 → checkpoint-5200}/tokenizer_config.json RENAMED
File without changes
{checkpoint-3200 → checkpoint-5200}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
  {
    "best_metric": 0.7343361377716064,
    "best_model_checkpoint": "experts/expert-16/checkpoint-3000",
- "epoch": 1.0139416983523448,
- "global_step": 3200,
+ "epoch": 1.6476552598225602,
+ "global_step": 5200,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -3062,11 +3062,1921 @@
    "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
    "mmlu_loss": 1.4613330938486144,
    "step": 3200
    }
  ],
  "max_steps": 10000,
  "num_train_epochs": 4,
- "total_flos": 9.707385618054513e+17,
  "trial_name": null,
  "trial_params": null
}

    "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
    "mmlu_loss": 1.4613330938486144,
    "step": 3200
3065
+ },
3066
+ {
3067
+ "epoch": 1.02,
3068
+ "learning_rate": 0.0002,
3069
+ "loss": 0.664,
3070
+ "step": 3210
3071
+ },
3072
+ {
3073
+ "epoch": 1.02,
3074
+ "learning_rate": 0.0002,
3075
+ "loss": 0.6673,
3076
+ "step": 3220
3077
+ },
3078
+ {
3079
+ "epoch": 1.02,
3080
+ "learning_rate": 0.0002,
3081
+ "loss": 0.703,
3082
+ "step": 3230
3083
+ },
3084
+ {
3085
+ "epoch": 1.03,
3086
+ "learning_rate": 0.0002,
3087
+ "loss": 0.763,
3088
+ "step": 3240
3089
+ },
3090
+ {
3091
+ "epoch": 1.03,
3092
+ "learning_rate": 0.0002,
3093
+ "loss": 0.6587,
3094
+ "step": 3250
3095
+ },
3096
+ {
3097
+ "epoch": 1.03,
3098
+ "learning_rate": 0.0002,
3099
+ "loss": 0.6725,
3100
+ "step": 3260
3101
+ },
3102
+ {
3103
+ "epoch": 1.04,
3104
+ "learning_rate": 0.0002,
3105
+ "loss": 0.7518,
3106
+ "step": 3270
3107
+ },
3108
+ {
3109
+ "epoch": 1.04,
3110
+ "learning_rate": 0.0002,
3111
+ "loss": 0.7182,
3112
+ "step": 3280
3113
+ },
3114
+ {
3115
+ "epoch": 1.04,
3116
+ "learning_rate": 0.0002,
3117
+ "loss": 0.6655,
3118
+ "step": 3290
3119
+ },
3120
+ {
3121
+ "epoch": 1.05,
3122
+ "learning_rate": 0.0002,
3123
+ "loss": 0.6333,
3124
+ "step": 3300
3125
+ },
3126
+ {
3127
+ "epoch": 1.05,
3128
+ "learning_rate": 0.0002,
3129
+ "loss": 0.6699,
3130
+ "step": 3310
3131
+ },
3132
+ {
3133
+ "epoch": 1.05,
3134
+ "learning_rate": 0.0002,
3135
+ "loss": 0.659,
3136
+ "step": 3320
3137
+ },
3138
+ {
3139
+ "epoch": 1.06,
3140
+ "learning_rate": 0.0002,
3141
+ "loss": 0.7138,
3142
+ "step": 3330
3143
+ },
3144
+ {
3145
+ "epoch": 1.06,
3146
+ "learning_rate": 0.0002,
3147
+ "loss": 0.7309,
3148
+ "step": 3340
3149
+ },
3150
+ {
3151
+ "epoch": 1.06,
3152
+ "learning_rate": 0.0002,
3153
+ "loss": 0.7251,
3154
+ "step": 3350
3155
+ },
3156
+ {
3157
+ "epoch": 1.06,
3158
+ "learning_rate": 0.0002,
3159
+ "loss": 0.6712,
3160
+ "step": 3360
3161
+ },
3162
+ {
3163
+ "epoch": 1.07,
3164
+ "learning_rate": 0.0002,
3165
+ "loss": 0.6527,
3166
+ "step": 3370
3167
+ },
3168
+ {
3169
+ "epoch": 1.07,
3170
+ "learning_rate": 0.0002,
3171
+ "loss": 0.7752,
3172
+ "step": 3380
3173
+ },
3174
+ {
3175
+ "epoch": 1.07,
3176
+ "learning_rate": 0.0002,
3177
+ "loss": 0.6896,
3178
+ "step": 3390
3179
+ },
3180
+ {
3181
+ "epoch": 1.08,
3182
+ "learning_rate": 0.0002,
3183
+ "loss": 0.7441,
3184
+ "step": 3400
3185
+ },
3186
+ {
3187
+ "epoch": 1.08,
3188
+ "eval_loss": 0.7388539910316467,
3189
+ "eval_runtime": 111.0879,
3190
+ "eval_samples_per_second": 9.002,
3191
+ "eval_steps_per_second": 4.501,
3192
+ "step": 3400
3193
+ },
3194
+ {
3195
+ "epoch": 1.08,
3196
+ "mmlu_eval_accuracy": 0.49153955280819217,
3197
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
3198
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
3199
+ "mmlu_eval_accuracy_astronomy": 0.4375,
3200
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
3201
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
3202
+ "mmlu_eval_accuracy_college_biology": 0.375,
3203
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
3204
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
3205
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
3206
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
3207
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
3208
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
3209
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
3210
+ "mmlu_eval_accuracy_econometrics": 0.25,
3211
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
3212
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
3213
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
3214
+ "mmlu_eval_accuracy_global_facts": 0.5,
3215
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
3216
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
3217
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
3218
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
3219
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
3220
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
3221
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
3222
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
3223
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
3224
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
3225
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
3226
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
3227
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
3228
+ "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
3229
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
3230
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
3231
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
3232
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
3233
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
3234
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
3235
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
3236
+ "mmlu_eval_accuracy_marketing": 0.88,
3237
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
3238
+ "mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
3239
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
3240
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
3241
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
3242
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
3243
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
3244
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
3245
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
3246
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
3247
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
3248
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
3249
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
3250
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
3251
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
3252
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
3253
+ "mmlu_eval_accuracy_world_religions": 0.7894736842105263,
3254
+ "mmlu_loss": 1.3683368821990707,
3255
+ "step": 3400
3256
+ },
3257
+ {
3258
+ "epoch": 1.08,
3259
+ "learning_rate": 0.0002,
3260
+ "loss": 0.723,
3261
+ "step": 3410
3262
+ },
3263
+ {
3264
+ "epoch": 1.08,
3265
+ "learning_rate": 0.0002,
3266
+ "loss": 0.7545,
3267
+ "step": 3420
3268
+ },
3269
+ {
3270
+ "epoch": 1.09,
3271
+ "learning_rate": 0.0002,
3272
+ "loss": 0.6885,
3273
+ "step": 3430
3274
+ },
3275
+ {
3276
+ "epoch": 1.09,
3277
+ "learning_rate": 0.0002,
3278
+ "loss": 0.7021,
3279
+ "step": 3440
3280
+ },
3281
+ {
3282
+ "epoch": 1.09,
3283
+ "learning_rate": 0.0002,
3284
+ "loss": 0.7284,
3285
+ "step": 3450
3286
+ },
3287
+ {
3288
+ "epoch": 1.1,
3289
+ "learning_rate": 0.0002,
3290
+ "loss": 0.6811,
3291
+ "step": 3460
3292
+ },
3293
+ {
3294
+ "epoch": 1.1,
3295
+ "learning_rate": 0.0002,
3296
+ "loss": 0.7076,
3297
+ "step": 3470
3298
+ },
3299
+ {
3300
+ "epoch": 1.1,
3301
+ "learning_rate": 0.0002,
3302
+ "loss": 0.7074,
3303
+ "step": 3480
3304
+ },
3305
+ {
3306
+ "epoch": 1.11,
3307
+ "learning_rate": 0.0002,
3308
+ "loss": 0.6734,
3309
+ "step": 3490
3310
+ },
3311
+ {
3312
+ "epoch": 1.11,
3313
+ "learning_rate": 0.0002,
3314
+ "loss": 0.7243,
3315
+ "step": 3500
3316
+ },
3317
+ {
3318
+ "epoch": 1.11,
3319
+ "learning_rate": 0.0002,
3320
+ "loss": 0.7347,
3321
+ "step": 3510
3322
+ },
3323
+ {
3324
+ "epoch": 1.12,
3325
+ "learning_rate": 0.0002,
3326
+ "loss": 0.6888,
3327
+ "step": 3520
3328
+ },
3329
+ {
3330
+ "epoch": 1.12,
3331
+ "learning_rate": 0.0002,
3332
+ "loss": 0.7332,
3333
+ "step": 3530
3334
+ },
3335
+ {
3336
+ "epoch": 1.12,
3337
+ "learning_rate": 0.0002,
3338
+ "loss": 0.7117,
3339
+ "step": 3540
3340
+ },
3341
+ {
3342
+ "epoch": 1.12,
3343
+ "learning_rate": 0.0002,
3344
+ "loss": 0.6575,
3345
+ "step": 3550
3346
+ },
3347
+ {
3348
+ "epoch": 1.13,
3349
+ "learning_rate": 0.0002,
3350
+ "loss": 0.729,
3351
+ "step": 3560
3352
+ },
3353
+ {
3354
+ "epoch": 1.13,
3355
+ "learning_rate": 0.0002,
3356
+ "loss": 0.6825,
3357
+ "step": 3570
3358
+ },
3359
+ {
3360
+ "epoch": 1.13,
3361
+ "learning_rate": 0.0002,
3362
+ "loss": 0.6935,
3363
+ "step": 3580
3364
+ },
3365
+ {
3366
+ "epoch": 1.14,
3367
+ "learning_rate": 0.0002,
3368
+ "loss": 0.7004,
3369
+ "step": 3590
3370
+ },
3371
+ {
3372
+ "epoch": 1.14,
3373
+ "learning_rate": 0.0002,
3374
+ "loss": 0.7237,
3375
+ "step": 3600
3376
+ },
3377
+ {
3378
+ "epoch": 1.14,
3379
+ "eval_loss": 0.7381147742271423,
3380
+ "eval_runtime": 111.0101,
3381
+ "eval_samples_per_second": 9.008,
3382
+ "eval_steps_per_second": 4.504,
3383
+ "step": 3600
3384
+ },
3385
+ {
3386
+ "epoch": 1.14,
3387
+ "mmlu_eval_accuracy": 0.49167050353968145,
3388
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
3389
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
3390
+ "mmlu_eval_accuracy_astronomy": 0.4375,
3391
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
3392
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
3393
+ "mmlu_eval_accuracy_college_biology": 0.5,
3394
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
3395
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
3396
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
3397
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
3398
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
3399
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
3400
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
3401
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
3402
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
3403
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
3404
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
3405
+ "mmlu_eval_accuracy_global_facts": 0.5,
3406
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
3407
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
3408
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
3409
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
3410
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
3411
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
3412
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
3413
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
3414
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
3415
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
3416
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
3417
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
3418
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
3419
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
3420
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
3421
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
3422
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
3423
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
3424
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
3425
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
3426
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
3427
+ "mmlu_eval_accuracy_marketing": 0.84,
3428
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
3429
+ "mmlu_eval_accuracy_miscellaneous": 0.7209302325581395,
3430
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
3431
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
3432
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
3433
+ "mmlu_eval_accuracy_philosophy": 0.5,
3434
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
3435
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
3436
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
3437
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
3438
+ "mmlu_eval_accuracy_professional_psychology": 0.5362318840579711,
3439
+ "mmlu_eval_accuracy_public_relations": 0.5,
3440
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
3441
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
3442
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
3443
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
3444
+ "mmlu_eval_accuracy_world_religions": 0.7894736842105263,
3445
+ "mmlu_loss": 1.5044772917545806,
3446
+ "step": 3600
3447
+ },
3448
+ {
3449
+ "epoch": 1.14,
3450
+ "learning_rate": 0.0002,
3451
+ "loss": 0.7361,
3452
+ "step": 3610
3453
+ },
3454
+ {
3455
+ "epoch": 1.15,
3456
+ "learning_rate": 0.0002,
3457
+ "loss": 0.7179,
3458
+ "step": 3620
3459
+ },
3460
+ {
3461
+ "epoch": 1.15,
3462
+ "learning_rate": 0.0002,
3463
+ "loss": 0.7499,
3464
+ "step": 3630
3465
+ },
3466
+ {
3467
+ "epoch": 1.15,
3468
+ "learning_rate": 0.0002,
3469
+ "loss": 0.7319,
3470
+ "step": 3640
3471
+ },
3472
+ {
3473
+ "epoch": 1.16,
3474
+ "learning_rate": 0.0002,
3475
+ "loss": 0.7104,
3476
+ "step": 3650
3477
+ },
3478
+ {
3479
+ "epoch": 1.16,
3480
+ "learning_rate": 0.0002,
3481
+ "loss": 0.6892,
3482
+ "step": 3660
3483
+ },
3484
+ {
3485
+ "epoch": 1.16,
3486
+ "learning_rate": 0.0002,
3487
+ "loss": 0.7666,
3488
+ "step": 3670
3489
+ },
3490
+ {
3491
+ "epoch": 1.17,
3492
+ "learning_rate": 0.0002,
3493
+ "loss": 0.632,
3494
+ "step": 3680
3495
+ },
3496
+ {
3497
+ "epoch": 1.17,
3498
+ "learning_rate": 0.0002,
3499
+ "loss": 0.713,
3500
+ "step": 3690
3501
+ },
3502
+ {
3503
+ "epoch": 1.17,
3504
+ "learning_rate": 0.0002,
3505
+ "loss": 0.6958,
3506
+ "step": 3700
3507
+ },
3508
+ {
3509
+ "epoch": 1.18,
3510
+ "learning_rate": 0.0002,
3511
+ "loss": 0.7253,
3512
+ "step": 3710
3513
+ },
3514
+ {
3515
+ "epoch": 1.18,
3516
+ "learning_rate": 0.0002,
3517
+ "loss": 0.7608,
3518
+ "step": 3720
3519
+ },
3520
+ {
3521
+ "epoch": 1.18,
3522
+ "learning_rate": 0.0002,
3523
+ "loss": 0.7277,
3524
+ "step": 3730
3525
+ },
3526
+ {
3527
+ "epoch": 1.19,
3528
+ "learning_rate": 0.0002,
3529
+ "loss": 0.7346,
3530
+ "step": 3740
3531
+ },
3532
+ {
3533
+ "epoch": 1.19,
3534
+ "learning_rate": 0.0002,
3535
+ "loss": 0.7075,
3536
+ "step": 3750
3537
+ },
3538
+ {
3539
+ "epoch": 1.19,
3540
+ "learning_rate": 0.0002,
3541
+ "loss": 0.6278,
3542
+ "step": 3760
3543
+ },
3544
+ {
3545
+ "epoch": 1.19,
3546
+ "learning_rate": 0.0002,
3547
+ "loss": 0.7088,
3548
+ "step": 3770
3549
+ },
3550
+ {
3551
+ "epoch": 1.2,
3552
+ "learning_rate": 0.0002,
3553
+ "loss": 0.7667,
3554
+ "step": 3780
3555
+ },
3556
+ {
3557
+ "epoch": 1.2,
3558
+ "learning_rate": 0.0002,
3559
+ "loss": 0.7051,
3560
+ "step": 3790
3561
+ },
3562
+ {
3563
+ "epoch": 1.2,
3564
+ "learning_rate": 0.0002,
3565
+ "loss": 0.699,
3566
+ "step": 3800
3567
+ },
3568
+ {
3569
+ "epoch": 1.2,
3570
+ "eval_loss": 0.7395787239074707,
3571
+ "eval_runtime": 110.7949,
3572
+ "eval_samples_per_second": 9.026,
3573
+ "eval_steps_per_second": 4.513,
3574
+ "step": 3800
3575
+ },
3576
+ {
3577
+ "epoch": 1.2,
3578
+ "mmlu_eval_accuracy": 0.48410138439418055,
3579
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
3580
+ "mmlu_eval_accuracy_anatomy": 0.5,
3581
+ "mmlu_eval_accuracy_astronomy": 0.4375,
3582
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
3583
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
3584
+ "mmlu_eval_accuracy_college_biology": 0.4375,
3585
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
3586
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
3587
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
3588
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
3589
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
3590
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
3591
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
3592
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
3593
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
3594
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
3595
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
3596
+ "mmlu_eval_accuracy_global_facts": 0.4,
3597
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
3598
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
3599
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
3600
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
3601
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
3602
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.7619047619047619,
3603
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
3604
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
3605
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
3606
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
3607
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
3608
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
3609
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
3610
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
3611
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
3612
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
3613
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
3614
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
3615
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
3616
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
3617
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
3618
+ "mmlu_eval_accuracy_marketing": 0.8,
3619
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
3620
+ "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
3621
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
3622
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
3623
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
3624
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
3625
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
3626
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
3627
+ "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
3628
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
3629
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
3630
+ "mmlu_eval_accuracy_public_relations": 0.5,
3631
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
3632
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
3633
+ "mmlu_eval_accuracy_us_foreign_policy": 0.9090909090909091,
3634
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
3635
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
3636
+ "mmlu_loss": 1.414894336547615,
3637
+ "step": 3800
3638
+ },
3639
+ {
3640
+ "epoch": 1.21,
3641
+ "learning_rate": 0.0002,
3642
+ "loss": 0.6892,
3643
+ "step": 3810
3644
+ },
3645
+ {
3646
+ "epoch": 1.21,
3647
+ "learning_rate": 0.0002,
3648
+ "loss": 0.6753,
3649
+ "step": 3820
3650
+ },
3651
+ {
3652
+ "epoch": 1.21,
3653
+ "learning_rate": 0.0002,
3654
+ "loss": 0.6998,
3655
+ "step": 3830
3656
+ },
3657
+ {
3658
+ "epoch": 1.22,
3659
+ "learning_rate": 0.0002,
3660
+ "loss": 0.686,
3661
+ "step": 3840
3662
+ },
3663
+ {
3664
+ "epoch": 1.22,
3665
+ "learning_rate": 0.0002,
3666
+ "loss": 0.7254,
3667
+ "step": 3850
3668
+ },
3669
+ {
3670
+ "epoch": 1.22,
3671
+ "learning_rate": 0.0002,
3672
+ "loss": 0.6942,
3673
+ "step": 3860
3674
+ },
3675
+ {
3676
+ "epoch": 1.23,
3677
+ "learning_rate": 0.0002,
3678
+ "loss": 0.6729,
3679
+ "step": 3870
3680
+ },
3681
+ {
3682
+ "epoch": 1.23,
3683
+ "learning_rate": 0.0002,
3684
+ "loss": 0.7486,
3685
+ "step": 3880
3686
+ },
3687
+ {
3688
+ "epoch": 1.23,
3689
+ "learning_rate": 0.0002,
3690
+ "loss": 0.6997,
3691
+ "step": 3890
3692
+ },
3693
+ {
3694
+ "epoch": 1.24,
3695
+ "learning_rate": 0.0002,
3696
+ "loss": 0.7308,
3697
+ "step": 3900
3698
+ },
3699
+ {
3700
+ "epoch": 1.24,
3701
+ "learning_rate": 0.0002,
3702
+ "loss": 0.7214,
3703
+ "step": 3910
3704
+ },
3705
+ {
3706
+ "epoch": 1.24,
3707
+ "learning_rate": 0.0002,
3708
+ "loss": 0.6879,
3709
+ "step": 3920
3710
+ },
3711
+ {
3712
+ "epoch": 1.25,
3713
+ "learning_rate": 0.0002,
3714
+ "loss": 0.6662,
3715
+ "step": 3930
3716
+ },
3717
+ {
3718
+ "epoch": 1.25,
3719
+ "learning_rate": 0.0002,
3720
+ "loss": 0.7045,
3721
+ "step": 3940
3722
+ },
3723
+ {
3724
+ "epoch": 1.25,
3725
+ "learning_rate": 0.0002,
3726
+ "loss": 0.7908,
3727
+ "step": 3950
3728
+ },
3729
+ {
3730
+ "epoch": 1.25,
3731
+ "learning_rate": 0.0002,
3732
+ "loss": 0.72,
3733
+ "step": 3960
3734
+ },
3735
+ {
3736
+ "epoch": 1.26,
3737
+ "learning_rate": 0.0002,
3738
+ "loss": 0.6646,
3739
+ "step": 3970
3740
+ },
3741
+ {
3742
+ "epoch": 1.26,
3743
+ "learning_rate": 0.0002,
3744
+ "loss": 0.7421,
3745
+ "step": 3980
3746
+ },
3747
+ {
3748
+ "epoch": 1.26,
3749
+ "learning_rate": 0.0002,
3750
+ "loss": 0.7489,
3751
+ "step": 3990
3752
+ },
3753
+ {
3754
+ "epoch": 1.27,
3755
+ "learning_rate": 0.0002,
3756
+ "loss": 0.7082,
3757
+ "step": 4000
3758
+ },
3759
+ {
3760
+ "epoch": 1.27,
3761
+ "eval_loss": 0.7381725907325745,
3762
+ "eval_runtime": 111.1345,
3763
+ "eval_samples_per_second": 8.998,
3764
+ "eval_steps_per_second": 4.499,
3765
+ "step": 4000
3766
+ },
3767
+ {
3768
+ "epoch": 1.27,
3769
+ "mmlu_eval_accuracy": 0.48533511185669687,
3770
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
3771
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
3772
+ "mmlu_eval_accuracy_astronomy": 0.4375,
3773
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
3774
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
3775
+ "mmlu_eval_accuracy_college_biology": 0.375,
3776
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
3777
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
3778
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
3779
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
3780
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
3781
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
3782
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
3783
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
3784
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
3785
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
3786
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
3787
+ "mmlu_eval_accuracy_global_facts": 0.6,
3788
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
3789
+ "mmlu_eval_accuracy_high_school_chemistry": 0.13636363636363635,
3790
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
3791
+ "mmlu_eval_accuracy_high_school_european_history": 0.7222222222222222,
3792
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
3793
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.7142857142857143,
3794
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3953488372093023,
3795
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
3796
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
3797
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
3798
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
3799
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
3800
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
3801
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
3802
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
3803
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
3804
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
3805
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
3806
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
3807
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
3808
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
3809
+ "mmlu_eval_accuracy_marketing": 0.8,
3810
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
3811
+ "mmlu_eval_accuracy_miscellaneous": 0.7093023255813954,
3812
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
3813
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
3814
+ "mmlu_eval_accuracy_nutrition": 0.696969696969697,
3815
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
3816
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
3817
+ "mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
3818
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
3819
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
3820
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
3821
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
3822
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
3823
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
3824
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
3825
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
3826
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
3827
+ "mmlu_loss": 1.3075970206652858,
3828
+ "step": 4000
3829
+ },
3830
+ {
3831
+ "epoch": 1.27,
3832
+ "learning_rate": 0.0002,
3833
+ "loss": 0.6578,
3834
+ "step": 4010
3835
+ },
3836
+ {
3837
+ "epoch": 1.27,
3838
+ "learning_rate": 0.0002,
3839
+ "loss": 0.7462,
3840
+ "step": 4020
3841
+ },
3842
+ {
3843
+ "epoch": 1.28,
3844
+ "learning_rate": 0.0002,
3845
+ "loss": 0.699,
3846
+ "step": 4030
3847
+ },
3848
+ {
3849
+ "epoch": 1.28,
3850
+ "learning_rate": 0.0002,
3851
+ "loss": 0.7144,
3852
+ "step": 4040
3853
+ },
3854
+ {
3855
+ "epoch": 1.28,
3856
+ "learning_rate": 0.0002,
3857
+ "loss": 0.6771,
3858
+ "step": 4050
3859
+ },
3860
+ {
3861
+ "epoch": 1.29,
3862
+ "learning_rate": 0.0002,
3863
+ "loss": 0.7198,
3864
+ "step": 4060
3865
+ },
3866
+ {
3867
+ "epoch": 1.29,
3868
+ "learning_rate": 0.0002,
3869
+ "loss": 0.6848,
3870
+ "step": 4070
3871
+ },
3872
+ {
3873
+ "epoch": 1.29,
3874
+ "learning_rate": 0.0002,
3875
+ "loss": 0.762,
3876
+ "step": 4080
3877
+ },
3878
+ {
3879
+ "epoch": 1.3,
3880
+ "learning_rate": 0.0002,
3881
+ "loss": 0.7354,
3882
+ "step": 4090
3883
+ },
3884
+ {
3885
+ "epoch": 1.3,
3886
+ "learning_rate": 0.0002,
3887
+ "loss": 0.6529,
3888
+ "step": 4100
3889
+ },
3890
+ {
3891
+ "epoch": 1.3,
3892
+ "learning_rate": 0.0002,
3893
+ "loss": 0.6373,
3894
+ "step": 4110
3895
+ },
3896
+ {
3897
+ "epoch": 1.31,
3898
+ "learning_rate": 0.0002,
3899
+ "loss": 0.7415,
3900
+ "step": 4120
3901
+ },
3902
+ {
3903
+ "epoch": 1.31,
3904
+ "learning_rate": 0.0002,
3905
+ "loss": 0.6646,
3906
+ "step": 4130
3907
+ },
3908
+ {
3909
+ "epoch": 1.31,
3910
+ "learning_rate": 0.0002,
3911
+ "loss": 0.6904,
3912
+ "step": 4140
3913
+ },
3914
+ {
3915
+ "epoch": 1.31,
3916
+ "learning_rate": 0.0002,
3917
+ "loss": 0.7462,
3918
+ "step": 4150
3919
+ },
3920
+ {
3921
+ "epoch": 1.32,
3922
+ "learning_rate": 0.0002,
3923
+ "loss": 0.7261,
3924
+ "step": 4160
3925
+ },
3926
+ {
3927
+ "epoch": 1.32,
3928
+ "learning_rate": 0.0002,
3929
+ "loss": 0.6866,
3930
+ "step": 4170
3931
+ },
3932
+ {
3933
+ "epoch": 1.32,
3934
+ "learning_rate": 0.0002,
3935
+ "loss": 0.6789,
3936
+ "step": 4180
3937
+ },
3938
+ {
3939
+ "epoch": 1.33,
3940
+ "learning_rate": 0.0002,
3941
+ "loss": 0.6943,
3942
+ "step": 4190
3943
+ },
3944
+ {
3945
+ "epoch": 1.33,
3946
+ "learning_rate": 0.0002,
3947
+ "loss": 0.6644,
3948
+ "step": 4200
3949
+ },
3950
+ {
3951
+ "epoch": 1.33,
3952
+ "eval_loss": 0.7391716241836548,
3953
+ "eval_runtime": 111.1279,
3954
+ "eval_samples_per_second": 8.999,
3955
+ "eval_steps_per_second": 4.499,
3956
+ "step": 4200
3957
+ },
3958
+ {
3959
+ "epoch": 1.33,
3960
+ "mmlu_eval_accuracy": 0.48595716946128337,
3961
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
3962
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
3963
+ "mmlu_eval_accuracy_astronomy": 0.4375,
3964
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
3965
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
3966
+ "mmlu_eval_accuracy_college_biology": 0.4375,
3967
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
3968
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
3969
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
3970
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
3971
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
3972
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
3973
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
3974
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
3975
+ "mmlu_eval_accuracy_electrical_engineering": 0.1875,
3976
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
3977
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
3978
+ "mmlu_eval_accuracy_global_facts": 0.4,
3979
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
3980
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
3981
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
3982
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
3983
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
3984
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
3985
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
3986
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
3987
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
3988
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
3989
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
3990
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
3991
+ "mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
3992
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
3993
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
3994
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
3995
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
3996
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
3997
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
3998
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
3999
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
4000
+ "mmlu_eval_accuracy_marketing": 0.72,
4001
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
4002
+ "mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
4003
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
4004
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
4005
+ "mmlu_eval_accuracy_nutrition": 0.7272727272727273,
4006
+ "mmlu_eval_accuracy_philosophy": 0.5,
4007
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
4008
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
4009
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
4010
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
4011
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
4012
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
4013
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
4014
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
4015
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
4016
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
4017
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
4018
+ "mmlu_loss": 1.2436322906776758,
4019
+ "step": 4200
4020
+ },
4021
+ {
4022
+ "epoch": 1.33,
4023
+ "learning_rate": 0.0002,
4024
+ "loss": 0.7427,
4025
+ "step": 4210
4026
+ },
4027
+ {
4028
+ "epoch": 1.34,
4029
+ "learning_rate": 0.0002,
4030
+ "loss": 0.628,
4031
+ "step": 4220
4032
+ },
4033
+ {
4034
+ "epoch": 1.34,
4035
+ "learning_rate": 0.0002,
4036
+ "loss": 0.6656,
4037
+ "step": 4230
4038
+ },
4039
+ {
4040
+ "epoch": 1.34,
4041
+ "learning_rate": 0.0002,
4042
+ "loss": 0.6631,
4043
+ "step": 4240
4044
+ },
4045
+ {
4046
+ "epoch": 1.35,
4047
+ "learning_rate": 0.0002,
4048
+ "loss": 0.7031,
4049
+ "step": 4250
4050
+ },
4051
+ {
4052
+ "epoch": 1.35,
4053
+ "learning_rate": 0.0002,
4054
+ "loss": 0.7102,
4055
+ "step": 4260
4056
+ },
4057
+ {
4058
+ "epoch": 1.35,
4059
+ "learning_rate": 0.0002,
4060
+ "loss": 0.7077,
4061
+ "step": 4270
4062
+ },
4063
+ {
4064
+ "epoch": 1.36,
4065
+ "learning_rate": 0.0002,
4066
+ "loss": 0.7679,
4067
+ "step": 4280
4068
+ },
4069
+ {
4070
+ "epoch": 1.36,
4071
+ "learning_rate": 0.0002,
4072
+ "loss": 0.6569,
4073
+ "step": 4290
4074
+ },
4075
+ {
4076
+ "epoch": 1.36,
4077
+ "learning_rate": 0.0002,
4078
+ "loss": 0.6911,
4079
+ "step": 4300
4080
+ },
4081
+ {
4082
+ "epoch": 1.37,
4083
+ "learning_rate": 0.0002,
4084
+ "loss": 0.7468,
4085
+ "step": 4310
4086
+ },
4087
+ {
4088
+ "epoch": 1.37,
4089
+ "learning_rate": 0.0002,
4090
+ "loss": 0.6641,
4091
+ "step": 4320
4092
+ },
4093
+ {
4094
+ "epoch": 1.37,
4095
+ "learning_rate": 0.0002,
4096
+ "loss": 0.7248,
4097
+ "step": 4330
4098
+ },
4099
+ {
4100
+ "epoch": 1.38,
4101
+ "learning_rate": 0.0002,
4102
+ "loss": 0.706,
4103
+ "step": 4340
4104
+ },
4105
+ {
4106
+ "epoch": 1.38,
4107
+ "learning_rate": 0.0002,
4108
+ "loss": 0.717,
4109
+ "step": 4350
4110
+ },
4111
+ {
4112
+ "epoch": 1.38,
4113
+ "learning_rate": 0.0002,
4114
+ "loss": 0.6462,
4115
+ "step": 4360
4116
+ },
4117
+ {
4118
+ "epoch": 1.38,
4119
+ "learning_rate": 0.0002,
4120
+ "loss": 0.6752,
4121
+ "step": 4370
4122
+ },
4123
+ {
4124
+ "epoch": 1.39,
4125
+ "learning_rate": 0.0002,
4126
+ "loss": 0.7239,
4127
+ "step": 4380
4128
+ },
4129
+ {
4130
+ "epoch": 1.39,
4131
+ "learning_rate": 0.0002,
4132
+ "loss": 0.6665,
4133
+ "step": 4390
4134
+ },
4135
+ {
4136
+ "epoch": 1.39,
4137
+ "learning_rate": 0.0002,
4138
+ "loss": 0.7077,
4139
+ "step": 4400
4140
+ },
4141
+ {
4142
+ "epoch": 1.39,
4143
+ "eval_loss": 0.7374858260154724,
4144
+ "eval_runtime": 111.3021,
4145
+ "eval_samples_per_second": 8.985,
4146
+ "eval_steps_per_second": 4.492,
4147
+ "step": 4400
4148
+ },
4149
+ {
4150
+ "epoch": 1.39,
4151
+ "mmlu_eval_accuracy": 0.49250240895964725,
4152
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
4153
+ "mmlu_eval_accuracy_anatomy": 0.7857142857142857,
4154
+ "mmlu_eval_accuracy_astronomy": 0.4375,
4155
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
4156
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
4157
+ "mmlu_eval_accuracy_college_biology": 0.4375,
4158
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
4159
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
4160
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
4161
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
4162
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
4163
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
4164
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
4165
+ "mmlu_eval_accuracy_econometrics": 0.3333333333333333,
4166
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
4167
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
4168
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
4169
+ "mmlu_eval_accuracy_global_facts": 0.6,
4170
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
4171
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
4172
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
4173
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
4174
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
4175
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
4176
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
4177
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
4178
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
4179
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
4180
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
4181
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
4182
+ "mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
4183
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
4184
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
4185
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
4186
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
4187
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
4188
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
4189
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
4190
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
4191
+ "mmlu_eval_accuracy_marketing": 0.76,
4192
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
4193
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
4194
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
4195
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
4196
+ "mmlu_eval_accuracy_nutrition": 0.7575757575757576,
4197
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
4198
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
4199
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
4200
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
4201
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
4202
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
4203
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
4204
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
4205
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
4206
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
4207
+ "mmlu_eval_accuracy_virology": 0.5,
4208
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
4209
+ "mmlu_loss": 1.2793094400637455,
4210
+ "step": 4400
4211
+ },
4212
+ {
4213
+ "epoch": 1.4,
4214
+ "learning_rate": 0.0002,
4215
+ "loss": 0.7042,
4216
+ "step": 4410
4217
+ },
4218
+ {
4219
+ "epoch": 1.4,
4220
+ "learning_rate": 0.0002,
4221
+ "loss": 0.7554,
4222
+ "step": 4420
4223
+ },
4224
+ {
4225
+ "epoch": 1.4,
4226
+ "learning_rate": 0.0002,
4227
+ "loss": 0.757,
4228
+ "step": 4430
4229
+ },
4230
+ {
4231
+ "epoch": 1.41,
4232
+ "learning_rate": 0.0002,
4233
+ "loss": 0.7173,
4234
+ "step": 4440
4235
+ },
4236
+ {
4237
+ "epoch": 1.41,
4238
+ "learning_rate": 0.0002,
4239
+ "loss": 0.6655,
4240
+ "step": 4450
4241
+ },
4242
+ {
4243
+ "epoch": 1.41,
4244
+ "learning_rate": 0.0002,
4245
+ "loss": 0.6991,
4246
+ "step": 4460
4247
+ },
4248
+ {
4249
+ "epoch": 1.42,
4250
+ "learning_rate": 0.0002,
4251
+ "loss": 0.7148,
4252
+ "step": 4470
4253
+ },
4254
+ {
4255
+ "epoch": 1.42,
4256
+ "learning_rate": 0.0002,
4257
+ "loss": 0.7085,
4258
+ "step": 4480
4259
+ },
4260
+ {
4261
+ "epoch": 1.42,
4262
+ "learning_rate": 0.0002,
4263
+ "loss": 0.6955,
4264
+ "step": 4490
4265
+ },
4266
+ {
4267
+ "epoch": 1.43,
4268
+ "learning_rate": 0.0002,
4269
+ "loss": 0.7139,
4270
+ "step": 4500
4271
+ },
4272
+ {
4273
+ "epoch": 1.43,
4274
+ "learning_rate": 0.0002,
4275
+ "loss": 0.7262,
4276
+ "step": 4510
4277
+ },
4278
+ {
4279
+ "epoch": 1.43,
4280
+ "learning_rate": 0.0002,
4281
+ "loss": 0.7705,
4282
+ "step": 4520
4283
+ },
4284
+ {
4285
+ "epoch": 1.44,
4286
+ "learning_rate": 0.0002,
4287
+ "loss": 0.7028,
4288
+ "step": 4530
4289
+ },
4290
+ {
4291
+ "epoch": 1.44,
4292
+ "learning_rate": 0.0002,
4293
+ "loss": 0.7146,
4294
+ "step": 4540
4295
+ },
4296
+ {
4297
+ "epoch": 1.44,
4298
+ "learning_rate": 0.0002,
4299
+ "loss": 0.6868,
4300
+ "step": 4550
4301
+ },
4302
+ {
4303
+ "epoch": 1.44,
4304
+ "learning_rate": 0.0002,
4305
+ "loss": 0.6591,
4306
+ "step": 4560
4307
+ },
4308
+ {
4309
+ "epoch": 1.45,
4310
+ "learning_rate": 0.0002,
4311
+ "loss": 0.7019,
4312
+ "step": 4570
4313
+ },
4314
+ {
4315
+ "epoch": 1.45,
4316
+ "learning_rate": 0.0002,
4317
+ "loss": 0.6676,
4318
+ "step": 4580
4319
+ },
4320
+ {
4321
+ "epoch": 1.45,
4322
+ "learning_rate": 0.0002,
4323
+ "loss": 0.7085,
4324
+ "step": 4590
4325
+ },
4326
+ {
4327
+ "epoch": 1.46,
4328
+ "learning_rate": 0.0002,
4329
+ "loss": 0.664,
4330
+ "step": 4600
4331
+ },
4332
+ {
4333
+ "epoch": 1.46,
4334
+ "eval_loss": 0.7358158230781555,
4335
+ "eval_runtime": 111.2934,
4336
+ "eval_samples_per_second": 8.985,
4337
+ "eval_steps_per_second": 4.493,
4338
+ "step": 4600
4339
+ },
4340
+ {
4341
+ "epoch": 1.46,
4342
+ "mmlu_eval_accuracy": 0.4861300261183481,
4343
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
4344
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
4345
+ "mmlu_eval_accuracy_astronomy": 0.4375,
4346
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
4347
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
4348
+ "mmlu_eval_accuracy_college_biology": 0.375,
4349
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
4350
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
4351
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
4352
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
4353
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
4354
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
4355
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
4356
+ "mmlu_eval_accuracy_econometrics": 0.3333333333333333,
4357
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
4358
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
4359
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
4360
+ "mmlu_eval_accuracy_global_facts": 0.6,
4361
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
4362
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
4363
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
4364
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
4365
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
4366
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
4367
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
4368
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
4369
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
4370
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
4371
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
4372
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
4373
+ "mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
4374
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
4375
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
4376
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
4377
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
4378
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
4379
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
4380
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
4381
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
4382
+ "mmlu_eval_accuracy_marketing": 0.8,
4383
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
4384
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
4385
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
4386
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
4387
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
4388
+ "mmlu_eval_accuracy_philosophy": 0.5,
4389
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
4390
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
4391
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
4392
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
4393
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
4394
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
4395
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
4396
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
4397
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
4398
+ "mmlu_eval_accuracy_virology": 0.5,
4399
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
4400
+ "mmlu_loss": 1.3095654961567946,
4401
+ "step": 4600
4402
+ },
4403
+ {
4404
+ "epoch": 1.46,
4405
+ "learning_rate": 0.0002,
4406
+ "loss": 0.6486,
4407
+ "step": 4610
4408
+ },
4409
+ {
4410
+ "epoch": 1.46,
4411
+ "learning_rate": 0.0002,
4412
+ "loss": 0.6999,
4413
+ "step": 4620
4414
+ },
4415
+ {
4416
+ "epoch": 1.47,
4417
+ "learning_rate": 0.0002,
4418
+ "loss": 0.6458,
4419
+ "step": 4630
4420
+ },
4421
+ {
4422
+ "epoch": 1.47,
4423
+ "learning_rate": 0.0002,
4424
+ "loss": 0.6762,
4425
+ "step": 4640
4426
+ },
4427
+ {
4428
+ "epoch": 1.47,
4429
+ "learning_rate": 0.0002,
4430
+ "loss": 0.6924,
4431
+ "step": 4650
4432
+ },
4433
+ {
4434
+ "epoch": 1.48,
4435
+ "learning_rate": 0.0002,
4436
+ "loss": 0.682,
4437
+ "step": 4660
4438
+ },
4439
+ {
4440
+ "epoch": 1.48,
4441
+ "learning_rate": 0.0002,
4442
+ "loss": 0.7081,
4443
+ "step": 4670
4444
+ },
4445
+ {
4446
+ "epoch": 1.48,
4447
+ "learning_rate": 0.0002,
4448
+ "loss": 0.7506,
4449
+ "step": 4680
4450
+ },
4451
+ {
4452
+ "epoch": 1.49,
4453
+ "learning_rate": 0.0002,
4454
+ "loss": 0.7311,
4455
+ "step": 4690
4456
+ },
4457
+ {
4458
+ "epoch": 1.49,
4459
+ "learning_rate": 0.0002,
4460
+ "loss": 0.6463,
4461
+ "step": 4700
4462
+ },
4463
+ {
4464
+ "epoch": 1.49,
4465
+ "learning_rate": 0.0002,
4466
+ "loss": 0.6741,
4467
+ "step": 4710
4468
+ },
4469
+ {
4470
+ "epoch": 1.5,
4471
+ "learning_rate": 0.0002,
4472
+ "loss": 0.6626,
4473
+ "step": 4720
4474
+ },
4475
+ {
4476
+ "epoch": 1.5,
4477
+ "learning_rate": 0.0002,
4478
+ "loss": 0.712,
4479
+ "step": 4730
4480
+ },
4481
+ {
4482
+ "epoch": 1.5,
4483
+ "learning_rate": 0.0002,
4484
+ "loss": 0.6676,
4485
+ "step": 4740
4486
+ },
4487
+ {
4488
+ "epoch": 1.51,
4489
+ "learning_rate": 0.0002,
4490
+ "loss": 0.7193,
4491
+ "step": 4750
4492
+ },
4493
+ {
4494
+ "epoch": 1.51,
4495
+ "learning_rate": 0.0002,
4496
+ "loss": 0.6699,
4497
+ "step": 4760
4498
+ },
4499
+ {
4500
+ "epoch": 1.51,
4501
+ "learning_rate": 0.0002,
4502
+ "loss": 0.6718,
4503
+ "step": 4770
4504
+ },
4505
+ {
4506
+ "epoch": 1.51,
4507
+ "learning_rate": 0.0002,
4508
+ "loss": 0.6899,
4509
+ "step": 4780
4510
+ },
4511
+ {
4512
+ "epoch": 1.52,
4513
+ "learning_rate": 0.0002,
4514
+ "loss": 0.6954,
4515
+ "step": 4790
4516
+ },
4517
+ {
4518
+ "epoch": 1.52,
4519
+ "learning_rate": 0.0002,
4520
+ "loss": 0.7187,
4521
+ "step": 4800
4522
+ },
4523
+ {
4524
+ "epoch": 1.52,
4525
+ "eval_loss": 0.7387924790382385,
4526
+ "eval_runtime": 111.1141,
4527
+ "eval_samples_per_second": 9.0,
4528
+ "eval_steps_per_second": 4.5,
4529
+ "step": 4800
4530
+ },
4531
+ {
4532
+ "epoch": 1.52,
4533
+ "mmlu_eval_accuracy": 0.4879926358283337,
4534
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
4535
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
4536
+ "mmlu_eval_accuracy_astronomy": 0.4375,
4537
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
4538
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
4539
+ "mmlu_eval_accuracy_college_biology": 0.375,
4540
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
4541
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
4542
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
4543
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
4544
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
4545
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
4546
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
4547
+ "mmlu_eval_accuracy_econometrics": 0.25,
4548
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
4549
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
4550
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
4551
+ "mmlu_eval_accuracy_global_facts": 0.5,
4552
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
4553
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
4554
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
4555
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
4556
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
4557
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
4558
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
4559
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
4560
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
4561
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
4562
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
4563
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
4564
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
4565
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
4566
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
4567
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
4568
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
4569
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
4570
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
4571
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
4572
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
4573
+ "mmlu_eval_accuracy_marketing": 0.8,
4574
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
4575
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
4576
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
4577
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
4578
+ "mmlu_eval_accuracy_nutrition": 0.696969696969697,
4579
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
4580
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
4581
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
4582
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
4583
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
4584
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
4585
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
4586
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
4587
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
4588
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
4589
+ "mmlu_eval_accuracy_virology": 0.5,
4590
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
4591
+ "mmlu_loss": 1.4884750641901874,
4592
+ "step": 4800
4593
+ },
4594
+ {
4595
+ "epoch": 1.52,
4596
+ "learning_rate": 0.0002,
4597
+ "loss": 0.6733,
4598
+ "step": 4810
4599
+ },
4600
+ {
4601
+ "epoch": 1.53,
4602
+ "learning_rate": 0.0002,
4603
+ "loss": 0.6607,
4604
+ "step": 4820
4605
+ },
4606
+ {
4607
+ "epoch": 1.53,
4608
+ "learning_rate": 0.0002,
4609
+ "loss": 0.6933,
4610
+ "step": 4830
4611
+ },
4612
+ {
4613
+ "epoch": 1.53,
4614
+ "learning_rate": 0.0002,
4615
+ "loss": 0.7517,
4616
+ "step": 4840
4617
+ },
4618
+ {
4619
+ "epoch": 1.54,
4620
+ "learning_rate": 0.0002,
4621
+ "loss": 0.7391,
4622
+ "step": 4850
4623
+ },
4624
+ {
4625
+ "epoch": 1.54,
4626
+ "learning_rate": 0.0002,
4627
+ "loss": 0.6636,
4628
+ "step": 4860
4629
+ },
4630
+ {
4631
+ "epoch": 1.54,
4632
+ "learning_rate": 0.0002,
4633
+ "loss": 0.7221,
4634
+ "step": 4870
4635
+ },
4636
+ {
4637
+ "epoch": 1.55,
4638
+ "learning_rate": 0.0002,
4639
+ "loss": 0.6967,
4640
+ "step": 4880
4641
+ },
4642
+ {
4643
+ "epoch": 1.55,
4644
+ "learning_rate": 0.0002,
4645
+ "loss": 0.7117,
4646
+ "step": 4890
4647
+ },
4648
+ {
4649
+ "epoch": 1.55,
4650
+ "learning_rate": 0.0002,
4651
+ "loss": 0.6256,
4652
+ "step": 4900
4653
+ },
4654
+ {
4655
+ "epoch": 1.56,
4656
+ "learning_rate": 0.0002,
4657
+ "loss": 0.7923,
4658
+ "step": 4910
4659
+ },
4660
+ {
4661
+ "epoch": 1.56,
4662
+ "learning_rate": 0.0002,
4663
+ "loss": 0.7151,
4664
+ "step": 4920
4665
+ },
4666
+ {
4667
+ "epoch": 1.56,
4668
+ "learning_rate": 0.0002,
4669
+ "loss": 0.7119,
4670
+ "step": 4930
4671
+ },
4672
+ {
4673
+ "epoch": 1.57,
4674
+ "learning_rate": 0.0002,
4675
+ "loss": 0.7105,
4676
+ "step": 4940
4677
+ },
4678
+ {
4679
+ "epoch": 1.57,
4680
+ "learning_rate": 0.0002,
4681
+ "loss": 0.6653,
4682
+ "step": 4950
4683
+ },
4684
+ {
4685
+ "epoch": 1.57,
4686
+ "learning_rate": 0.0002,
4687
+ "loss": 0.7084,
4688
+ "step": 4960
4689
+ },
4690
+ {
4691
+ "epoch": 1.57,
4692
+ "learning_rate": 0.0002,
4693
+ "loss": 0.6644,
4694
+ "step": 4970
4695
+ },
4696
+ {
4697
+ "epoch": 1.58,
4698
+ "learning_rate": 0.0002,
4699
+ "loss": 0.6665,
4700
+ "step": 4980
4701
+ },
4702
+ {
4703
+ "epoch": 1.58,
4704
+ "learning_rate": 0.0002,
4705
+ "loss": 0.6746,
4706
+ "step": 4990
4707
+ },
4708
+ {
4709
+ "epoch": 1.58,
4710
+ "learning_rate": 0.0002,
4711
+ "loss": 0.7223,
4712
+ "step": 5000
4713
+ },
4714
+ {
4715
+ "epoch": 1.58,
4716
+ "eval_loss": 0.7373215556144714,
4717
+ "eval_runtime": 111.2649,
4718
+ "eval_samples_per_second": 8.988,
4719
+ "eval_steps_per_second": 4.494,
4720
+ "step": 5000
4721
+ },
4722
+ {
4723
+ "epoch": 1.58,
4724
+ "mmlu_eval_accuracy": 0.46701126611778865,
4725
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
4726
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
4727
+ "mmlu_eval_accuracy_astronomy": 0.4375,
4728
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
4729
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
4730
+ "mmlu_eval_accuracy_college_biology": 0.375,
4731
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
4732
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
4733
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
4734
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
4735
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
4736
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
4737
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
4738
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
4739
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
4740
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
4741
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
4742
+ "mmlu_eval_accuracy_global_facts": 0.3,
4743
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
4744
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
4745
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
4746
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
4747
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
4748
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
4749
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
4750
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
4751
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
4752
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
4753
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
4754
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
4755
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
4756
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
4757
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
4758
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
4759
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
4760
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
4761
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
4762
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
4763
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
4764
+ "mmlu_eval_accuracy_marketing": 0.72,
4765
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
4766
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
4767
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
4768
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
4769
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
4770
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
4771
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
4772
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
4773
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
4774
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
4775
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
4776
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
4777
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
4778
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
4779
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
4780
+ "mmlu_eval_accuracy_virology": 0.5,
4781
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
4782
+ "mmlu_loss": 1.4578603010031324,
4783
+ "step": 5000
4784
+ },
4785
+ {
4786
+ "epoch": 1.59,
4787
+ "learning_rate": 0.0002,
4788
+ "loss": 0.6833,
4789
+ "step": 5010
4790
+ },
4791
+ {
4792
+ "epoch": 1.59,
4793
+ "learning_rate": 0.0002,
4794
+ "loss": 0.7323,
4795
+ "step": 5020
4796
+ },
4797
+ {
4798
+ "epoch": 1.59,
4799
+ "learning_rate": 0.0002,
4800
+ "loss": 0.7224,
4801
+ "step": 5030
4802
+ },
4803
+ {
4804
+ "epoch": 1.6,
4805
+ "learning_rate": 0.0002,
4806
+ "loss": 0.734,
4807
+ "step": 5040
4808
+ },
4809
+ {
4810
+ "epoch": 1.6,
4811
+ "learning_rate": 0.0002,
4812
+ "loss": 0.692,
4813
+ "step": 5050
4814
+ },
4815
+ {
4816
+ "epoch": 1.6,
4817
+ "learning_rate": 0.0002,
4818
+ "loss": 0.7083,
4819
+ "step": 5060
4820
+ },
4821
+ {
4822
+ "epoch": 1.61,
4823
+ "learning_rate": 0.0002,
4824
+ "loss": 0.6993,
4825
+ "step": 5070
4826
+ },
4827
+ {
4828
+ "epoch": 1.61,
4829
+ "learning_rate": 0.0002,
4830
+ "loss": 0.755,
4831
+ "step": 5080
4832
+ },
4833
+ {
4834
+ "epoch": 1.61,
4835
+ "learning_rate": 0.0002,
4836
+ "loss": 0.7323,
4837
+ "step": 5090
4838
+ },
4839
+ {
4840
+ "epoch": 1.62,
4841
+ "learning_rate": 0.0002,
4842
+ "loss": 0.6725,
4843
+ "step": 5100
4844
+ },
4845
+ {
4846
+ "epoch": 1.62,
4847
+ "learning_rate": 0.0002,
4848
+ "loss": 0.6989,
4849
+ "step": 5110
4850
+ },
4851
+ {
4852
+ "epoch": 1.62,
4853
+ "learning_rate": 0.0002,
4854
+ "loss": 0.6938,
4855
+ "step": 5120
4856
+ },
4857
+ {
4858
+ "epoch": 1.63,
4859
+ "learning_rate": 0.0002,
4860
+ "loss": 0.6895,
4861
+ "step": 5130
4862
+ },
4863
+ {
4864
+ "epoch": 1.63,
4865
+ "learning_rate": 0.0002,
4866
+ "loss": 0.6915,
4867
+ "step": 5140
4868
+ },
4869
+ {
4870
+ "epoch": 1.63,
4871
+ "learning_rate": 0.0002,
4872
+ "loss": 0.7672,
4873
+ "step": 5150
4874
+ },
4875
+ {
4876
+ "epoch": 1.63,
4877
+ "learning_rate": 0.0002,
4878
+ "loss": 0.6413,
4879
+ "step": 5160
4880
+ },
4881
+ {
4882
+ "epoch": 1.64,
4883
+ "learning_rate": 0.0002,
4884
+ "loss": 0.7195,
4885
+ "step": 5170
4886
+ },
4887
+ {
4888
+ "epoch": 1.64,
4889
+ "learning_rate": 0.0002,
4890
+ "loss": 0.6783,
4891
+ "step": 5180
4892
+ },
4893
+ {
4894
+ "epoch": 1.64,
4895
+ "learning_rate": 0.0002,
4896
+ "loss": 0.6457,
4897
+ "step": 5190
4898
+ },
4899
+ {
4900
+ "epoch": 1.65,
4901
+ "learning_rate": 0.0002,
4902
+ "loss": 0.6959,
4903
+ "step": 5200
4904
+ },
4905
+ {
4906
+ "epoch": 1.65,
4907
+ "eval_loss": 0.736714243888855,
4908
+ "eval_runtime": 111.0389,
4909
+ "eval_samples_per_second": 9.006,
4910
+ "eval_steps_per_second": 4.503,
4911
+ "step": 5200
4912
+ },
4913
+ {
4914
+ "epoch": 1.65,
4915
+ "mmlu_eval_accuracy": 0.4835750759985151,
4916
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
4917
+ "mmlu_eval_accuracy_anatomy": 0.7857142857142857,
4918
+ "mmlu_eval_accuracy_astronomy": 0.375,
4919
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
4920
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
4921
+ "mmlu_eval_accuracy_college_biology": 0.4375,
4922
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
4923
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
4924
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
4925
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
4926
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
4927
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
4928
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
4929
+ "mmlu_eval_accuracy_econometrics": 0.25,
4930
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
4931
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
4932
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
4933
+ "mmlu_eval_accuracy_global_facts": 0.5,
4934
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
4935
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
4936
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
4937
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
4938
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
4939
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
4940
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
4941
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
4942
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
4943
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
4944
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
4945
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
4946
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
4947
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
4948
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
4949
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
4950
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
4951
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
4952
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
4953
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
4954
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
4955
+ "mmlu_eval_accuracy_marketing": 0.76,
4956
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
4957
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
4958
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
4959
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
4960
+ "mmlu_eval_accuracy_nutrition": 0.696969696969697,
4961
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
4962
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
4963
+ "mmlu_eval_accuracy_professional_accounting": 0.22580645161290322,
4964
+ "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
4965
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
4966
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
4967
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
4968
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
4969
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
4970
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
4971
+ "mmlu_eval_accuracy_virology": 0.5,
4972
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
4973
+ "mmlu_loss": 1.2928575183564004,
4974
+ "step": 5200
4975
  }
4976
  ],
4977
  "max_steps": 10000,
4978
  "num_train_epochs": 4,
4979
+ "total_flos": 1.5783912029175153e+18,
4980
  "trial_name": null,
4981
  "trial_params": null
4982
  }
{checkpoint-3200 → checkpoint-5200}/training_args.bin RENAMED
File without changes