prateeky2806 committed
Commit 4c03f04
1 Parent(s): b01e79f

Training in progress, step 1200
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:28eefc162ea028a7ab1c1acd21f5386ff63e683e9dc305f229b2c831bc20ab3a
+ oid sha256:2d09c44ff41dba720f4e0eada63271caeae352166d25852697b3a949e4551746
  size 319977229
checkpoint-1000/adapter_model/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.4.0
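
For reference, the quantization settings recorded in this model card correspond to a `transformers` `BitsAndBytesConfig`. A minimal sketch, assuming `transformers` and `bitsandbytes` are installed (the variable name is illustrative):

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the config listed in the README above: 4-bit NF4 quantization
# with double quantization and bfloat16 compute.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```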
checkpoint-1000/adapter_model/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "NousResearch/Nous-Hermes-llama-2-7b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "down_proj",
+ "o_proj",
+ "k_proj",
+ "gate_proj",
+ "q_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
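
The adapter config above describes a rank-64 LoRA over all attention and MLP projections of the base model. A minimal loading sketch, assuming the checkpoint directory exists locally and `bnb_config` is the quantization config sketched earlier:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Nous-Hermes-llama-2-7b",
    quantization_config=bnb_config,  # from the sketch above
    device_map="auto",
)
# Path is illustrative; point it at a checkpoint's adapter_model directory.
model = PeftModel.from_pretrained(base, "checkpoint-1000/adapter_model/adapter_model")
```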
checkpoint-1000/adapter_model/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28eefc162ea028a7ab1c1acd21f5386ff63e683e9dc305f229b2c831bc20ab3a
+ size 319977229
checkpoint-1200/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-1200/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "NousResearch/Nous-Hermes-llama-2-7b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "down_proj",
+ "o_proj",
+ "k_proj",
+ "gate_proj",
+ "q_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d09c44ff41dba720f4e0eada63271caeae352166d25852697b3a949e4551746
+ size 319977229
checkpoint-1200/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<pad>": 32000
+ }
checkpoint-1200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e77fb1ed1d3df3c9a151b6a5c6790cdf7ff091ea61a0ca175442d686084dfcd5
+ size 1279539973
checkpoint-1200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49cc75508b2be72e9546c7543995494ee6d00f2f1add61445923f52c25602b73
+ size 14511
checkpoint-1200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d8d6be7898f87772ccbc5c732e900fe63a643c4595ce6af3d6bc6f811ba4b65
+ size 627
checkpoint-1200/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "<unk>",
+ "unk_token": "<unk>"
+ }
checkpoint-1200/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
checkpoint-1200/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
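
Note that `tokenizer_config.json` leaves `pad_token` null, while `special_tokens_map.json` maps it to `<unk>` and `added_tokens.json` registers `<pad>` at id 32000. A minimal sketch of loading the tokenizer from this checkpoint (local path is illustrative):

```python
from transformers import AutoTokenizer

# Resolves the tokenizer files committed above.
tok = AutoTokenizer.from_pretrained("checkpoint-1200")
print(tok.pad_token, tok.padding_side)  # <unk>, right — per special_tokens_map.json
```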
checkpoint-1200/trainer_state.json ADDED
@@ -0,0 +1,1162 @@
+ {
+ "best_metric": 0.4976211488246918,
+ "best_model_checkpoint": "./output_v2/7b_cluster09_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_09/checkpoint-1200",
+ "epoch": 1.4796547472256474,
+ "global_step": 1200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 0.0002,
+ "loss": 0.597,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 0.0002,
+ "loss": 0.5778,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 0.0002,
+ "loss": 0.5677,
+ "step": 30
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.0002,
+ "loss": 0.5528,
+ "step": 40
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.0002,
+ "loss": 0.5558,
+ "step": 50
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.0002,
+ "loss": 0.5571,
+ "step": 60
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0002,
+ "loss": 0.5499,
+ "step": 70
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.0002,
+ "loss": 0.5491,
+ "step": 80
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.0002,
+ "loss": 0.5407,
+ "step": 90
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0002,
+ "loss": 0.5492,
+ "step": 100
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.0002,
+ "loss": 0.5258,
+ "step": 110
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.0002,
+ "loss": 0.5217,
+ "step": 120
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 0.0002,
+ "loss": 0.538,
+ "step": 130
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0002,
+ "loss": 0.5265,
+ "step": 140
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 0.0002,
+ "loss": 0.5344,
+ "step": 150
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.0002,
+ "loss": 0.5361,
+ "step": 160
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0002,
+ "loss": 0.5186,
+ "step": 170
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.0002,
+ "loss": 0.5312,
+ "step": 180
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.0002,
+ "loss": 0.5395,
+ "step": 190
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.0002,
+ "loss": 0.5399,
+ "step": 200
+ },
+ {
+ "epoch": 0.25,
+ "eval_loss": 0.533891499042511,
+ "eval_runtime": 249.6236,
+ "eval_samples_per_second": 4.006,
+ "eval_steps_per_second": 2.003,
+ "step": 200
+ },
+ {
+ "epoch": 0.25,
+ "mmlu_eval_accuracy": 0.46207163729626294,
+ "mmlu_eval_accuracy_abstract_algebra": 0.09090909090909091,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+ "mmlu_eval_accuracy_college_biology": 0.4375,
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+ "mmlu_eval_accuracy_global_facts": 0.5,
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+ "mmlu_eval_accuracy_high_school_geography": 0.6363636363636364,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+ "mmlu_eval_accuracy_high_school_psychology": 0.7333333333333333,
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+ "mmlu_eval_accuracy_international_law": 0.6923076923076923,
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
+ "mmlu_eval_accuracy_marketing": 0.72,
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+ "mmlu_eval_accuracy_professional_psychology": 0.37681159420289856,
+ "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+ "mmlu_eval_accuracy_virology": 0.3888888888888889,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.267449478023046,
+ "step": 200
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0002,
+ "loss": 0.524,
+ "step": 210
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.0002,
+ "loss": 0.5484,
+ "step": 220
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0002,
+ "loss": 0.5247,
+ "step": 230
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.0002,
+ "loss": 0.5305,
+ "step": 240
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.0002,
+ "loss": 0.5179,
+ "step": 250
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.0002,
+ "loss": 0.5408,
+ "step": 260
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.0002,
+ "loss": 0.5472,
+ "step": 270
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0002,
+ "loss": 0.5136,
+ "step": 280
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.0002,
+ "loss": 0.5262,
+ "step": 290
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.0002,
+ "loss": 0.5361,
+ "step": 300
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0002,
+ "loss": 0.5007,
+ "step": 310
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.0002,
+ "loss": 0.5211,
+ "step": 320
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.0002,
+ "loss": 0.5217,
+ "step": 330
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 0.0002,
+ "loss": 0.5337,
+ "step": 340
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 0.0002,
+ "loss": 0.5113,
+ "step": 350
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 0.0002,
+ "loss": 0.518,
+ "step": 360
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 0.0002,
+ "loss": 0.5151,
+ "step": 370
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 0.0002,
+ "loss": 0.5133,
+ "step": 380
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.0002,
+ "loss": 0.5083,
+ "step": 390
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.0002,
+ "loss": 0.5235,
+ "step": 400
+ },
+ {
+ "epoch": 0.49,
+ "eval_loss": 0.5213926434516907,
+ "eval_runtime": 249.5749,
+ "eval_samples_per_second": 4.007,
+ "eval_steps_per_second": 2.003,
+ "step": 400
+ },
+ {
+ "epoch": 0.49,
+ "mmlu_eval_accuracy": 0.45812855653406065,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+ "mmlu_eval_accuracy_college_biology": 0.375,
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+ "mmlu_eval_accuracy_global_facts": 0.6,
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+ "mmlu_eval_accuracy_high_school_psychology": 0.75,
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
+ "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+ "mmlu_eval_accuracy_international_law": 0.6923076923076923,
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+ "mmlu_eval_accuracy_management": 0.45454545454545453,
+ "mmlu_eval_accuracy_marketing": 0.8,
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+ "mmlu_eval_accuracy_professional_psychology": 0.36231884057971014,
+ "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+ "mmlu_eval_accuracy_virology": 0.3333333333333333,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.1127305320092031,
+ "step": 400
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 0.0002,
+ "loss": 0.5194,
+ "step": 410
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 0.0002,
+ "loss": 0.5279,
+ "step": 420
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.0002,
+ "loss": 0.5105,
+ "step": 430
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 0.0002,
+ "loss": 0.5427,
+ "step": 440
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 0.0002,
+ "loss": 0.5276,
+ "step": 450
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 0.0002,
+ "loss": 0.4865,
+ "step": 460
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 0.5161,
+ "step": 470
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 0.0002,
+ "loss": 0.513,
+ "step": 480
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 0.0002,
+ "loss": 0.5284,
+ "step": 490
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 0.0002,
+ "loss": 0.5101,
+ "step": 500
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.0002,
+ "loss": 0.5218,
+ "step": 510
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 0.0002,
+ "loss": 0.5087,
+ "step": 520
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 0.0002,
+ "loss": 0.5157,
+ "step": 530
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 0.0002,
+ "loss": 0.501,
+ "step": 540
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 0.0002,
+ "loss": 0.508,
+ "step": 550
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.0002,
+ "loss": 0.5199,
+ "step": 560
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 0.0002,
+ "loss": 0.5043,
+ "step": 570
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 0.0002,
+ "loss": 0.5069,
+ "step": 580
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.0002,
+ "loss": 0.5258,
+ "step": 590
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 0.0002,
+ "loss": 0.5189,
+ "step": 600
+ },
+ {
+ "epoch": 0.74,
+ "eval_loss": 0.5119001865386963,
+ "eval_runtime": 249.8867,
+ "eval_samples_per_second": 4.002,
+ "eval_steps_per_second": 2.001,
+ "step": 600
+ },
+ {
+ "epoch": 0.74,
+ "mmlu_eval_accuracy": 0.45806114323766334,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+ "mmlu_eval_accuracy_college_biology": 0.375,
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
+ "mmlu_eval_accuracy_global_facts": 0.6,
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+ "mmlu_eval_accuracy_high_school_psychology": 0.7666666666666667,
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_world_history": 0.5769230769230769,
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+ "mmlu_eval_accuracy_international_law": 0.6923076923076923,
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
+ "mmlu_eval_accuracy_marketing": 0.8,
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
+ "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+ "mmlu_eval_accuracy_professional_psychology": 0.36231884057971014,
+ "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+ "mmlu_eval_accuracy_virology": 0.3333333333333333,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.1718710024425318,
+ "step": 600
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 0.0002,
+ "loss": 0.5234,
+ "step": 610
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 0.0002,
+ "loss": 0.5205,
+ "step": 620
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 0.0002,
+ "loss": 0.5146,
+ "step": 630
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 0.0002,
+ "loss": 0.5094,
+ "step": 640
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.0002,
+ "loss": 0.4959,
+ "step": 650
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 0.0002,
+ "loss": 0.5001,
+ "step": 660
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.0002,
+ "loss": 0.5007,
+ "step": 670
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 0.0002,
+ "loss": 0.5029,
+ "step": 680
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 0.0002,
+ "loss": 0.5143,
+ "step": 690
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 0.0002,
+ "loss": 0.4983,
+ "step": 700
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 0.0002,
+ "loss": 0.4995,
+ "step": 710
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.0002,
+ "loss": 0.5072,
+ "step": 720
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.0002,
+ "loss": 0.499,
+ "step": 730
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 0.0002,
+ "loss": 0.505,
+ "step": 740
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 0.0002,
+ "loss": 0.4917,
+ "step": 750
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.0002,
+ "loss": 0.4983,
+ "step": 760
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.0002,
+ "loss": 0.4946,
+ "step": 770
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 0.0002,
+ "loss": 0.4931,
+ "step": 780
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.0002,
+ "loss": 0.4836,
+ "step": 790
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 0.0002,
+ "loss": 0.5001,
+ "step": 800
+ },
+ {
+ "epoch": 0.99,
+ "eval_loss": 0.5022817254066467,
+ "eval_runtime": 249.7465,
+ "eval_samples_per_second": 4.004,
+ "eval_steps_per_second": 2.002,
+ "step": 800
+ },
+ {
+ "epoch": 0.99,
+ "mmlu_eval_accuracy": 0.45223086902107806,
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
+ "mmlu_eval_accuracy_college_biology": 0.4375,
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
+ "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+ "mmlu_eval_accuracy_global_facts": 0.5,
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_geography": 0.6363636363636364,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+ "mmlu_eval_accuracy_high_school_psychology": 0.75,
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
+ "mmlu_eval_accuracy_high_school_world_history": 0.5769230769230769,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+ "mmlu_eval_accuracy_international_law": 0.6923076923076923,
+ "mmlu_eval_accuracy_jurisprudence": 0.5454545454545454,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+ "mmlu_eval_accuracy_management": 0.45454545454545453,
+ "mmlu_eval_accuracy_marketing": 0.8,
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
+ "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+ "mmlu_eval_accuracy_professional_law": 0.35294117647058826,
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+ "mmlu_eval_accuracy_professional_psychology": 0.36231884057971014,
+ "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
+ "mmlu_loss": 1.064671132450938,
+ "step": 800
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 0.0002,
+ "loss": 0.5135,
+ "step": 810
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.0002,
+ "loss": 0.4532,
+ "step": 820
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 0.0002,
+ "loss": 0.4483,
+ "step": 830
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.0002,
+ "loss": 0.4507,
+ "step": 840
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 0.0002,
+ "loss": 0.4572,
+ "step": 850
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 0.0002,
+ "loss": 0.4346,
+ "step": 860
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 0.0002,
+ "loss": 0.4306,
+ "step": 870
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 0.0002,
+ "loss": 0.439,
+ "step": 880
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 0.0002,
+ "loss": 0.4215,
+ "step": 890
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 0.0002,
+ "loss": 0.4608,
+ "step": 900
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 0.0002,
+ "loss": 0.4345,
+ "step": 910
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 0.0002,
+ "loss": 0.422,
+ "step": 920
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 0.0002,
+ "loss": 0.4444,
+ "step": 930
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.0002,
+ "loss": 0.4649,
+ "step": 940
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 0.0002,
+ "loss": 0.4508,
+ "step": 950
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 0.0002,
+ "loss": 0.439,
+ "step": 960
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 0.0002,
+ "loss": 0.4347,
+ "step": 970
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 0.0002,
+ "loss": 0.4413,
+ "step": 980
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.0002,
+ "loss": 0.4337,
+ "step": 990
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 0.0002,
+ "loss": 0.4358,
+ "step": 1000
+ },
+ {
+ "epoch": 1.23,
+ "eval_loss": 0.5019292235374451,
+ "eval_runtime": 249.7097,
+ "eval_samples_per_second": 4.005,
+ "eval_steps_per_second": 2.002,
+ "step": 1000
+ },
+ {
+ "epoch": 1.23,
+ "mmlu_eval_accuracy": 0.46197732544268794,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+ "mmlu_eval_accuracy_college_biology": 0.5,
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
+ "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+ "mmlu_eval_accuracy_global_facts": 0.5,
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+ "mmlu_eval_accuracy_high_school_geography": 0.6363636363636364,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+ "mmlu_eval_accuracy_high_school_psychology": 0.7333333333333333,
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_world_history": 0.6153846153846154,
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
+ "mmlu_eval_accuracy_marketing": 0.8,
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
+ "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+ "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+ "mmlu_eval_accuracy_professional_psychology": 0.3188405797101449,
+ "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+ "mmlu_eval_accuracy_virology": 0.5,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.103624303117589,
+ "step": 1000
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 0.0002,
+ "loss": 0.4428,
+ "step": 1010
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 0.0002,
+ "loss": 0.4306,
+ "step": 1020
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 0.0002,
+ "loss": 0.4585,
+ "step": 1030
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 0.0002,
+ "loss": 0.4323,
+ "step": 1040
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.0002,
+ "loss": 0.4333,
+ "step": 1050
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 0.0002,
+ "loss": 0.4364,
+ "step": 1060
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 0.0002,
+ "loss": 0.4256,
+ "step": 1070
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 0.0002,
+ "loss": 0.4197,
+ "step": 1080
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 0.0002,
+ "loss": 0.4382,
+ "step": 1090
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 0.0002,
+ "loss": 0.4489,
+ "step": 1100
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 0.0002,
+ "loss": 0.4152,
+ "step": 1110
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 0.0002,
+ "loss": 0.425,
+ "step": 1120
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 0.0002,
+ "loss": 0.4537,
+ "step": 1130
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 0.0002,
+ "loss": 0.4496,
+ "step": 1140
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 0.0002,
+ "loss": 0.4266,
+ "step": 1150
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 0.0002,
+ "loss": 0.4449,
+ "step": 1160
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 0.0002,
+ "loss": 0.4381,
+ "step": 1170
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 0.0002,
+ "loss": 0.4272,
+ "step": 1180
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 0.0002,
+ "loss": 0.4366,
+ "step": 1190
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 0.0002,
+ "loss": 0.428,
+ "step": 1200
+ },
+ {
+ "epoch": 1.48,
+ "eval_loss": 0.4976211488246918,
+ "eval_runtime": 249.5918,
+ "eval_samples_per_second": 4.007,
+ "eval_steps_per_second": 2.003,
+ "step": 1200
+ },
+ {
+ "epoch": 1.48,
+ "mmlu_eval_accuracy": 0.4642275199494228,
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+ "mmlu_eval_accuracy_college_biology": 0.5,
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+ "mmlu_eval_accuracy_global_facts": 0.6,
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
+ "mmlu_eval_accuracy_high_school_psychology": 0.7833333333333333,
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_world_history": 0.6153846153846154,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
+ "mmlu_eval_accuracy_international_law": 0.6923076923076923,
+ "mmlu_eval_accuracy_jurisprudence": 0.5454545454545454,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
+ "mmlu_eval_accuracy_marketing": 0.76,
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.48484848484848486,
+ "mmlu_eval_accuracy_philosophy": 0.38235294117647056,
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
+ "mmlu_eval_accuracy_professional_psychology": 0.34782608695652173,
+ "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+ "mmlu_eval_accuracy_virology": 0.5,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.0431902918079503,
+ "step": 1200
+ }
+ ],
+ "max_steps": 5000,
+ "num_train_epochs": 7,
+ "total_flos": 3.255388098480046e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
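
`trainer_state.json` is plain JSON, so the loss curve and the periodic eval/MMLU entries in `log_history` can be pulled out directly. A minimal sketch (the checkpoint path is illustrative):

```python
import json

with open("checkpoint-1200/trainer_state.json") as f:
    state = json.load(f)

# The Trainer interleaves train, eval, and MMLU entries; filter by key.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(state["best_metric"], evals[-1])  # best eval loss, reached at step 1200
```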
checkpoint-1200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6f17f777e9c65d11da656f4fb261a01c0f02664d582e7779684bfd1feb2542c
+ size 6011