Farouk committed
Commit 6819c71 · 1 Parent(s): 11aa37a

Training in progress, step 3000

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fa78d5c43ff3103ff5b4ef3a7994fa49636cabd5b4038a856a057d77b14c51dd
+ oid sha256:6a84a04c110cc561cd61eda592343bba095aae111a21ec4a1b06c61f6cc30ba2
  size 319977229
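
Only the LFS pointer changes here; the 319977229-byte adapter weights themselves live in LFS storage, addressed by the sha256 `oid` above. As a minimal sketch (local file path assumed, not part of this commit), one can verify a downloaded file against the new pointer like so:

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the sha256 digest that a Git LFS pointer records as `oid`."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local copy of the file; compare against the new pointer above.
digest = lfs_sha256("adapter_model.bin")
assert digest == "6a84a04c110cc561cd61eda592343bba095aae111a21ec4a1b06c61f6cc30ba2"
```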
{checkpoint-800 → checkpoint-2800/adapter_model/adapter_model}/README.md RENAMED
@@ -4,17 +4,6 @@ library_name: peft
  ## Training procedure
 
 
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
-
  The following `bitsandbytes` quantization config was used during training:
  - load_in_8bit: False
  - load_in_4bit: True
@@ -27,6 +16,5 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_compute_dtype: bfloat16
  ### Framework versions
 
- - PEFT 0.4.0
 
  - PEFT 0.4.0
 
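The diff above deduplicates a `bitsandbytes` config block that checkpointing had appended twice; the surviving settings describe 4-bit NF4 quantization with double quantization and bfloat16 compute. As a hedged sketch only, the same settings expressed through the `transformers` API would look like the following (the kwargs mirror the fields listed in the README; this is not taken from the repo's actual training script):

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the quantization config recorded in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```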
{checkpoint-800 → checkpoint-2800/adapter_model/adapter_model}/adapter_config.json RENAMED
File without changes
{checkpoint-800 → checkpoint-2800/adapter_model/adapter_model}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e76a12b40d96b039c516a755ff0fc82c6483336c390439b21ebc6df94896779f
+ oid sha256:fa78d5c43ff3103ff5b4ef3a7994fa49636cabd5b4038a856a057d77b14c51dd
  size 319977229
{checkpoint-800/adapter_model/adapter_model → checkpoint-3000}/README.md RENAMED
@@ -4,17 +4,6 @@ library_name: peft
  ## Training procedure
 
 
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
-
  The following `bitsandbytes` quantization config was used during training:
  - load_in_8bit: False
  - load_in_4bit: True
@@ -27,6 +16,5 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_compute_dtype: bfloat16
  ### Framework versions
 
- - PEFT 0.4.0
 
  - PEFT 0.4.0
 
{checkpoint-800/adapter_model/adapter_model → checkpoint-3000}/adapter_config.json RENAMED
File without changes
{checkpoint-800/adapter_model/adapter_model → checkpoint-3000}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e76a12b40d96b039c516a755ff0fc82c6483336c390439b21ebc6df94896779f
+ oid sha256:6a84a04c110cc561cd61eda592343bba095aae111a21ec4a1b06c61f6cc30ba2
  size 319977229
{checkpoint-800 → checkpoint-3000}/added_tokens.json RENAMED
File without changes
{checkpoint-800 → checkpoint-3000}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:be25d67708276096612e3d4f1c60ff2d6079396ce302ff645fda3c9e333cb0f9
+ oid sha256:e51335567120756b2f2498969d3ba19b06a8948dc9fec0e67df38875d806f284
  size 1279539973
{checkpoint-800 → checkpoint-3000}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8b2ac0bbb2415a408ba2161d5dc7b93cf5d07edaece22603a20a05406fb2f35f
+ oid sha256:4ca9d129a31d8de5c6fefe2a276ebcc130763f788e0fba92404a1cf4060012e6
  size 14511
{checkpoint-800 → checkpoint-3000}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:de7840bcb72f2f480fd301578d289cdfa174589e831b0d33e5772f3956b6beae
+ oid sha256:cb57538b0fddb988f32d5e33311bcf25efee1aa4001ec3e33ef4a2dd884d77d3
  size 627
{checkpoint-800 → checkpoint-3000}/special_tokens_map.json RENAMED
File without changes
{checkpoint-800 → checkpoint-3000}/tokenizer.model RENAMED
File without changes
{checkpoint-800 → checkpoint-3000}/tokenizer_config.json RENAMED
File without changes
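
The newly added checkpoint-3000/trainer_state.json below carries the full log history: train loss every 10 steps, plus eval loss and per-subject MMLU accuracy every 200 steps. A minimal sketch of pulling the eval-loss curve out of such a file (local path assumed; the keys match those in the JSON below):

```python
import json

# Hypothetical local path to the checkpoint's state file.
with open("checkpoint-3000/trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry carries a "step" plus whichever metrics were
# logged at that step; evaluation entries are the ones with "eval_loss".
eval_curve = [(e["step"], e["eval_loss"])
              for e in state["log_history"] if "eval_loss" in e]

print(f"best eval loss {state['best_metric']:.4f} "
      f"at {state['best_model_checkpoint']}")
for step, loss in eval_curve:
    print(step, loss)
```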
checkpoint-3000/trainer_state.json ADDED
@@ -0,0 +1,2881 @@
1
+ {
2
+ "best_metric": 0.7343361377716064,
3
+ "best_model_checkpoint": "experts/expert-16/checkpoint-3000",
4
+ "epoch": 0.9505703422053232,
5
+ "global_step": 3000,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.0,
12
+ "learning_rate": 0.0002,
13
+ "loss": 0.8339,
14
+ "step": 10
15
+ },
16
+ {
17
+ "epoch": 0.01,
18
+ "learning_rate": 0.0002,
19
+ "loss": 0.8289,
20
+ "step": 20
21
+ },
22
+ {
23
+ "epoch": 0.01,
24
+ "learning_rate": 0.0002,
25
+ "loss": 0.9041,
26
+ "step": 30
27
+ },
28
+ {
29
+ "epoch": 0.01,
30
+ "learning_rate": 0.0002,
31
+ "loss": 0.8491,
32
+ "step": 40
33
+ },
34
+ {
35
+ "epoch": 0.02,
36
+ "learning_rate": 0.0002,
37
+ "loss": 0.8151,
38
+ "step": 50
39
+ },
40
+ {
41
+ "epoch": 0.02,
42
+ "learning_rate": 0.0002,
43
+ "loss": 0.79,
44
+ "step": 60
45
+ },
46
+ {
47
+ "epoch": 0.02,
48
+ "learning_rate": 0.0002,
49
+ "loss": 0.7835,
50
+ "step": 70
51
+ },
52
+ {
53
+ "epoch": 0.03,
54
+ "learning_rate": 0.0002,
55
+ "loss": 0.8831,
56
+ "step": 80
57
+ },
58
+ {
59
+ "epoch": 0.03,
60
+ "learning_rate": 0.0002,
61
+ "loss": 0.8607,
62
+ "step": 90
63
+ },
64
+ {
65
+ "epoch": 0.03,
66
+ "learning_rate": 0.0002,
67
+ "loss": 0.7876,
68
+ "step": 100
69
+ },
70
+ {
71
+ "epoch": 0.03,
72
+ "learning_rate": 0.0002,
73
+ "loss": 0.8031,
74
+ "step": 110
75
+ },
76
+ {
77
+ "epoch": 0.04,
78
+ "learning_rate": 0.0002,
79
+ "loss": 0.8207,
80
+ "step": 120
81
+ },
82
+ {
83
+ "epoch": 0.04,
84
+ "learning_rate": 0.0002,
85
+ "loss": 0.807,
86
+ "step": 130
87
+ },
88
+ {
89
+ "epoch": 0.04,
90
+ "learning_rate": 0.0002,
91
+ "loss": 0.9262,
92
+ "step": 140
93
+ },
94
+ {
95
+ "epoch": 0.05,
96
+ "learning_rate": 0.0002,
97
+ "loss": 0.7964,
98
+ "step": 150
99
+ },
100
+ {
101
+ "epoch": 0.05,
102
+ "learning_rate": 0.0002,
103
+ "loss": 0.7879,
104
+ "step": 160
105
+ },
106
+ {
107
+ "epoch": 0.05,
108
+ "learning_rate": 0.0002,
109
+ "loss": 0.7587,
110
+ "step": 170
111
+ },
112
+ {
113
+ "epoch": 0.06,
114
+ "learning_rate": 0.0002,
115
+ "loss": 0.8091,
116
+ "step": 180
117
+ },
118
+ {
119
+ "epoch": 0.06,
120
+ "learning_rate": 0.0002,
121
+ "loss": 0.8615,
122
+ "step": 190
123
+ },
124
+ {
125
+ "epoch": 0.06,
126
+ "learning_rate": 0.0002,
127
+ "loss": 0.8672,
128
+ "step": 200
129
+ },
130
+ {
131
+ "epoch": 0.06,
132
+ "eval_loss": 0.7779108881950378,
133
+ "eval_runtime": 110.9863,
134
+ "eval_samples_per_second": 9.01,
135
+ "eval_steps_per_second": 4.505,
136
+ "step": 200
137
+ },
138
+ {
139
+ "epoch": 0.06,
140
+ "mmlu_eval_accuracy": 0.4744171116325413,
141
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
142
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
143
+ "mmlu_eval_accuracy_astronomy": 0.4375,
144
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
145
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
146
+ "mmlu_eval_accuracy_college_biology": 0.4375,
147
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
148
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
149
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
150
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
151
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
152
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
153
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
154
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
155
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
156
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
157
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
158
+ "mmlu_eval_accuracy_global_facts": 0.4,
159
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
160
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
161
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
162
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
163
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
164
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
165
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
166
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
167
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
168
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
169
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
170
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
171
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
172
+ "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
173
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
174
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
175
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
176
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
177
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
178
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
179
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
180
+ "mmlu_eval_accuracy_marketing": 0.88,
181
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
182
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
183
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
184
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
185
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
186
+ "mmlu_eval_accuracy_philosophy": 0.5,
187
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
188
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
189
+ "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
190
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
191
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
192
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
193
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
194
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
195
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
196
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
197
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
198
+ "mmlu_loss": 1.5868234255450824,
199
+ "step": 200
200
+ },
201
+ {
202
+ "epoch": 0.07,
203
+ "learning_rate": 0.0002,
204
+ "loss": 0.8316,
205
+ "step": 210
206
+ },
207
+ {
208
+ "epoch": 0.07,
209
+ "learning_rate": 0.0002,
210
+ "loss": 0.8454,
211
+ "step": 220
212
+ },
213
+ {
214
+ "epoch": 0.07,
215
+ "learning_rate": 0.0002,
216
+ "loss": 0.8434,
217
+ "step": 230
218
+ },
219
+ {
220
+ "epoch": 0.08,
221
+ "learning_rate": 0.0002,
222
+ "loss": 0.821,
223
+ "step": 240
224
+ },
225
+ {
226
+ "epoch": 0.08,
227
+ "learning_rate": 0.0002,
228
+ "loss": 0.7893,
229
+ "step": 250
230
+ },
231
+ {
232
+ "epoch": 0.08,
233
+ "learning_rate": 0.0002,
234
+ "loss": 0.8242,
235
+ "step": 260
236
+ },
237
+ {
238
+ "epoch": 0.09,
239
+ "learning_rate": 0.0002,
240
+ "loss": 0.8128,
241
+ "step": 270
242
+ },
243
+ {
244
+ "epoch": 0.09,
245
+ "learning_rate": 0.0002,
246
+ "loss": 0.8344,
247
+ "step": 280
248
+ },
249
+ {
250
+ "epoch": 0.09,
251
+ "learning_rate": 0.0002,
252
+ "loss": 0.8338,
253
+ "step": 290
254
+ },
255
+ {
256
+ "epoch": 0.1,
257
+ "learning_rate": 0.0002,
258
+ "loss": 0.7981,
259
+ "step": 300
260
+ },
261
+ {
262
+ "epoch": 0.1,
263
+ "learning_rate": 0.0002,
264
+ "loss": 0.781,
265
+ "step": 310
266
+ },
267
+ {
268
+ "epoch": 0.1,
269
+ "learning_rate": 0.0002,
270
+ "loss": 0.7717,
271
+ "step": 320
272
+ },
273
+ {
274
+ "epoch": 0.1,
275
+ "learning_rate": 0.0002,
276
+ "loss": 0.767,
277
+ "step": 330
278
+ },
279
+ {
280
+ "epoch": 0.11,
281
+ "learning_rate": 0.0002,
282
+ "loss": 0.7925,
283
+ "step": 340
284
+ },
285
+ {
286
+ "epoch": 0.11,
287
+ "learning_rate": 0.0002,
288
+ "loss": 0.8226,
289
+ "step": 350
290
+ },
291
+ {
292
+ "epoch": 0.11,
293
+ "learning_rate": 0.0002,
294
+ "loss": 0.7912,
295
+ "step": 360
296
+ },
297
+ {
298
+ "epoch": 0.12,
299
+ "learning_rate": 0.0002,
300
+ "loss": 0.8093,
301
+ "step": 370
302
+ },
303
+ {
304
+ "epoch": 0.12,
305
+ "learning_rate": 0.0002,
306
+ "loss": 0.7648,
307
+ "step": 380
308
+ },
309
+ {
310
+ "epoch": 0.12,
311
+ "learning_rate": 0.0002,
312
+ "loss": 0.7866,
313
+ "step": 390
314
+ },
315
+ {
316
+ "epoch": 0.13,
317
+ "learning_rate": 0.0002,
318
+ "loss": 0.7976,
319
+ "step": 400
320
+ },
321
+ {
322
+ "epoch": 0.13,
323
+ "eval_loss": 0.7656086683273315,
324
+ "eval_runtime": 110.9802,
325
+ "eval_samples_per_second": 9.011,
326
+ "eval_steps_per_second": 4.505,
327
+ "step": 400
328
+ },
329
+ {
330
+ "epoch": 0.13,
331
+ "mmlu_eval_accuracy": 0.47124130233512024,
332
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
333
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
334
+ "mmlu_eval_accuracy_astronomy": 0.4375,
335
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
336
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
337
+ "mmlu_eval_accuracy_college_biology": 0.4375,
338
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
339
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
340
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
341
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
342
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
343
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
344
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
345
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
346
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
347
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
348
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
349
+ "mmlu_eval_accuracy_global_facts": 0.4,
350
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
351
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
352
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
353
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
354
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
355
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
356
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
357
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
358
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
359
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
360
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
361
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
362
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
363
+ "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
364
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
365
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
366
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
367
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
368
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
369
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
370
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
371
+ "mmlu_eval_accuracy_marketing": 0.84,
372
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
373
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
374
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
375
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
376
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
377
+ "mmlu_eval_accuracy_philosophy": 0.5,
378
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
379
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
380
+ "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
381
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
382
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
383
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
384
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
385
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
386
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
387
+ "mmlu_eval_accuracy_virology": 0.5,
388
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
389
+ "mmlu_loss": 1.4339068503199297,
390
+ "step": 400
391
+ },
392
+ {
393
+ "epoch": 0.13,
394
+ "learning_rate": 0.0002,
395
+ "loss": 0.8182,
396
+ "step": 410
397
+ },
398
+ {
399
+ "epoch": 0.13,
400
+ "learning_rate": 0.0002,
401
+ "loss": 0.8438,
402
+ "step": 420
403
+ },
404
+ {
405
+ "epoch": 0.14,
406
+ "learning_rate": 0.0002,
407
+ "loss": 0.8184,
408
+ "step": 430
409
+ },
410
+ {
411
+ "epoch": 0.14,
412
+ "learning_rate": 0.0002,
413
+ "loss": 0.8202,
414
+ "step": 440
415
+ },
416
+ {
417
+ "epoch": 0.14,
418
+ "learning_rate": 0.0002,
419
+ "loss": 0.8264,
420
+ "step": 450
421
+ },
422
+ {
423
+ "epoch": 0.15,
424
+ "learning_rate": 0.0002,
425
+ "loss": 0.8384,
426
+ "step": 460
427
+ },
428
+ {
429
+ "epoch": 0.15,
430
+ "learning_rate": 0.0002,
431
+ "loss": 0.8372,
432
+ "step": 470
433
+ },
434
+ {
435
+ "epoch": 0.15,
436
+ "learning_rate": 0.0002,
437
+ "loss": 0.8072,
438
+ "step": 480
439
+ },
440
+ {
441
+ "epoch": 0.16,
442
+ "learning_rate": 0.0002,
443
+ "loss": 0.8214,
444
+ "step": 490
445
+ },
446
+ {
447
+ "epoch": 0.16,
448
+ "learning_rate": 0.0002,
449
+ "loss": 0.814,
450
+ "step": 500
451
+ },
452
+ {
453
+ "epoch": 0.16,
454
+ "learning_rate": 0.0002,
455
+ "loss": 0.847,
456
+ "step": 510
457
+ },
458
+ {
459
+ "epoch": 0.16,
460
+ "learning_rate": 0.0002,
461
+ "loss": 0.8444,
462
+ "step": 520
463
+ },
464
+ {
465
+ "epoch": 0.17,
466
+ "learning_rate": 0.0002,
467
+ "loss": 0.8096,
468
+ "step": 530
469
+ },
470
+ {
471
+ "epoch": 0.17,
472
+ "learning_rate": 0.0002,
473
+ "loss": 0.8496,
474
+ "step": 540
475
+ },
476
+ {
477
+ "epoch": 0.17,
478
+ "learning_rate": 0.0002,
479
+ "loss": 0.7729,
480
+ "step": 550
481
+ },
482
+ {
483
+ "epoch": 0.18,
484
+ "learning_rate": 0.0002,
485
+ "loss": 0.7826,
486
+ "step": 560
487
+ },
488
+ {
489
+ "epoch": 0.18,
490
+ "learning_rate": 0.0002,
491
+ "loss": 0.7478,
492
+ "step": 570
493
+ },
494
+ {
495
+ "epoch": 0.18,
496
+ "learning_rate": 0.0002,
497
+ "loss": 0.7953,
498
+ "step": 580
499
+ },
500
+ {
501
+ "epoch": 0.19,
502
+ "learning_rate": 0.0002,
503
+ "loss": 0.7363,
504
+ "step": 590
505
+ },
506
+ {
507
+ "epoch": 0.19,
508
+ "learning_rate": 0.0002,
509
+ "loss": 0.7971,
510
+ "step": 600
511
+ },
512
+ {
513
+ "epoch": 0.19,
514
+ "eval_loss": 0.7616064548492432,
515
+ "eval_runtime": 110.9404,
516
+ "eval_samples_per_second": 9.014,
517
+ "eval_steps_per_second": 4.507,
518
+ "step": 600
519
+ },
520
+ {
521
+ "epoch": 0.19,
522
+ "mmlu_eval_accuracy": 0.4749850916074463,
523
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
524
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
525
+ "mmlu_eval_accuracy_astronomy": 0.4375,
526
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
527
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
528
+ "mmlu_eval_accuracy_college_biology": 0.4375,
529
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
530
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
531
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
532
+ "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
533
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
534
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
535
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
536
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
537
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
538
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
539
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
540
+ "mmlu_eval_accuracy_global_facts": 0.3,
541
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
542
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
543
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
544
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
545
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
546
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
547
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
548
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
549
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
550
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
551
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
552
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
553
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
554
+ "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
555
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
556
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
557
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
558
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
559
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
560
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
561
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
562
+ "mmlu_eval_accuracy_marketing": 0.84,
563
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
564
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
565
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
566
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
567
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
568
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
569
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
570
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
571
+ "mmlu_eval_accuracy_professional_law": 0.3,
572
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
573
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
574
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
575
+ "mmlu_eval_accuracy_security_studies": 0.5555555555555556,
576
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
577
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
578
+ "mmlu_eval_accuracy_virology": 0.5,
579
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
580
+ "mmlu_loss": 1.5647042619341658,
581
+ "step": 600
582
+ },
583
+ {
584
+ "epoch": 0.19,
585
+ "learning_rate": 0.0002,
586
+ "loss": 0.7936,
587
+ "step": 610
588
+ },
589
+ {
590
+ "epoch": 0.2,
591
+ "learning_rate": 0.0002,
592
+ "loss": 0.7319,
593
+ "step": 620
594
+ },
595
+ {
596
+ "epoch": 0.2,
597
+ "learning_rate": 0.0002,
598
+ "loss": 0.79,
599
+ "step": 630
600
+ },
601
+ {
602
+ "epoch": 0.2,
603
+ "learning_rate": 0.0002,
604
+ "loss": 0.7806,
605
+ "step": 640
606
+ },
607
+ {
608
+ "epoch": 0.21,
609
+ "learning_rate": 0.0002,
610
+ "loss": 0.8833,
611
+ "step": 650
612
+ },
613
+ {
614
+ "epoch": 0.21,
615
+ "learning_rate": 0.0002,
616
+ "loss": 0.7711,
617
+ "step": 660
618
+ },
619
+ {
620
+ "epoch": 0.21,
621
+ "learning_rate": 0.0002,
622
+ "loss": 0.8242,
623
+ "step": 670
624
+ },
625
+ {
626
+ "epoch": 0.22,
627
+ "learning_rate": 0.0002,
628
+ "loss": 0.7948,
629
+ "step": 680
630
+ },
631
+ {
632
+ "epoch": 0.22,
633
+ "learning_rate": 0.0002,
634
+ "loss": 0.7417,
635
+ "step": 690
636
+ },
637
+ {
638
+ "epoch": 0.22,
639
+ "learning_rate": 0.0002,
640
+ "loss": 0.7275,
641
+ "step": 700
642
+ },
643
+ {
644
+ "epoch": 0.22,
645
+ "learning_rate": 0.0002,
646
+ "loss": 0.8137,
647
+ "step": 710
648
+ },
649
+ {
650
+ "epoch": 0.23,
651
+ "learning_rate": 0.0002,
652
+ "loss": 0.8568,
653
+ "step": 720
654
+ },
655
+ {
656
+ "epoch": 0.23,
657
+ "learning_rate": 0.0002,
658
+ "loss": 0.802,
659
+ "step": 730
660
+ },
661
+ {
662
+ "epoch": 0.23,
663
+ "learning_rate": 0.0002,
664
+ "loss": 0.8202,
665
+ "step": 740
666
+ },
667
+ {
668
+ "epoch": 0.24,
669
+ "learning_rate": 0.0002,
670
+ "loss": 0.8077,
671
+ "step": 750
672
+ },
673
+ {
674
+ "epoch": 0.24,
675
+ "learning_rate": 0.0002,
676
+ "loss": 0.814,
677
+ "step": 760
678
+ },
679
+ {
680
+ "epoch": 0.24,
681
+ "learning_rate": 0.0002,
682
+ "loss": 0.7971,
683
+ "step": 770
684
+ },
685
+ {
686
+ "epoch": 0.25,
687
+ "learning_rate": 0.0002,
688
+ "loss": 0.798,
689
+ "step": 780
690
+ },
691
+ {
692
+ "epoch": 0.25,
693
+ "learning_rate": 0.0002,
694
+ "loss": 0.7806,
695
+ "step": 790
696
+ },
697
+ {
698
+ "epoch": 0.25,
699
+ "learning_rate": 0.0002,
700
+ "loss": 0.8042,
701
+ "step": 800
702
+ },
703
+ {
704
+ "epoch": 0.25,
705
+ "eval_loss": 0.7563537359237671,
706
+ "eval_runtime": 111.023,
707
+ "eval_samples_per_second": 9.007,
708
+ "eval_steps_per_second": 4.504,
709
+ "step": 800
710
+ },
711
+ {
712
+ "epoch": 0.25,
713
+ "mmlu_eval_accuracy": 0.4796267144005645,
714
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
715
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
716
+ "mmlu_eval_accuracy_astronomy": 0.4375,
717
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
718
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
719
+ "mmlu_eval_accuracy_college_biology": 0.4375,
720
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
721
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
722
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
723
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
724
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
725
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
726
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
727
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
728
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
729
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
730
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
731
+ "mmlu_eval_accuracy_global_facts": 0.4,
732
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
733
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
734
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
735
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
736
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
737
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
738
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
739
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
740
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
741
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
742
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
743
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
744
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
745
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
746
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
747
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
748
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
749
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
750
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
751
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
752
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
753
+ "mmlu_eval_accuracy_marketing": 0.84,
754
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
755
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
756
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
757
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
758
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
759
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
760
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
761
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
762
+ "mmlu_eval_accuracy_professional_law": 0.3,
763
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
764
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
765
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
766
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
767
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
768
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
769
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
770
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
771
+ "mmlu_loss": 1.4866046660796157,
772
+ "step": 800
773
+ },
774
+ {
775
+ "epoch": 0.26,
776
+ "learning_rate": 0.0002,
777
+ "loss": 0.8119,
778
+ "step": 810
779
+ },
780
+ {
781
+ "epoch": 0.26,
782
+ "learning_rate": 0.0002,
783
+ "loss": 0.8156,
784
+ "step": 820
785
+ },
786
+ {
787
+ "epoch": 0.26,
788
+ "learning_rate": 0.0002,
789
+ "loss": 0.8288,
790
+ "step": 830
791
+ },
792
+ {
793
+ "epoch": 0.27,
794
+ "learning_rate": 0.0002,
795
+ "loss": 0.8008,
796
+ "step": 840
797
+ },
798
+ {
799
+ "epoch": 0.27,
800
+ "learning_rate": 0.0002,
801
+ "loss": 0.8649,
802
+ "step": 850
803
+ },
804
+ {
805
+ "epoch": 0.27,
806
+ "learning_rate": 0.0002,
807
+ "loss": 0.8242,
808
+ "step": 860
809
+ },
810
+ {
811
+ "epoch": 0.28,
812
+ "learning_rate": 0.0002,
813
+ "loss": 0.8255,
814
+ "step": 870
815
+ },
816
+ {
817
+ "epoch": 0.28,
818
+ "learning_rate": 0.0002,
819
+ "loss": 0.8467,
820
+ "step": 880
821
+ },
822
+ {
823
+ "epoch": 0.28,
824
+ "learning_rate": 0.0002,
825
+ "loss": 0.8264,
826
+ "step": 890
827
+ },
828
+ {
829
+ "epoch": 0.29,
830
+ "learning_rate": 0.0002,
831
+ "loss": 0.7833,
832
+ "step": 900
833
+ },
834
+ {
835
+ "epoch": 0.29,
836
+ "learning_rate": 0.0002,
837
+ "loss": 0.8338,
838
+ "step": 910
839
+ },
840
+ {
841
+ "epoch": 0.29,
842
+ "learning_rate": 0.0002,
843
+ "loss": 0.8062,
844
+ "step": 920
845
+ },
846
+ {
847
+ "epoch": 0.29,
848
+ "learning_rate": 0.0002,
849
+ "loss": 0.8112,
850
+ "step": 930
851
+ },
852
+ {
853
+ "epoch": 0.3,
854
+ "learning_rate": 0.0002,
855
+ "loss": 0.7469,
856
+ "step": 940
857
+ },
858
+ {
859
+ "epoch": 0.3,
860
+ "learning_rate": 0.0002,
861
+ "loss": 0.7897,
862
+ "step": 950
863
+ },
864
+ {
865
+ "epoch": 0.3,
866
+ "learning_rate": 0.0002,
867
+ "loss": 0.8081,
868
+ "step": 960
869
+ },
870
+ {
871
+ "epoch": 0.31,
872
+ "learning_rate": 0.0002,
873
+ "loss": 0.7571,
874
+ "step": 970
875
+ },
876
+ {
877
+ "epoch": 0.31,
878
+ "learning_rate": 0.0002,
879
+ "loss": 0.8161,
880
+ "step": 980
881
+ },
882
+ {
883
+ "epoch": 0.31,
884
+ "learning_rate": 0.0002,
885
+ "loss": 0.7759,
886
+ "step": 990
887
+ },
888
+ {
889
+ "epoch": 0.32,
890
+ "learning_rate": 0.0002,
891
+ "loss": 0.7417,
892
+ "step": 1000
893
+ },
894
+ {
895
+ "epoch": 0.32,
896
+ "eval_loss": 0.754473865032196,
897
+ "eval_runtime": 111.0233,
898
+ "eval_samples_per_second": 9.007,
899
+ "eval_steps_per_second": 4.504,
900
+ "step": 1000
901
+ },
902
+ {
903
+ "epoch": 0.32,
904
+ "mmlu_eval_accuracy": 0.4749030525395577,
905
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
906
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
907
+ "mmlu_eval_accuracy_astronomy": 0.4375,
908
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
909
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
910
+ "mmlu_eval_accuracy_college_biology": 0.4375,
911
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
912
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
913
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
914
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
915
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
916
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
917
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
918
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
919
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
920
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
921
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
922
+ "mmlu_eval_accuracy_global_facts": 0.4,
923
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
924
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
925
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
926
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
927
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
928
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
929
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
930
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
931
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.2692307692307692,
932
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
933
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
934
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
935
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
936
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
937
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
938
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
939
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
940
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
941
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
942
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
943
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
944
+ "mmlu_eval_accuracy_marketing": 0.88,
945
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
946
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
947
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
948
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
949
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
950
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
951
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
952
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
953
+ "mmlu_eval_accuracy_professional_law": 0.29411764705882354,
954
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
955
+ "mmlu_eval_accuracy_professional_psychology": 0.5362318840579711,
956
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
957
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
958
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
959
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
960
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
961
+ "mmlu_eval_accuracy_world_religions": 0.8421052631578947,
962
+ "mmlu_loss": 1.596783688734468,
963
+ "step": 1000
964
+ },
965
+ {
966
+ "epoch": 0.32,
967
+ "learning_rate": 0.0002,
968
+ "loss": 0.827,
969
+ "step": 1010
970
+ },
971
+ {
972
+ "epoch": 0.32,
973
+ "learning_rate": 0.0002,
974
+ "loss": 0.8345,
975
+ "step": 1020
976
+ },
977
+ {
978
+ "epoch": 0.33,
979
+ "learning_rate": 0.0002,
980
+ "loss": 0.7883,
981
+ "step": 1030
982
+ },
983
+ {
984
+ "epoch": 0.33,
985
+ "learning_rate": 0.0002,
986
+ "loss": 0.7774,
987
+ "step": 1040
988
+ },
989
+ {
990
+ "epoch": 0.33,
991
+ "learning_rate": 0.0002,
992
+ "loss": 0.8175,
993
+ "step": 1050
994
+ },
995
+ {
996
+ "epoch": 0.34,
997
+ "learning_rate": 0.0002,
998
+ "loss": 0.8,
999
+ "step": 1060
1000
+ },
1001
+ {
1002
+ "epoch": 0.34,
1003
+ "learning_rate": 0.0002,
1004
+ "loss": 0.8049,
1005
+ "step": 1070
1006
+ },
1007
+ {
1008
+ "epoch": 0.34,
1009
+ "learning_rate": 0.0002,
1010
+ "loss": 0.8116,
1011
+ "step": 1080
1012
+ },
1013
+ {
1014
+ "epoch": 0.35,
1015
+ "learning_rate": 0.0002,
1016
+ "loss": 0.7852,
1017
+ "step": 1090
1018
+ },
1019
+ {
1020
+ "epoch": 0.35,
1021
+ "learning_rate": 0.0002,
1022
+ "loss": 0.7429,
1023
+ "step": 1100
1024
+ },
1025
+ {
1026
+ "epoch": 0.35,
1027
+ "learning_rate": 0.0002,
1028
+ "loss": 0.794,
1029
+ "step": 1110
1030
+ },
1031
+ {
1032
+ "epoch": 0.35,
1033
+ "learning_rate": 0.0002,
1034
+ "loss": 0.7549,
1035
+ "step": 1120
1036
+ },
1037
+ {
1038
+ "epoch": 0.36,
1039
+ "learning_rate": 0.0002,
1040
+ "loss": 0.7347,
1041
+ "step": 1130
1042
+ },
1043
+ {
1044
+ "epoch": 0.36,
1045
+ "learning_rate": 0.0002,
1046
+ "loss": 0.7482,
1047
+ "step": 1140
1048
+ },
1049
+ {
1050
+ "epoch": 0.36,
1051
+ "learning_rate": 0.0002,
1052
+ "loss": 0.7393,
1053
+ "step": 1150
1054
+ },
1055
+ {
1056
+ "epoch": 0.37,
1057
+ "learning_rate": 0.0002,
1058
+ "loss": 0.8103,
1059
+ "step": 1160
1060
+ },
1061
+ {
1062
+ "epoch": 0.37,
1063
+ "learning_rate": 0.0002,
1064
+ "loss": 0.8075,
1065
+ "step": 1170
1066
+ },
1067
+ {
1068
+ "epoch": 0.37,
1069
+ "learning_rate": 0.0002,
1070
+ "loss": 0.7831,
1071
+ "step": 1180
1072
+ },
1073
+ {
1074
+ "epoch": 0.38,
1075
+ "learning_rate": 0.0002,
1076
+ "loss": 0.792,
1077
+ "step": 1190
1078
+ },
1079
+ {
1080
+ "epoch": 0.38,
1081
+ "learning_rate": 0.0002,
1082
+ "loss": 0.7955,
1083
+ "step": 1200
1084
+ },
1085
+ {
1086
+ "epoch": 0.38,
1087
+ "eval_loss": 0.7498393654823303,
1088
+ "eval_runtime": 110.9719,
1089
+ "eval_samples_per_second": 9.011,
1090
+ "eval_steps_per_second": 4.506,
1091
+ "step": 1200
1092
+ },
1093
+ {
1094
+ "epoch": 0.38,
1095
+ "mmlu_eval_accuracy": 0.4769718071089565,
1096
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
1097
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
1098
+ "mmlu_eval_accuracy_astronomy": 0.4375,
1099
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
1100
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
1101
+ "mmlu_eval_accuracy_college_biology": 0.4375,
1102
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
1103
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
1104
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
1105
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
1106
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
1107
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
1108
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
1109
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
1110
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
1111
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
1112
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
1113
+ "mmlu_eval_accuracy_global_facts": 0.4,
1114
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
1115
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
1116
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
1117
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
1118
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
1119
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
1120
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
1121
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
1122
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
1123
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
1124
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
1125
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
1126
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
1127
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
1128
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
1129
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
1130
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
1131
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
1132
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
1133
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
1134
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
1135
+ "mmlu_eval_accuracy_marketing": 0.84,
1136
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
1137
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
1138
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
1139
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
1140
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
1141
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
1142
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
1143
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
1144
+ "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
1145
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
1146
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
1147
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
1148
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
1149
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
1150
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
1151
+ "mmlu_eval_accuracy_virology": 0.5,
1152
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
1153
+ "mmlu_loss": 1.6994269322166244,
1154
+ "step": 1200
1155
+ },
1156
+ {
1157
+ "epoch": 0.38,
1158
+ "learning_rate": 0.0002,
1159
+ "loss": 0.7821,
1160
+ "step": 1210
1161
+ },
1162
+ {
1163
+ "epoch": 0.39,
1164
+ "learning_rate": 0.0002,
1165
+ "loss": 0.8138,
1166
+ "step": 1220
1167
+ },
1168
+ {
1169
+ "epoch": 0.39,
1170
+ "learning_rate": 0.0002,
1171
+ "loss": 0.717,
1172
+ "step": 1230
1173
+ },
1174
+ {
1175
+ "epoch": 0.39,
1176
+ "learning_rate": 0.0002,
1177
+ "loss": 0.7406,
1178
+ "step": 1240
1179
+ },
1180
+ {
1181
+ "epoch": 0.4,
1182
+ "learning_rate": 0.0002,
1183
+ "loss": 0.8031,
1184
+ "step": 1250
1185
+ },
1186
+ {
1187
+ "epoch": 0.4,
1188
+ "learning_rate": 0.0002,
1189
+ "loss": 0.7974,
1190
+ "step": 1260
1191
+ },
1192
+ {
1193
+ "epoch": 0.4,
1194
+ "learning_rate": 0.0002,
1195
+ "loss": 0.8001,
1196
+ "step": 1270
1197
+ },
1198
+ {
1199
+ "epoch": 0.41,
1200
+ "learning_rate": 0.0002,
1201
+ "loss": 0.766,
1202
+ "step": 1280
1203
+ },
1204
+ {
1205
+ "epoch": 0.41,
1206
+ "learning_rate": 0.0002,
1207
+ "loss": 0.7679,
1208
+ "step": 1290
1209
+ },
1210
+ {
1211
+ "epoch": 0.41,
1212
+ "learning_rate": 0.0002,
1213
+ "loss": 0.8017,
1214
+ "step": 1300
1215
+ },
1216
+ {
1217
+ "epoch": 0.42,
1218
+ "learning_rate": 0.0002,
1219
+ "loss": 0.8027,
1220
+ "step": 1310
1221
+ },
1222
+ {
1223
+ "epoch": 0.42,
1224
+ "learning_rate": 0.0002,
1225
+ "loss": 0.7819,
1226
+ "step": 1320
1227
+ },
1228
+ {
1229
+ "epoch": 0.42,
1230
+ "learning_rate": 0.0002,
1231
+ "loss": 0.7558,
1232
+ "step": 1330
1233
+ },
1234
+ {
1235
+ "epoch": 0.42,
1236
+ "learning_rate": 0.0002,
1237
+ "loss": 0.8363,
1238
+ "step": 1340
1239
+ },
1240
+ {
1241
+ "epoch": 0.43,
1242
+ "learning_rate": 0.0002,
1243
+ "loss": 0.7809,
1244
+ "step": 1350
1245
+ },
1246
+ {
1247
+ "epoch": 0.43,
1248
+ "learning_rate": 0.0002,
1249
+ "loss": 0.8114,
1250
+ "step": 1360
1251
+ },
1252
+ {
1253
+ "epoch": 0.43,
1254
+ "learning_rate": 0.0002,
1255
+ "loss": 0.8446,
1256
+ "step": 1370
1257
+ },
1258
+ {
1259
+ "epoch": 0.44,
1260
+ "learning_rate": 0.0002,
1261
+ "loss": 0.7877,
1262
+ "step": 1380
1263
+ },
1264
+ {
1265
+ "epoch": 0.44,
1266
+ "learning_rate": 0.0002,
1267
+ "loss": 0.8309,
1268
+ "step": 1390
1269
+ },
1270
+ {
1271
+ "epoch": 0.44,
1272
+ "learning_rate": 0.0002,
1273
+ "loss": 0.8131,
1274
+ "step": 1400
1275
+ },
1276
+ {
1277
+ "epoch": 0.44,
1278
+ "eval_loss": 0.7500243186950684,
1279
+ "eval_runtime": 111.0409,
1280
+ "eval_samples_per_second": 9.006,
1281
+ "eval_steps_per_second": 4.503,
1282
+ "step": 1400
1283
+ },
1284
+ {
1285
+ "epoch": 0.44,
1286
+ "mmlu_eval_accuracy": 0.4758070782649682,
1287
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
1288
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
1289
+ "mmlu_eval_accuracy_astronomy": 0.4375,
1290
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
1291
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
1292
+ "mmlu_eval_accuracy_college_biology": 0.5,
1293
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
1294
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
1295
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
1296
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
1297
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
1298
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
1299
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
1300
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
1301
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
1302
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
1303
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
1304
+ "mmlu_eval_accuracy_global_facts": 0.4,
1305
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
1306
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
1307
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
1308
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
1309
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
1310
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.42857142857142855,
1311
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
1312
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
1313
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
1314
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
1315
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
1316
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
1317
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
1318
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
1319
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
1320
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
1321
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
1322
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
1323
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
1324
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
1325
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
1326
+ "mmlu_eval_accuracy_marketing": 0.88,
1327
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
1328
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
1329
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
1330
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
1331
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
1332
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
1333
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
1334
+ "mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
1335
+ "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
1336
+ "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
1337
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
1338
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
1339
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
1340
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
1341
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
1342
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
1343
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
1344
+ "mmlu_loss": 1.4974891513041355,
1345
+ "step": 1400
1346
+ },
1347
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.0002,
+ "loss": 0.8122,
+ "step": 1410
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.0002,
+ "loss": 0.7754,
+ "step": 1420
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.0002,
+ "loss": 0.8116,
+ "step": 1430
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 0.0002,
+ "loss": 0.7442,
+ "step": 1440
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 0.0002,
+ "loss": 0.7638,
+ "step": 1450
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 0.0002,
+ "loss": 0.7746,
+ "step": 1460
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 0.0002,
+ "loss": 0.7616,
+ "step": 1470
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 0.0002,
+ "loss": 0.8144,
+ "step": 1480
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 0.0002,
+ "loss": 0.7924,
+ "step": 1490
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.0002,
+ "loss": 0.8075,
+ "step": 1500
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.0002,
+ "loss": 0.769,
+ "step": 1510
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.0002,
+ "loss": 0.7296,
+ "step": 1520
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.0002,
+ "loss": 0.8284,
+ "step": 1530
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.0002,
+ "loss": 0.82,
+ "step": 1540
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.0002,
+ "loss": 0.7619,
+ "step": 1550
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.0002,
+ "loss": 0.7862,
+ "step": 1560
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 0.0002,
+ "loss": 0.7835,
+ "step": 1570
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 0.0002,
+ "loss": 0.7624,
+ "step": 1580
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 0.0002,
+ "loss": 0.8021,
+ "step": 1590
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 0.0002,
+ "loss": 0.793,
+ "step": 1600
+ },
+ {
+ "epoch": 0.51,
+ "eval_loss": 0.7459111213684082,
+ "eval_runtime": 111.062,
+ "eval_samples_per_second": 9.004,
+ "eval_steps_per_second": 4.502,
+ "step": 1600
+ },
+ {
+ "epoch": 0.51,
+ "mmlu_eval_accuracy": 0.46888485292306403,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
+ "mmlu_eval_accuracy_college_biology": 0.4375,
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
+ "mmlu_eval_accuracy_global_facts": 0.3,
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
+ "mmlu_eval_accuracy_marketing": 0.88,
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+ "mmlu_eval_accuracy_philosophy": 0.5,
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
+ "mmlu_loss": 1.6951039852112453,
+ "step": 1600
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 0.0002,
+ "loss": 0.7999,
+ "step": 1610
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 0.0002,
+ "loss": 0.7959,
+ "step": 1620
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 0.0002,
+ "loss": 0.7887,
+ "step": 1630
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 0.0002,
+ "loss": 0.7186,
+ "step": 1640
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 0.0002,
+ "loss": 0.8049,
+ "step": 1650
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.0002,
+ "loss": 0.7934,
+ "step": 1660
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.0002,
+ "loss": 0.8369,
+ "step": 1670
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.0002,
+ "loss": 0.7567,
+ "step": 1680
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 0.0002,
+ "loss": 0.8058,
+ "step": 1690
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 0.0002,
+ "loss": 0.7818,
+ "step": 1700
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 0.0002,
+ "loss": 0.7115,
+ "step": 1710
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 0.0002,
+ "loss": 0.7434,
+ "step": 1720
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 0.0002,
+ "loss": 0.7788,
+ "step": 1730
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 0.0002,
+ "loss": 0.7824,
+ "step": 1740
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 0.0002,
+ "loss": 0.7198,
+ "step": 1750
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.0002,
+ "loss": 0.8059,
+ "step": 1760
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.0002,
+ "loss": 0.7892,
+ "step": 1770
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.0002,
+ "loss": 0.8048,
+ "step": 1780
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 0.0002,
+ "loss": 0.7938,
+ "step": 1790
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 0.0002,
+ "loss": 0.791,
+ "step": 1800
+ },
+ {
+ "epoch": 0.57,
+ "eval_loss": 0.744739830493927,
+ "eval_runtime": 111.1326,
+ "eval_samples_per_second": 8.998,
+ "eval_steps_per_second": 4.499,
+ "step": 1800
+ },
+ {
+ "epoch": 0.57,
+ "mmlu_eval_accuracy": 0.4764276491893982,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+ "mmlu_eval_accuracy_college_biology": 0.4375,
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
+ "mmlu_eval_accuracy_global_facts": 0.3,
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
+ "mmlu_eval_accuracy_high_school_psychology": 0.9,
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
+ "mmlu_eval_accuracy_marketing": 0.88,
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+ "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
+ "mmlu_eval_accuracy_philosophy": 0.5882352941176471,
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
+ "mmlu_eval_accuracy_professional_law": 0.3,
+ "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
+ "mmlu_eval_accuracy_virology": 0.5,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.7224162761754218,
+ "step": 1800
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 0.0002,
+ "loss": 0.7335,
+ "step": 1810
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 0.7762,
+ "step": 1820
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 0.75,
+ "step": 1830
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 0.7875,
+ "step": 1840
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 0.0002,
+ "loss": 0.7749,
+ "step": 1850
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 0.0002,
+ "loss": 0.8516,
+ "step": 1860
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 0.0002,
+ "loss": 0.7729,
+ "step": 1870
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 0.0002,
+ "loss": 0.7664,
+ "step": 1880
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 0.0002,
+ "loss": 0.802,
+ "step": 1890
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 0.0002,
+ "loss": 0.7791,
+ "step": 1900
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 0.0002,
+ "loss": 0.8041,
+ "step": 1910
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 0.0002,
+ "loss": 0.7671,
+ "step": 1920
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 0.0002,
+ "loss": 0.7785,
+ "step": 1930
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 0.0002,
+ "loss": 0.782,
+ "step": 1940
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 0.0002,
+ "loss": 0.8032,
+ "step": 1950
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 0.0002,
+ "loss": 0.8065,
+ "step": 1960
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 0.0002,
+ "loss": 0.7713,
+ "step": 1970
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.0002,
+ "loss": 0.7709,
+ "step": 1980
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.0002,
+ "loss": 0.8036,
+ "step": 1990
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.0002,
+ "loss": 0.7614,
+ "step": 2000
+ },
+ {
+ "epoch": 0.63,
+ "eval_loss": 0.7417653799057007,
+ "eval_runtime": 111.078,
+ "eval_samples_per_second": 9.003,
+ "eval_steps_per_second": 4.501,
+ "step": 2000
+ },
+ {
+ "epoch": 0.63,
+ "mmlu_eval_accuracy": 0.4656871532254676,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
+ "mmlu_eval_accuracy_astronomy": 0.375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+ "mmlu_eval_accuracy_college_biology": 0.4375,
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
+ "mmlu_eval_accuracy_global_facts": 0.5,
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
+ "mmlu_eval_accuracy_marketing": 0.76,
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
+ "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+ "mmlu_eval_accuracy_virology": 0.5,
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
+ "mmlu_loss": 1.632609389158204,
+ "step": 2000
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 0.0002,
+ "loss": 0.8459,
+ "step": 2010
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 0.0002,
+ "loss": 0.7348,
+ "step": 2020
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 0.0002,
+ "loss": 0.811,
+ "step": 2030
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 0.0002,
+ "loss": 0.7091,
+ "step": 2040
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 0.0002,
+ "loss": 0.7715,
+ "step": 2050
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 0.0002,
+ "loss": 0.8017,
+ "step": 2060
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.0002,
+ "loss": 0.7734,
+ "step": 2070
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.0002,
+ "loss": 0.8292,
+ "step": 2080
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.0002,
+ "loss": 0.7873,
+ "step": 2090
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 0.0002,
+ "loss": 0.757,
+ "step": 2100
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 0.0002,
+ "loss": 0.7986,
+ "step": 2110
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 0.0002,
+ "loss": 0.7848,
+ "step": 2120
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 0.0002,
+ "loss": 0.7579,
+ "step": 2130
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 0.0002,
+ "loss": 0.7683,
+ "step": 2140
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 0.0002,
+ "loss": 0.7958,
+ "step": 2150
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 0.0002,
+ "loss": 0.8009,
+ "step": 2160
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.0002,
+ "loss": 0.7504,
+ "step": 2170
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.0002,
+ "loss": 0.7558,
+ "step": 2180
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.0002,
+ "loss": 0.7143,
+ "step": 2190
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 0.0002,
+ "loss": 0.7767,
+ "step": 2200
+ },
+ {
+ "epoch": 0.7,
+ "eval_loss": 0.7396783232688904,
+ "eval_runtime": 111.0434,
+ "eval_samples_per_second": 9.005,
+ "eval_steps_per_second": 4.503,
+ "step": 2200
+ },
+ {
+ "epoch": 0.7,
+ "mmlu_eval_accuracy": 0.48937488654796385,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
+ "mmlu_eval_accuracy_college_biology": 0.4375,
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+ "mmlu_eval_accuracy_global_facts": 0.4,
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+ "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
+ "mmlu_eval_accuracy_marketing": 0.88,
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+ "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+ "mmlu_eval_accuracy_philosophy": 0.5,
+ "mmlu_eval_accuracy_prehistory": 0.4,
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
+ "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
+ "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
+ "mmlu_eval_accuracy_public_relations": 0.5,
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
+ "mmlu_eval_accuracy_world_religions": 0.7894736842105263,
+ "mmlu_loss": 1.464440327400327,
+ "step": 2200
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 0.0002,
+ "loss": 0.7848,
+ "step": 2210
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 0.0002,
+ "loss": 0.7864,
+ "step": 2220
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 0.0002,
+ "loss": 0.7609,
+ "step": 2230
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 0.0002,
+ "loss": 0.7782,
+ "step": 2240
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 0.0002,
+ "loss": 0.7825,
+ "step": 2250
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 0.0002,
+ "loss": 0.85,
+ "step": 2260
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 0.0002,
+ "loss": 0.7802,
+ "step": 2270
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 0.0002,
+ "loss": 0.7715,
+ "step": 2280
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.0002,
+ "loss": 0.8032,
+ "step": 2290
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.0002,
+ "loss": 0.854,
+ "step": 2300
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.0002,
+ "loss": 0.8123,
+ "step": 2310
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 0.0002,
+ "loss": 0.8101,
+ "step": 2320
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 0.0002,
+ "loss": 0.8075,
+ "step": 2330
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 0.0002,
+ "loss": 0.817,
+ "step": 2340
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 0.0002,
+ "loss": 0.7747,
+ "step": 2350
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 0.0002,
+ "loss": 0.8012,
+ "step": 2360
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 0.0002,
+ "loss": 0.7893,
+ "step": 2370
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 0.0002,
+ "loss": 0.7661,
+ "step": 2380
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 0.0002,
+ "loss": 0.7711,
+ "step": 2390
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 0.0002,
+ "loss": 0.8136,
+ "step": 2400
+ },
+ {
+ "epoch": 0.76,
+ "eval_loss": 0.7395493388175964,
+ "eval_runtime": 110.7923,
+ "eval_samples_per_second": 9.026,
+ "eval_steps_per_second": 4.513,
+ "step": 2400
+ },
+ {
+ "epoch": 0.76,
+ "mmlu_eval_accuracy": 0.4873047408851529,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
+ "mmlu_eval_accuracy_college_biology": 0.4375,
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
+ "mmlu_eval_accuracy_global_facts": 0.4,
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
+ "mmlu_eval_accuracy_marketing": 0.88,
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
+ "mmlu_eval_accuracy_prehistory": 0.4,
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.3917097181237397,
+ "step": 2400
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 0.0002,
+ "loss": 0.7579,
+ "step": 2410
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.0002,
+ "loss": 0.8421,
+ "step": 2420
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.0002,
+ "loss": 0.7957,
+ "step": 2430
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.0002,
+ "loss": 0.7452,
+ "step": 2440
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 0.0002,
+ "loss": 0.8478,
+ "step": 2450
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 0.0002,
+ "loss": 0.8443,
+ "step": 2460
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 0.0002,
+ "loss": 0.8409,
+ "step": 2470
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 0.0002,
+ "loss": 0.8168,
+ "step": 2480
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 0.0002,
+ "loss": 0.7648,
+ "step": 2490
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 0.0002,
+ "loss": 0.7938,
+ "step": 2500
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.0002,
+ "loss": 0.791,
+ "step": 2510
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.0002,
+ "loss": 0.7691,
+ "step": 2520
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.0002,
+ "loss": 0.7648,
+ "step": 2530
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.0002,
+ "loss": 0.7575,
+ "step": 2540
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 0.0002,
+ "loss": 0.7797,
+ "step": 2550
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 0.0002,
+ "loss": 0.7742,
+ "step": 2560
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 0.0002,
+ "loss": 0.8391,
+ "step": 2570
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 0.0002,
+ "loss": 0.7746,
+ "step": 2580
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 0.0002,
+ "loss": 0.7534,
+ "step": 2590
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 0.0002,
+ "loss": 0.7395,
+ "step": 2600
+ },
+ {
+ "epoch": 0.82,
+ "eval_loss": 0.7380212545394897,
+ "eval_runtime": 111.0553,
+ "eval_samples_per_second": 9.005,
+ "eval_steps_per_second": 4.502,
+ "step": 2600
+ },
+ {
+ "epoch": 0.82,
+ "mmlu_eval_accuracy": 0.4979448031756729,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+ "mmlu_eval_accuracy_college_biology": 0.5,
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
+ "mmlu_eval_accuracy_global_facts": 0.4,
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
+ "mmlu_eval_accuracy_marketing": 0.88,
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
+ "mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
+ "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
+ "mmlu_loss": 1.3912735815614696,
+ "step": 2600
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.0002,
+ "loss": 0.7792,
+ "step": 2610
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.0002,
+ "loss": 0.7228,
+ "step": 2620
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.0002,
+ "loss": 0.7294,
+ "step": 2630
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 0.0002,
+ "loss": 0.6968,
+ "step": 2640
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 0.0002,
+ "loss": 0.7463,
+ "step": 2650
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 0.0002,
+ "loss": 0.7588,
+ "step": 2660
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 0.0002,
+ "loss": 0.7406,
+ "step": 2670
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 0.0002,
+ "loss": 0.7817,
+ "step": 2680
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 0.0002,
+ "loss": 0.808,
+ "step": 2690
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 0.0002,
+ "loss": 0.771,
+ "step": 2700
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 0.0002,
+ "loss": 0.7678,
+ "step": 2710
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 0.0002,
+ "loss": 0.7885,
+ "step": 2720
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.0002,
+ "loss": 0.8297,
+ "step": 2730
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.0002,
+ "loss": 0.8218,
+ "step": 2740
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.0002,
+ "loss": 0.7742,
+ "step": 2750
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.0002,
+ "loss": 0.7512,
+ "step": 2760
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 0.0002,
+ "loss": 0.7508,
+ "step": 2770
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 0.0002,
+ "loss": 0.7947,
+ "step": 2780
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 0.0002,
+ "loss": 0.7399,
+ "step": 2790
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.0002,
+ "loss": 0.7589,
+ "step": 2800
+ },
+ {
+ "epoch": 0.89,
+ "eval_loss": 0.7355720400810242,
+ "eval_runtime": 110.8718,
+ "eval_samples_per_second": 9.019,
+ "eval_steps_per_second": 4.51,
+ "step": 2800
+ },
+ {
+ "epoch": 0.89,
+ "mmlu_eval_accuracy": 0.48346048137181885,
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+ "mmlu_eval_accuracy_college_biology": 0.5,
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
+ "mmlu_eval_accuracy_global_facts": 0.3,
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.7222222222222222,
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
+ "mmlu_eval_accuracy_marketing": 0.84,
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.696969696969697,
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
+ "mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
+ "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
+ "mmlu_eval_accuracy_professional_psychology": 0.5362318840579711,
+ "mmlu_eval_accuracy_public_relations": 0.5,
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.3572583458831353,
+ "step": 2800
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.0002,
+ "loss": 0.8099,
+ "step": 2810
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.0002,
+ "loss": 0.7303,
+ "step": 2820
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.0002,
+ "loss": 0.8154,
+ "step": 2830
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.0002,
+ "loss": 0.8166,
+ "step": 2840
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.0002,
+ "loss": 0.7425,
+ "step": 2850
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 0.0002,
+ "loss": 0.8223,
+ "step": 2860
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 0.0002,
+ "loss": 0.7443,
+ "step": 2870
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 0.0002,
+ "loss": 0.7733,
+ "step": 2880
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 0.0002,
+ "loss": 0.8092,
+ "step": 2890
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 0.0002,
+ "loss": 0.7371,
+ "step": 2900
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 0.0002,
+ "loss": 0.7323,
+ "step": 2910
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 0.0002,
+ "loss": 0.7716,
+ "step": 2920
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 0.0002,
+ "loss": 0.7824,
+ "step": 2930
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 0.0002,
+ "loss": 0.7373,
+ "step": 2940
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 0.0002,
+ "loss": 0.7384,
+ "step": 2950
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.0002,
+ "loss": 0.7598,
+ "step": 2960
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.0002,
+ "loss": 0.7211,
+ "step": 2970
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.0002,
+ "loss": 0.7886,
+ "step": 2980
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.0002,
+ "loss": 0.8107,
+ "step": 2990
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.0002,
+ "loss": 0.8389,
+ "step": 3000
+ },
+ {
+ "epoch": 0.95,
+ "eval_loss": 0.7343361377716064,
+ "eval_runtime": 110.9061,
+ "eval_samples_per_second": 9.017,
+ "eval_steps_per_second": 4.508,
+ "step": 3000
+ },
+ {
+ "epoch": 0.95,
+ "mmlu_eval_accuracy": 0.5003901788212859,
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
+ "mmlu_eval_accuracy_astronomy": 0.375,
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
+ "mmlu_eval_accuracy_college_biology": 0.4375,
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+ "mmlu_eval_accuracy_econometrics": 0.25,
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+ "mmlu_eval_accuracy_global_facts": 0.5,
+ "mmlu_eval_accuracy_high_school_biology": 0.53125,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
+ "mmlu_eval_accuracy_marketing": 0.84,
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+ "mmlu_eval_accuracy_moral_scenarios": 0.22,
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+ "mmlu_eval_accuracy_philosophy": 0.5,
+ "mmlu_eval_accuracy_prehistory": 0.5714285714285714,
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
+ "mmlu_eval_accuracy_professional_law": 0.35294117647058826,
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.217419229584014,
+ "step": 3000
+ }
+ ],
+ "max_steps": 10000,
+ "num_train_epochs": 4,
+ "total_flos": 9.100795987905085e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
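
For reference, the `log_history` array added above follows the standard Hugging Face `Trainer` state schema, so the run is easier to summarize programmatically than by reading the raw diff. A minimal sketch in Python, assuming the checkpoint layout shown in this commit (the exact path is an assumption, not something the diff guarantees):

```python
import json

# Minimal sketch: summarize the trainer state shown in the diff above.
# The path is an assumption based on this commit's checkpoint layout.
with open("checkpoint-3000/trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
evals = [(e["step"], e["eval_loss"]) for e in history if "eval_loss" in e]
mmlu = [(e["step"], e["mmlu_eval_accuracy"]) for e in history if "mmlu_eval_accuracy" in e]

best_step, best_loss = min(evals, key=lambda p: p[1])
print(f"best eval_loss {best_loss:.4f} at step {best_step}")
print(f"latest mmlu_eval_accuracy {mmlu[-1][1]:.4f} at step {mmlu[-1][0]}")
```

On the entries visible above, this should report the step-3000 evaluation (eval_loss of about 0.7343, mmlu_eval_accuracy of about 0.5004).
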
{checkpoint-800 β†’ checkpoint-3000}/training_args.bin RENAMED
File without changes
checkpoint-800/trainer_state.json DELETED
@@ -1,780 +0,0 @@
- {
- "best_metric": 0.7563537359237671,
- "best_model_checkpoint": "experts/expert-16/checkpoint-800",
- "epoch": 0.2534854245880862,
- "global_step": 800,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.0,
- "learning_rate": 0.0002,
- "loss": 0.8339,
- "step": 10
- },
- {
- "epoch": 0.01,
- "learning_rate": 0.0002,
- "loss": 0.8289,
- "step": 20
- },
- {
- "epoch": 0.01,
- "learning_rate": 0.0002,
- "loss": 0.9041,
- "step": 30
- },
- {
- "epoch": 0.01,
- "learning_rate": 0.0002,
- "loss": 0.8491,
- "step": 40
- },
- {
- "epoch": 0.02,
- "learning_rate": 0.0002,
- "loss": 0.8151,
- "step": 50
- },
- {
- "epoch": 0.02,
- "learning_rate": 0.0002,
- "loss": 0.79,
- "step": 60
- },
- {
- "epoch": 0.02,
- "learning_rate": 0.0002,
- "loss": 0.7835,
- "step": 70
- },
- {
- "epoch": 0.03,
- "learning_rate": 0.0002,
- "loss": 0.8831,
- "step": 80
- },
- {
- "epoch": 0.03,
- "learning_rate": 0.0002,
- "loss": 0.8607,
- "step": 90
- },
- {
- "epoch": 0.03,
- "learning_rate": 0.0002,
- "loss": 0.7876,
- "step": 100
- },
- {
- "epoch": 0.03,
- "learning_rate": 0.0002,
- "loss": 0.8031,
- "step": 110
- },
- {
- "epoch": 0.04,
- "learning_rate": 0.0002,
- "loss": 0.8207,
- "step": 120
- },
- {
- "epoch": 0.04,
- "learning_rate": 0.0002,
- "loss": 0.807,
- "step": 130
- },
- {
- "epoch": 0.04,
- "learning_rate": 0.0002,
- "loss": 0.9262,
- "step": 140
- },
- {
- "epoch": 0.05,
- "learning_rate": 0.0002,
- "loss": 0.7964,
- "step": 150
- },
- {
- "epoch": 0.05,
- "learning_rate": 0.0002,
- "loss": 0.7879,
- "step": 160
- },
- {
- "epoch": 0.05,
- "learning_rate": 0.0002,
- "loss": 0.7587,
- "step": 170
- },
- {
- "epoch": 0.06,
- "learning_rate": 0.0002,
- "loss": 0.8091,
- "step": 180
- },
- {
- "epoch": 0.06,
- "learning_rate": 0.0002,
- "loss": 0.8615,
- "step": 190
- },
- {
- "epoch": 0.06,
- "learning_rate": 0.0002,
- "loss": 0.8672,
- "step": 200
- },
- {
- "epoch": 0.06,
- "eval_loss": 0.7779108881950378,
- "eval_runtime": 110.9863,
- "eval_samples_per_second": 9.01,
- "eval_steps_per_second": 4.505,
- "step": 200
- },
- {
- "epoch": 0.06,
- "mmlu_eval_accuracy": 0.4744171116325413,
- "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
- "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
- "mmlu_eval_accuracy_astronomy": 0.4375,
- "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
- "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
- "mmlu_eval_accuracy_college_biology": 0.4375,
- "mmlu_eval_accuracy_college_chemistry": 0.125,
- "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
- "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
- "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
- "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
- "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
- "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
- "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
- "mmlu_eval_accuracy_electrical_engineering": 0.25,
- "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
- "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
- "mmlu_eval_accuracy_global_facts": 0.4,
- "mmlu_eval_accuracy_high_school_biology": 0.375,
- "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
- "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
- "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
- "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
- "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
- "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
- "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
- "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
- "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
- "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
- "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
- "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
- "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
- "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
- "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
- "mmlu_eval_accuracy_international_law": 0.8461538461538461,
- "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
- "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
- "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
- "mmlu_eval_accuracy_management": 0.6363636363636364,
- "mmlu_eval_accuracy_marketing": 0.88,
- "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
- "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
- "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
- "mmlu_eval_accuracy_moral_scenarios": 0.27,
- "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
- "mmlu_eval_accuracy_philosophy": 0.5,
- "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
- "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
- "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
- "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
- "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
- "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
- "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
- "mmlu_eval_accuracy_sociology": 0.6818181818181818,
- "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
- "mmlu_eval_accuracy_virology": 0.5555555555555556,
- "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
- "mmlu_loss": 1.5868234255450824,
- "step": 200
- },
- {
- "epoch": 0.07,
- "learning_rate": 0.0002,
- "loss": 0.8316,
- "step": 210
- },
- {
- "epoch": 0.07,
- "learning_rate": 0.0002,
- "loss": 0.8454,
- "step": 220
- },
- {
- "epoch": 0.07,
- "learning_rate": 0.0002,
- "loss": 0.8434,
- "step": 230
- },
- {
- "epoch": 0.08,
- "learning_rate": 0.0002,
- "loss": 0.821,
- "step": 240
- },
- {
- "epoch": 0.08,
- "learning_rate": 0.0002,
- "loss": 0.7893,
- "step": 250
- },
- {
- "epoch": 0.08,
- "learning_rate": 0.0002,
- "loss": 0.8242,
- "step": 260
- },
- {
- "epoch": 0.09,
- "learning_rate": 0.0002,
- "loss": 0.8128,
- "step": 270
- },
- {
- "epoch": 0.09,
- "learning_rate": 0.0002,
- "loss": 0.8344,
- "step": 280
- },
- {
- "epoch": 0.09,
- "learning_rate": 0.0002,
- "loss": 0.8338,
- "step": 290
- },
- {
- "epoch": 0.1,
- "learning_rate": 0.0002,
- "loss": 0.7981,
- "step": 300
- },
- {
- "epoch": 0.1,
- "learning_rate": 0.0002,
- "loss": 0.781,
- "step": 310
- },
- {
- "epoch": 0.1,
- "learning_rate": 0.0002,
- "loss": 0.7717,
- "step": 320
- },
- {
- "epoch": 0.1,
- "learning_rate": 0.0002,
- "loss": 0.767,
- "step": 330
- },
- {
- "epoch": 0.11,
- "learning_rate": 0.0002,
- "loss": 0.7925,
- "step": 340
- },
- {
- "epoch": 0.11,
- "learning_rate": 0.0002,
- "loss": 0.8226,
- "step": 350
- },
- {
- "epoch": 0.11,
- "learning_rate": 0.0002,
- "loss": 0.7912,
- "step": 360
- },
- {
- "epoch": 0.12,
- "learning_rate": 0.0002,
- "loss": 0.8093,
- "step": 370
- },
- {
- "epoch": 0.12,
- "learning_rate": 0.0002,
- "loss": 0.7648,
- "step": 380
- },
- {
- "epoch": 0.12,
- "learning_rate": 0.0002,
- "loss": 0.7866,
- "step": 390
- },
- {
- "epoch": 0.13,
- "learning_rate": 0.0002,
- "loss": 0.7976,
- "step": 400
- },
- {
322
- "epoch": 0.13,
323
- "eval_loss": 0.7656086683273315,
324
- "eval_runtime": 110.9802,
325
- "eval_samples_per_second": 9.011,
326
- "eval_steps_per_second": 4.505,
327
- "step": 400
328
- },
329
- {
330
- "epoch": 0.13,
331
- "mmlu_eval_accuracy": 0.47124130233512024,
332
- "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
333
- "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
334
- "mmlu_eval_accuracy_astronomy": 0.4375,
335
- "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
336
- "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
337
- "mmlu_eval_accuracy_college_biology": 0.4375,
338
- "mmlu_eval_accuracy_college_chemistry": 0.125,
339
- "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
340
- "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
341
- "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
342
- "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
343
- "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
344
- "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
345
- "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
346
- "mmlu_eval_accuracy_electrical_engineering": 0.25,
347
- "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
348
- "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
349
- "mmlu_eval_accuracy_global_facts": 0.4,
350
- "mmlu_eval_accuracy_high_school_biology": 0.40625,
351
- "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
352
- "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
353
- "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
354
- "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
355
- "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
356
- "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
357
- "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
358
- "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
359
- "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
360
- "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
361
- "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
362
- "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
363
- "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
364
- "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
365
- "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
366
- "mmlu_eval_accuracy_international_law": 0.8461538461538461,
367
- "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
368
- "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
369
- "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
370
- "mmlu_eval_accuracy_management": 0.6363636363636364,
371
- "mmlu_eval_accuracy_marketing": 0.84,
372
- "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
373
- "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
374
- "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
375
- "mmlu_eval_accuracy_moral_scenarios": 0.25,
376
- "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
377
- "mmlu_eval_accuracy_philosophy": 0.5,
378
- "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
379
- "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
380
- "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
381
- "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
382
- "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
383
- "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
384
- "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
385
- "mmlu_eval_accuracy_sociology": 0.6818181818181818,
386
- "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
387
- "mmlu_eval_accuracy_virology": 0.5,
388
- "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
389
- "mmlu_loss": 1.4339068503199297,
390
- "step": 400
391
- },
392
- {
393
- "epoch": 0.13,
394
- "learning_rate": 0.0002,
395
- "loss": 0.8182,
396
- "step": 410
397
- },
398
- {
399
- "epoch": 0.13,
400
- "learning_rate": 0.0002,
401
- "loss": 0.8438,
402
- "step": 420
403
- },
404
- {
405
- "epoch": 0.14,
406
- "learning_rate": 0.0002,
407
- "loss": 0.8184,
408
- "step": 430
409
- },
410
- {
411
- "epoch": 0.14,
412
- "learning_rate": 0.0002,
413
- "loss": 0.8202,
414
- "step": 440
415
- },
416
- {
417
- "epoch": 0.14,
418
- "learning_rate": 0.0002,
419
- "loss": 0.8264,
420
- "step": 450
421
- },
422
- {
423
- "epoch": 0.15,
424
- "learning_rate": 0.0002,
425
- "loss": 0.8384,
426
- "step": 460
427
- },
428
- {
429
- "epoch": 0.15,
430
- "learning_rate": 0.0002,
431
- "loss": 0.8372,
432
- "step": 470
433
- },
434
- {
435
- "epoch": 0.15,
436
- "learning_rate": 0.0002,
437
- "loss": 0.8072,
438
- "step": 480
439
- },
440
- {
441
- "epoch": 0.16,
442
- "learning_rate": 0.0002,
443
- "loss": 0.8214,
444
- "step": 490
445
- },
446
- {
447
- "epoch": 0.16,
448
- "learning_rate": 0.0002,
449
- "loss": 0.814,
450
- "step": 500
451
- },
452
- {
453
- "epoch": 0.16,
454
- "learning_rate": 0.0002,
455
- "loss": 0.847,
456
- "step": 510
457
- },
458
- {
459
- "epoch": 0.16,
460
- "learning_rate": 0.0002,
461
- "loss": 0.8444,
462
- "step": 520
463
- },
464
- {
465
- "epoch": 0.17,
466
- "learning_rate": 0.0002,
467
- "loss": 0.8096,
468
- "step": 530
469
- },
470
- {
471
- "epoch": 0.17,
472
- "learning_rate": 0.0002,
473
- "loss": 0.8496,
474
- "step": 540
475
- },
476
- {
477
- "epoch": 0.17,
478
- "learning_rate": 0.0002,
479
- "loss": 0.7729,
480
- "step": 550
481
- },
482
- {
483
- "epoch": 0.18,
484
- "learning_rate": 0.0002,
485
- "loss": 0.7826,
486
- "step": 560
487
- },
488
- {
489
- "epoch": 0.18,
490
- "learning_rate": 0.0002,
491
- "loss": 0.7478,
492
- "step": 570
493
- },
494
- {
495
- "epoch": 0.18,
496
- "learning_rate": 0.0002,
497
- "loss": 0.7953,
498
- "step": 580
499
- },
500
- {
501
- "epoch": 0.19,
502
- "learning_rate": 0.0002,
503
- "loss": 0.7363,
504
- "step": 590
505
- },
506
- {
507
- "epoch": 0.19,
508
- "learning_rate": 0.0002,
509
- "loss": 0.7971,
510
- "step": 600
511
- },
512
- {
513
- "epoch": 0.19,
514
- "eval_loss": 0.7616064548492432,
515
- "eval_runtime": 110.9404,
516
- "eval_samples_per_second": 9.014,
517
- "eval_steps_per_second": 4.507,
518
- "step": 600
519
- },
520
- {
521
- "epoch": 0.19,
522
- "mmlu_eval_accuracy": 0.4749850916074463,
523
- "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
524
- "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
525
- "mmlu_eval_accuracy_astronomy": 0.4375,
526
- "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
527
- "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
528
- "mmlu_eval_accuracy_college_biology": 0.4375,
529
- "mmlu_eval_accuracy_college_chemistry": 0.25,
530
- "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
531
- "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
532
- "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
533
- "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
534
- "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
535
- "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
536
- "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
537
- "mmlu_eval_accuracy_electrical_engineering": 0.25,
538
- "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
539
- "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
540
- "mmlu_eval_accuracy_global_facts": 0.3,
541
- "mmlu_eval_accuracy_high_school_biology": 0.40625,
542
- "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
543
- "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
544
- "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
545
- "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
546
- "mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
547
- "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
548
- "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
549
- "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
550
- "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
551
- "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
552
- "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
553
- "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
554
- "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
555
- "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
556
- "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
557
- "mmlu_eval_accuracy_international_law": 0.8461538461538461,
558
- "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
559
- "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
560
- "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
561
- "mmlu_eval_accuracy_management": 0.6363636363636364,
562
- "mmlu_eval_accuracy_marketing": 0.84,
563
- "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
564
- "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
565
- "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
566
- "mmlu_eval_accuracy_moral_scenarios": 0.26,
567
- "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
568
- "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
569
- "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
570
- "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
571
- "mmlu_eval_accuracy_professional_law": 0.3,
572
- "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
573
- "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
574
- "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
575
- "mmlu_eval_accuracy_security_studies": 0.5555555555555556,
576
- "mmlu_eval_accuracy_sociology": 0.6818181818181818,
577
- "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
578
- "mmlu_eval_accuracy_virology": 0.5,
579
- "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
580
- "mmlu_loss": 1.5647042619341658,
581
- "step": 600
582
- },
583
- {
584
- "epoch": 0.19,
585
- "learning_rate": 0.0002,
586
- "loss": 0.7936,
587
- "step": 610
588
- },
589
- {
590
- "epoch": 0.2,
591
- "learning_rate": 0.0002,
592
- "loss": 0.7319,
593
- "step": 620
594
- },
595
- {
596
- "epoch": 0.2,
597
- "learning_rate": 0.0002,
598
- "loss": 0.79,
599
- "step": 630
600
- },
601
- {
602
- "epoch": 0.2,
603
- "learning_rate": 0.0002,
604
- "loss": 0.7806,
605
- "step": 640
606
- },
607
- {
608
- "epoch": 0.21,
609
- "learning_rate": 0.0002,
610
- "loss": 0.8833,
611
- "step": 650
612
- },
613
- {
614
- "epoch": 0.21,
615
- "learning_rate": 0.0002,
616
- "loss": 0.7711,
617
- "step": 660
618
- },
619
- {
620
- "epoch": 0.21,
621
- "learning_rate": 0.0002,
622
- "loss": 0.8242,
623
- "step": 670
624
- },
625
- {
626
- "epoch": 0.22,
627
- "learning_rate": 0.0002,
628
- "loss": 0.7948,
629
- "step": 680
630
- },
631
- {
632
- "epoch": 0.22,
633
- "learning_rate": 0.0002,
634
- "loss": 0.7417,
635
- "step": 690
636
- },
637
- {
638
- "epoch": 0.22,
639
- "learning_rate": 0.0002,
640
- "loss": 0.7275,
641
- "step": 700
642
- },
643
- {
644
- "epoch": 0.22,
645
- "learning_rate": 0.0002,
646
- "loss": 0.8137,
647
- "step": 710
648
- },
649
- {
650
- "epoch": 0.23,
651
- "learning_rate": 0.0002,
652
- "loss": 0.8568,
653
- "step": 720
654
- },
655
- {
656
- "epoch": 0.23,
657
- "learning_rate": 0.0002,
658
- "loss": 0.802,
659
- "step": 730
660
- },
661
- {
662
- "epoch": 0.23,
663
- "learning_rate": 0.0002,
664
- "loss": 0.8202,
665
- "step": 740
666
- },
667
- {
668
- "epoch": 0.24,
669
- "learning_rate": 0.0002,
670
- "loss": 0.8077,
671
- "step": 750
672
- },
673
- {
674
- "epoch": 0.24,
675
- "learning_rate": 0.0002,
676
- "loss": 0.814,
677
- "step": 760
678
- },
679
- {
680
- "epoch": 0.24,
681
- "learning_rate": 0.0002,
682
- "loss": 0.7971,
683
- "step": 770
684
- },
685
- {
686
- "epoch": 0.25,
687
- "learning_rate": 0.0002,
688
- "loss": 0.798,
689
- "step": 780
690
- },
691
- {
692
- "epoch": 0.25,
693
- "learning_rate": 0.0002,
694
- "loss": 0.7806,
695
- "step": 790
696
- },
697
- {
698
- "epoch": 0.25,
699
- "learning_rate": 0.0002,
700
- "loss": 0.8042,
701
- "step": 800
702
- },
703
- {
704
- "epoch": 0.25,
705
- "eval_loss": 0.7563537359237671,
706
- "eval_runtime": 111.023,
707
- "eval_samples_per_second": 9.007,
708
- "eval_steps_per_second": 4.504,
709
- "step": 800
710
- },
711
- {
712
- "epoch": 0.25,
713
- "mmlu_eval_accuracy": 0.4796267144005645,
714
- "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
715
- "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
716
- "mmlu_eval_accuracy_astronomy": 0.4375,
717
- "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
718
- "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
719
- "mmlu_eval_accuracy_college_biology": 0.4375,
720
- "mmlu_eval_accuracy_college_chemistry": 0.125,
721
- "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
722
- "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
723
- "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
724
- "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
725
- "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
726
- "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
727
- "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
728
- "mmlu_eval_accuracy_electrical_engineering": 0.25,
729
- "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
730
- "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
731
- "mmlu_eval_accuracy_global_facts": 0.4,
732
- "mmlu_eval_accuracy_high_school_biology": 0.375,
733
- "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
734
- "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
735
- "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
736
- "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
737
- "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
738
- "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
739
- "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
740
- "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
741
- "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
742
- "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
743
- "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
744
- "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
745
- "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
746
- "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
747
- "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
748
- "mmlu_eval_accuracy_international_law": 0.8461538461538461,
749
- "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
750
- "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
751
- "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
752
- "mmlu_eval_accuracy_management": 0.6363636363636364,
753
- "mmlu_eval_accuracy_marketing": 0.84,
754
- "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
755
- "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
756
- "mmlu_eval_accuracy_moral_disputes": 0.5,
757
- "mmlu_eval_accuracy_moral_scenarios": 0.23,
758
- "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
759
- "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
760
- "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
761
- "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
762
- "mmlu_eval_accuracy_professional_law": 0.3,
763
- "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
764
- "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
765
- "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
766
- "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
767
- "mmlu_eval_accuracy_sociology": 0.6363636363636364,
768
- "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
769
- "mmlu_eval_accuracy_virology": 0.5555555555555556,
770
- "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
771
- "mmlu_loss": 1.4866046660796157,
772
- "step": 800
773
- }
774
- ],
775
- "max_steps": 10000,
776
- "num_train_epochs": 4,
777
- "total_flos": 2.4426204707743334e+17,
778
- "trial_name": null,
779
- "trial_params": null
780
- }
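
For anyone inspecting these checkpoints, here is a minimal sketch of how a `trainer_state.json` with the layout shown above could be read back for plotting. It assumes only the standard Hugging Face `Trainer` structure visible in the diff (a `log_history` list whose entries carry `step` plus either `loss`, `eval_loss`, or `mmlu_eval_accuracy`); the file path is illustrative, not a file guaranteed to exist in this commit.

```python
import json

# Illustrative path; point this at any checkpoint's trainer_state.json.
with open("checkpoint-3000/trainer_state.json") as f:
    state = json.load(f)

train_curve, eval_curve, mmlu_curve = [], [], []
for entry in state["log_history"]:
    step = entry["step"]
    if "loss" in entry:                # periodic training-loss records
        train_curve.append((step, entry["loss"]))
    if "eval_loss" in entry:           # held-out evaluation records
        eval_curve.append((step, entry["eval_loss"]))
    if "mmlu_eval_accuracy" in entry:  # aggregate MMLU evaluation records
        mmlu_curve.append((step, entry["mmlu_eval_accuracy"]))

print(f"{len(train_curve)} train points, "
      f"{len(eval_curve)} eval points, {len(mmlu_curve)} MMLU points")
```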