terry69 committed
Commit f0e80cc (1 parent: 94f2c0b)

Model save

README.md CHANGED
@@ -1,14 +1,12 @@
 ---
 license: apache-2.0
-library_name: peft
+base_model: mistralai/Mistral-7B-Instruct-v0.2
 tags:
-- alignment-handbook
 - trl
 - sft
 - generated_from_trainer
-base_model: mistralai/Mistral-7B-Instruct-v0.2
 datasets:
-- preference-data
+- generator
 model-index:
 - name: feedback_p0.1_seed42_level2_rare
   results: []
@@ -19,9 +17,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # feedback_p0.1_seed42_level2_rare
 
-This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the preference-data dataset.
+This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.8673
+- Loss: 0.2977
 
 ## Model description
 
@@ -41,13 +39,13 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 1e-05
-- train_batch_size: 8
+- train_batch_size: 2
 - eval_batch_size: 1
 - seed: 42
 - distributed_type: multi-GPU
 - num_devices: 4
-- gradient_accumulation_steps: 2
-- total_train_batch_size: 64
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 32
 - total_eval_batch_size: 4
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
@@ -58,13 +56,12 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 0.853         | 0.9988 | 423  | 0.8673          |
+| 0.346         | 0.9997 | 931  | 0.2977          |
 
 
 ### Framework versions
 
-- PEFT 0.11.1
 - Transformers 4.43.4
 - Pytorch 2.3.1+cu121
 - Datasets 2.19.1
 - Tokenizers 0.19.1
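The card change above is substantive: the `library_name: peft` tag and the PEFT 0.11.1 framework entry are gone, so this commit replaces the earlier adapter-only card with a full-weight checkpoint, and the new hyperparameters are self-consistent (2 per-device batch × 4 GPUs × 4 gradient-accumulation steps = the reported total_train_batch_size of 32). A minimal loading sketch for the updated checkpoint; the repo id is an assumption inferred from the committer and model name:

```python
# Minimal sketch, assuming the checkpoint is published as
# "terry69/feedback_p0.1_seed42_level2_rare" (adjust to the real repo id).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "terry69/feedback_p0.1_seed42_level2_rare"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

# Mistral-Instruct checkpoints ship a chat template with the tokenizer.
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Summarize what SFT is in one sentence."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```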
all_results.json CHANGED
@@ -1,14 +1,9 @@
 {
-    "epoch": 0.9988193624557261,
-    "eval_loss": 0.8673050403594971,
-    "eval_runtime": 2.0994,
-    "eval_samples": 10,
-    "eval_samples_per_second": 0.953,
-    "eval_steps_per_second": 0.476,
-    "total_flos": 1701364321222656.0,
+    "epoch": 0.9997315436241611,
+    "total_flos": 194880114524160.0,
     "train_loss": 0.0,
-    "train_runtime": 0.0113,
-    "train_samples": 90047,
-    "train_samples_per_second": 2398604.87,
-    "train_steps_per_second": 37460.213
+    "train_runtime": 0.0085,
+    "train_samples": 98952,
+    "train_samples_per_second": 3497894.361,
+    "train_steps_per_second": 109279.854
 }
config.json CHANGED
@@ -22,6 +22,6 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.43.4",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 32000
 }
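Flipping `use_cache` to false is the config change the Trainer commonly makes when gradient checkpointing is enabled, since the KV cache is incompatible with checkpointed backward passes. For inference you would normally turn it back on; a hedged sketch (repo id assumed as above):

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "terry69/feedback_p0.1_seed42_level2_rare"  # assumed repo id
)
model.config.use_cache = True  # restore KV caching for fast autoregressive decoding
```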
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.43.4"
+}
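The new generation_config.json pins only the special-token ids (1 and 2 are the `<s>`/`</s>` ids in Mistral's tokenizer) and sets no sampling defaults. A small sketch of inspecting it through the standard API (repo id assumed as above):

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(
    "terry69/feedback_p0.1_seed42_level2_rare"  # assumed repo id
)
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # expected: 1 2
```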
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23d4d60e4f957c3594368f739562fe754e8ea1256c13894f088683c487b6c7e0
+size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cce15fa203f380cff1d01841cb6e38cf8f8a7a7f36c064132891a4f399756a34
+size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6242f75d0617ab7a77e147861c4853cd980e50e6068b4dc5c75dec64b9b276f2
+size 4540516344
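Each shard is stored through git-lfs, so what the commit actually records is a three-line pointer: the spec version, a sha256 oid, and the byte size. The three sizes sum to 14,483,498,016 bytes, slightly above the `total_size` of 14,483,464,192 in the index below; the small difference is plausibly the per-file safetensors header, which `total_size` (a sum over tensor storage) does not count. A hedged sketch of parsing one pointer:

```python
# Parse a git-lfs pointer file of the form shown above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:23d4d60e4f957c3594368f739562fe754e8ea1256c13894f088683c487b6c7e0\n"
    "size 4943162336\n"
)
print(parse_lfs_pointer(pointer))  # {'oid': '23d4...', 'size': 4943162336}
```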
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+{
+  "metadata": {
+    "total_size": 14483464192
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00003-of-00003.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.norm.weight": "model-00003-of-00003.safetensors"
+  }
+}
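The index maps every parameter name to the shard that contains it, which is how `from_pretrained` knows which file to open for which tensor. A hedged sketch of resolving a single tensor by hand with the safetensors API, assuming the shards have been downloaded locally:

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00003.safetensors"
with safe_open(shard, framework="pt") as st:
    tensor = st.get_tensor(name)   # reads only this tensor from the shard
print(tensor.shape)
```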
runs/Sep03_17-51-29_COE-CS-sv004/events.out.tfevents.1725386201.COE-CS-sv004.3435717.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1636c3fc38c9e1749da12137cab66d3ce236fe57d8fa00893b0412c2d7aac10b
+size 45198
runs/Sep05_22-10-21_COE-CS-sv003/events.out.tfevents.1725574415.COE-CS-sv003.16667.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be1c341d64ae8eecb7c4a9027e665b213107f0c0a590a9536fe01f58c6c7fc25
+size 5574
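The two added files under runs/ are TensorBoard event logs (the larger Sep03 file presumably covers the full training run). A hedged sketch of reading them programmatically; the scalar tag names are assumptions, so list them first:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Sep03_17-51-29_COE-CS-sv004")
acc.Reload()
print(acc.Tags()["scalars"])          # discover the available scalar tags
# losses = acc.Scalars("train/loss")  # tag name is an assumption
```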
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 0.9988193624557261,
-    "total_flos": 1701364321222656.0,
+    "epoch": 0.9997315436241611,
+    "total_flos": 194880114524160.0,
     "train_loss": 0.0,
-    "train_runtime": 0.0113,
-    "train_samples": 90047,
-    "train_samples_per_second": 2398604.87,
-    "train_steps_per_second": 37460.213
+    "train_runtime": 0.0085,
+    "train_samples": 98952,
+    "train_samples_per_second": 3497894.361,
+    "train_steps_per_second": 109279.854
 }
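The learning-rate trace in the trainer_state.json diff below is consistent with the card's `lr_scheduler_type: cosine` plus a linear warmup over about 94 of the 931 steps (i.e. a warmup ratio of 0.1; the ratio is an inference from the logged values, not stated in the card). A sketch of the implied schedule:

```python
import math

def cosine_lr(step: int, base_lr: float = 1e-05,
              warmup_steps: int = 94, total_steps: int = 931) -> float:
    """Linear warmup then cosine decay, in the shape of
    transformers' get_cosine_schedule_with_warmup."""
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(1))   # ~1.0638e-07, matching the step-1 entry below
print(cosine_lr(95))  # ~9.99996e-06, matching the step-95 entry
```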
trainer_state.json CHANGED
@@ -1,628 +1,1342 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.9988193624557261,
   "eval_steps": 500,
-  "global_step": 423,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.0023612750885478157,
-      "grad_norm": 1.5131582373299473,
-      "learning_rate": 2.3255813953488374e-07,
-      "loss": 1.3255,
       "step": 1
     },
     {
-      "epoch": 0.011806375442739079,
-      "grad_norm": 1.4970470206944162,
-      "learning_rate": 1.1627906976744188e-06,
-      "loss": 1.3229,
       "step": 5
     },
     {
-      "epoch": 0.023612750885478158,
-      "grad_norm": 1.4534560068903173,
-      "learning_rate": 2.3255813953488376e-06,
-      "loss": 1.3249,
       "step": 10
     },
     {
-      "epoch": 0.03541912632821724,
-      "grad_norm": 1.3529390744329275,
-      "learning_rate": 3.4883720930232564e-06,
-      "loss": 1.3105,
       "step": 15
     },
     {
-      "epoch": 0.047225501770956316,
-      "grad_norm": 1.1377914835607748,
-      "learning_rate": 4.651162790697675e-06,
-      "loss": 1.2795,
       "step": 20
     },
     {
-      "epoch": 0.0590318772136954,
-      "grad_norm": 0.9300447218970413,
-      "learning_rate": 5.8139534883720935e-06,
-      "loss": 1.2542,
       "step": 25
     },
     {
-      "epoch": 0.07083825265643448,
-      "grad_norm": 0.7033484136291787,
-      "learning_rate": 6.976744186046513e-06,
-      "loss": 1.2254,
       "step": 30
     },
     {
-      "epoch": 0.08264462809917356,
-      "grad_norm": 0.5519511550883764,
-      "learning_rate": 8.139534883720931e-06,
-      "loss": 1.1956,
       "step": 35
     },
     {
-      "epoch": 0.09445100354191263,
-      "grad_norm": 0.530461549075621,
-      "learning_rate": 9.30232558139535e-06,
-      "loss": 1.1609,
       "step": 40
     },
     {
-      "epoch": 0.10625737898465171,
-      "grad_norm": 0.4882723618310303,
-      "learning_rate": 9.999316524962347e-06,
-      "loss": 1.1353,
       "step": 45
     },
     {
-      "epoch": 0.1180637544273908,
-      "grad_norm": 0.43070884824119315,
-      "learning_rate": 9.991629576543164e-06,
-      "loss": 1.1108,
       "step": 50
     },
     {
-      "epoch": 0.12987012987012986,
-      "grad_norm": 0.4326223460490506,
-      "learning_rate": 9.975414512725058e-06,
-      "loss": 1.0912,
       "step": 55
     },
     {
-      "epoch": 0.14167650531286896,
-      "grad_norm": 0.3990410735697587,
-      "learning_rate": 9.95069903667256e-06,
-      "loss": 1.0684,
       "step": 60
     },
     {
-      "epoch": 0.15348288075560804,
-      "grad_norm": 0.4102324061429566,
-      "learning_rate": 9.917525374361913e-06,
-      "loss": 1.0513,
       "step": 65
     },
     {
-      "epoch": 0.1652892561983471,
-      "grad_norm": 0.4553698218636962,
-      "learning_rate": 9.8759502024387e-06,
-      "loss": 1.0205,
       "step": 70
     },
     {
-      "epoch": 0.1770956316410862,
-      "grad_norm": 0.4487232032461124,
-      "learning_rate": 9.826044551386743e-06,
-      "loss": 0.9933,
       "step": 75
     },
     {
-      "epoch": 0.18890200708382526,
-      "grad_norm": 0.37710863024094676,
-      "learning_rate": 9.767893684173722e-06,
-      "loss": 0.9569,
       "step": 80
     },
     {
-      "epoch": 0.20070838252656434,
-      "grad_norm": 0.29780121644785873,
-      "learning_rate": 9.701596950580807e-06,
-      "loss": 0.944,
       "step": 85
     },
     {
-      "epoch": 0.21251475796930341,
-      "grad_norm": 0.2779699178964329,
-      "learning_rate": 9.627267617465243e-06,
-      "loss": 0.9348,
       "step": 90
     },
     {
-      "epoch": 0.2243211334120425,
-      "grad_norm": 0.25856375372275114,
-      "learning_rate": 9.545032675245814e-06,
-      "loss": 0.931,
       "step": 95
     },
     {
-      "epoch": 0.2361275088547816,
-      "grad_norm": 0.2613892946414014,
-      "learning_rate": 9.45503262094184e-06,
-      "loss": 0.9177,
       "step": 100
     },
     {
-      "epoch": 0.24793388429752067,
-      "grad_norm": 0.2354999119122035,
-      "learning_rate": 9.357421218136387e-06,
-      "loss": 0.9148,
       "step": 105
     },
     {
-      "epoch": 0.2597402597402597,
-      "grad_norm": 0.2162870999711504,
-      "learning_rate": 9.252365234273754e-06,
-      "loss": 0.9164,
       "step": 110
     },
     {
-      "epoch": 0.2715466351829988,
-      "grad_norm": 0.22180996969883549,
-      "learning_rate": 9.140044155740102e-06,
-      "loss": 0.9011,
       "step": 115
     },
     {
-      "epoch": 0.2833530106257379,
-      "grad_norm": 0.22437103522632068,
-      "learning_rate": 9.02064988121396e-06,
-      "loss": 0.9006,
       "step": 120
     },
     {
-      "epoch": 0.29515938606847697,
-      "grad_norm": 0.21463636431913852,
-      "learning_rate": 8.894386393810563e-06,
-      "loss": 0.8936,
       "step": 125
     },
     {
-      "epoch": 0.3069657615112161,
-      "grad_norm": 0.21659671484135407,
-      "learning_rate": 8.761469412580126e-06,
-      "loss": 0.892,
       "step": 130
     },
     {
-      "epoch": 0.3187721369539551,
-      "grad_norm": 0.22355054167698388,
-      "learning_rate": 8.622126023955446e-06,
-      "loss": 0.892,
       "step": 135
     },
     {
-      "epoch": 0.3305785123966942,
-      "grad_norm": 0.21653458201632308,
-      "learning_rate": 8.476594293778561e-06,
-      "loss": 0.8925,
       "step": 140
     },
     {
-      "epoch": 0.34238488783943327,
-      "grad_norm": 0.222501084583819,
-      "learning_rate": 8.325122860569241e-06,
-      "loss": 0.8864,
       "step": 145
     },
     {
-      "epoch": 0.3541912632821724,
-      "grad_norm": 0.21898064700237985,
-      "learning_rate": 8.167970510730254e-06,
-      "loss": 0.8849,
       "step": 150
     },
     {
-      "epoch": 0.3659976387249115,
-      "grad_norm": 0.22115057529251225,
-      "learning_rate": 8.005405736415127e-06,
-      "loss": 0.8815,
       "step": 155
     },
     {
-      "epoch": 0.3778040141676505,
-      "grad_norm": 0.21778385690198585,
-      "learning_rate": 7.837706276813819e-06,
-      "loss": 0.8806,
       "step": 160
     },
     {
-      "epoch": 0.38961038961038963,
-      "grad_norm": 0.2293476838871522,
-      "learning_rate": 7.66515864363997e-06,
-      "loss": 0.8781,
       "step": 165
     },
     {
-      "epoch": 0.4014167650531287,
-      "grad_norm": 0.23331956197796366,
-      "learning_rate": 7.488057631630438e-06,
-      "loss": 0.8742,
       "step": 170
     },
     {
-      "epoch": 0.4132231404958678,
-      "grad_norm": 0.21721254042866397,
-      "learning_rate": 7.30670581489344e-06,
-      "loss": 0.8805,
       "step": 175
     },
     {
-      "epoch": 0.42502951593860683,
-      "grad_norm": 0.23010261349577163,
-      "learning_rate": 7.121413029965769e-06,
-      "loss": 0.8742,
       "step": 180
     },
     {
-      "epoch": 0.43683589138134593,
-      "grad_norm": 0.2206787567833532,
-      "learning_rate": 6.932495846462262e-06,
-      "loss": 0.876,
       "step": 185
     },
     {
-      "epoch": 0.448642266824085,
-      "grad_norm": 0.2398992468500764,
-      "learning_rate": 6.7402770262219234e-06,
-      "loss": 0.8703,
       "step": 190
     },
     {
-      "epoch": 0.4604486422668241,
-      "grad_norm": 0.2448460311968614,
-      "learning_rate": 6.545084971874738e-06,
-      "loss": 0.873,
       "step": 195
     },
     {
-      "epoch": 0.4722550177095632,
-      "grad_norm": 0.2612008536174653,
-      "learning_rate": 6.34725316577129e-06,
-      "loss": 0.8746,
       "step": 200
     },
     {
-      "epoch": 0.48406139315230223,
-      "grad_norm": 0.24693316413096483,
-      "learning_rate": 6.147119600233758e-06,
-      "loss": 0.8736,
       "step": 205
     },
     {
-      "epoch": 0.49586776859504134,
-      "grad_norm": 0.24534026805051296,
-      "learning_rate": 5.945026200101702e-06,
-      "loss": 0.8652,
       "step": 210
     },
     {
-      "epoch": 0.5076741440377804,
-      "grad_norm": 0.23005356270231522,
-      "learning_rate": 5.74131823855921e-06,
-      "loss": 0.8624,
       "step": 215
     },
     {
-      "epoch": 0.5194805194805194,
-      "grad_norm": 0.24466967197626655,
-      "learning_rate": 5.53634374724146e-06,
-      "loss": 0.8639,
       "step": 220
     },
     {
-      "epoch": 0.5312868949232585,
-      "grad_norm": 0.24160067474761546,
-      "learning_rate": 5.3304529216284974e-06,
-      "loss": 0.8632,
       "step": 225
     },
     {
-      "epoch": 0.5430932703659976,
-      "grad_norm": 0.26220069503373417,
-      "learning_rate": 5.123997522742151e-06,
-      "loss": 0.8639,
       "step": 230
     },
     {
-      "epoch": 0.5548996458087367,
-      "grad_norm": 0.264066001522196,
-      "learning_rate": 4.917330276168208e-06,
-      "loss": 0.8623,
       "step": 235
     },
     {
-      "epoch": 0.5667060212514758,
-      "grad_norm": 0.24428345928123874,
-      "learning_rate": 4.710804269430681e-06,
-      "loss": 0.8692,
       "step": 240
     },
     {
-      "epoch": 0.5785123966942148,
-      "grad_norm": 0.24362157236851636,
-      "learning_rate": 4.504772348747687e-06,
-      "loss": 0.8617,
       "step": 245
     },
     {
-      "epoch": 0.5903187721369539,
-      "grad_norm": 0.2605786039459921,
-      "learning_rate": 4.299586516199611e-06,
-      "loss": 0.8631,
       "step": 250
     },
     {
-      "epoch": 0.602125147579693,
-      "grad_norm": 0.24469411057849033,
-      "learning_rate": 4.0955973283394525e-06,
-      "loss": 0.8519,
       "step": 255
     },
     {
-      "epoch": 0.6139315230224321,
-      "grad_norm": 0.24265909990188317,
-      "learning_rate": 3.893153297272829e-06,
-      "loss": 0.858,
       "step": 260
     },
     {
-      "epoch": 0.6257378984651711,
-      "grad_norm": 0.2431981393362141,
-      "learning_rate": 3.6926002952309015e-06,
-      "loss": 0.8574,
       "step": 265
     },
     {
-      "epoch": 0.6375442739079102,
-      "grad_norm": 0.25137244252341084,
-      "learning_rate": 3.4942809636534637e-06,
-      "loss": 0.8584,
       "step": 270
     },
     {
-      "epoch": 0.6493506493506493,
-      "grad_norm": 0.2520911165703089,
-      "learning_rate": 3.298534127791785e-06,
-      "loss": 0.8578,
       "step": 275
     },
     {
-      "epoch": 0.6611570247933884,
-      "grad_norm": 0.2651721936087408,
-      "learning_rate": 3.105694217831361e-06,
-      "loss": 0.8589,
       "step": 280
     },
     {
-      "epoch": 0.6729634002361276,
-      "grad_norm": 0.2546800394311146,
-      "learning_rate": 2.9160906975235493e-06,
-      "loss": 0.8523,
       "step": 285
     },
     {
-      "epoch": 0.6847697756788665,
-      "grad_norm": 0.25316819030742643,
-      "learning_rate": 2.7300475013022666e-06,
-      "loss": 0.8552,
       "step": 290
     },
     {
-      "epoch": 0.6965761511216056,
-      "grad_norm": 0.2509477836308168,
-      "learning_rate": 2.5478824808474613e-06,
-      "loss": 0.8574,
       "step": 295
     },
     {
-      "epoch": 0.7083825265643447,
-      "grad_norm": 0.22985580266469613,
-      "learning_rate": 2.3699068620408305e-06,
-      "loss": 0.8525,
       "step": 300
     },
     {
-      "epoch": 0.7201889020070839,
-      "grad_norm": 0.24633435787531885,
-      "learning_rate": 2.1964247132416373e-06,
-      "loss": 0.8541,
       "step": 305
     },
     {
-      "epoch": 0.731995277449823,
-      "grad_norm": 0.2550223994617769,
-      "learning_rate": 2.027732425791011e-06,
-      "loss": 0.8575,
       "step": 310
     },
     {
-      "epoch": 0.743801652892562,
-      "grad_norm": 0.250598594634277,
-      "learning_rate": 1.864118207632315e-06,
-      "loss": 0.8584,
       "step": 315
     },
     {
-      "epoch": 0.755608028335301,
-      "grad_norm": 0.2547611444118788,
-      "learning_rate": 1.7058615909127102e-06,
-      "loss": 0.8578,
       "step": 320
     },
     {
-      "epoch": 0.7674144037780402,
-      "grad_norm": 0.2551344265566289,
-      "learning_rate": 1.5532329544071712e-06,
-      "loss": 0.8568,
       "step": 325
     },
     {
-      "epoch": 0.7792207792207793,
-      "grad_norm": 0.24284035631227585,
-      "learning_rate": 1.406493061580881e-06,
-      "loss": 0.854,
       "step": 330
     },
     {
-      "epoch": 0.7910271546635183,
-      "grad_norm": 0.2454427828284572,
-      "learning_rate": 1.2658926150792321e-06,
-      "loss": 0.8543,
       "step": 335
     },
     {
-      "epoch": 0.8028335301062574,
-      "grad_norm": 0.24171894401645216,
-      "learning_rate": 1.1316718284065536e-06,
-      "loss": 0.8601,
       "step": 340
     },
     {
-      "epoch": 0.8146399055489965,
-      "grad_norm": 0.24890059165826273,
-      "learning_rate": 1.0040600155253766e-06,
-      "loss": 0.8588,
       "step": 345
     },
     {
-      "epoch": 0.8264462809917356,
-      "grad_norm": 0.24128541650834764,
-      "learning_rate": 8.832751990773714e-07,
-      "loss": 0.8547,
       "step": 350
     },
     {
-      "epoch": 0.8382526564344747,
-      "grad_norm": 0.25646382446616656,
-      "learning_rate": 7.695237378953224e-07,
-      "loss": 0.859,
       "step": 355
     },
     {
-      "epoch": 0.8500590318772137,
-      "grad_norm": 0.2460481416324142,
-      "learning_rate": 6.629999744425236e-07,
-      "loss": 0.8567,
       "step": 360
     },
     {
-      "epoch": 0.8618654073199528,
-      "grad_norm": 0.24513983983677975,
-      "learning_rate": 5.63885902781941e-07,
-      "loss": 0.8596,
       "step": 365
     },
     {
-      "epoch": 0.8736717827626919,
-      "grad_norm": 0.2343335791217957,
-      "learning_rate": 4.723508576424063e-07,
-      "loss": 0.8552,
       "step": 370
     },
     {
-      "epoch": 0.885478158205431,
-      "grad_norm": 0.2519214484705661,
-      "learning_rate": 3.885512251130763e-07,
-      "loss": 0.8527,
       "step": 375
     },
     {
-      "epoch": 0.89728453364817,
-      "grad_norm": 0.2440125511079392,
-      "learning_rate": 3.126301754604233e-07,
-      "loss": 0.8548,
       "step": 380
     },
     {
-      "epoch": 0.9090909090909091,
-      "grad_norm": 0.2532464399771227,
-      "learning_rate": 2.447174185242324e-07,
-      "loss": 0.854,
       "step": 385
     },
     {
-      "epoch": 0.9208972845336482,
-      "grad_norm": 0.23622376914066623,
-      "learning_rate": 1.849289821105199e-07,
-      "loss": 0.8521,
       "step": 390
     },
     {
-      "epoch": 0.9327036599763873,
-      "grad_norm": 0.2444798272238538,
-      "learning_rate": 1.333670137599713e-07,
-      "loss": 0.8532,
       "step": 395
     },
     {
-      "epoch": 0.9445100354191264,
-      "grad_norm": 0.23429527809735437,
-      "learning_rate": 9.011960623058202e-08,
-      "loss": 0.8532,
       "step": 400
     },
     {
-      "epoch": 0.9563164108618654,
-      "grad_norm": 0.23623057232372727,
-      "learning_rate": 5.526064699265754e-08,
-      "loss": 0.8506,
       "step": 405
     },
     {
-      "epoch": 0.9681227863046045,
-      "grad_norm": 0.2482774375940229,
-      "learning_rate": 2.884969199331178e-08,
-      "loss": 0.8587,
       "step": 410
     },
     {
-      "epoch": 0.9799291617473436,
-      "grad_norm": 0.23526019800815218,
-      "learning_rate": 1.0931863906127327e-08,
-      "loss": 0.8558,
       "step": 415
     },
     {
-      "epoch": 0.9917355371900827,
-      "grad_norm": 0.24074172289610016,
-      "learning_rate": 1.53777503982655e-09,
-      "loss": 0.853,
       "step": 420
     },
     {
-      "epoch": 0.9988193624557261,
-      "eval_loss": 0.8673050403594971,
-      "eval_runtime": 1.109,
-      "eval_samples_per_second": 1.804,
-      "eval_steps_per_second": 0.902,
-      "step": 423
     },
     {
-      "epoch": 0.9988193624557261,
-      "step": 423,
-      "total_flos": 1701364321222656.0,
       "train_loss": 0.0,
-      "train_runtime": 0.0113,
-      "train_samples_per_second": 2398604.87,
-      "train_steps_per_second": 37460.213
     }
   ],
   "logging_steps": 5,
-  "max_steps": 423,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 100,
@@ -638,8 +1352,8 @@
       "attributes": {}
     }
   },
-  "total_flos": 1701364321222656.0,
-  "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
 }

 {
   "best_metric": null,
   "best_model_checkpoint": null,
+  "epoch": 0.9997315436241611,
   "eval_steps": 500,
+  "global_step": 931,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
+      "epoch": 0.0010738255033557046,
+      "grad_norm": 22.873438223486826,
+      "learning_rate": 1.0638297872340426e-07,
+      "loss": 1.3013,
       "step": 1
     },
     {
+      "epoch": 0.005369127516778523,
+      "grad_norm": 21.159527745955735,
+      "learning_rate": 5.319148936170213e-07,
+      "loss": 1.3281,
       "step": 5
     },
     {
+      "epoch": 0.010738255033557046,
+      "grad_norm": 8.15243855646548,
+      "learning_rate": 1.0638297872340427e-06,
+      "loss": 1.1934,
       "step": 10
     },
     {
+      "epoch": 0.016107382550335572,
+      "grad_norm": 7.876837115931939,
+      "learning_rate": 1.595744680851064e-06,
+      "loss": 1.0403,
       "step": 15
     },
     {
+      "epoch": 0.021476510067114093,
+      "grad_norm": 3.0351073481295128,
+      "learning_rate": 2.1276595744680853e-06,
+      "loss": 0.9152,
       "step": 20
     },
     {
+      "epoch": 0.026845637583892617,
+      "grad_norm": 2.4261598045634685,
+      "learning_rate": 2.6595744680851065e-06,
+      "loss": 0.8749,
       "step": 25
     },
     {
+      "epoch": 0.032214765100671144,
+      "grad_norm": 2.216819042623432,
+      "learning_rate": 3.191489361702128e-06,
+      "loss": 0.8366,
       "step": 30
     },
     {
+      "epoch": 0.03758389261744966,
+      "grad_norm": 2.282450409349314,
+      "learning_rate": 3.723404255319149e-06,
+      "loss": 0.818,
       "step": 35
     },
     {
+      "epoch": 0.042953020134228186,
+      "grad_norm": 2.2660596456862643,
+      "learning_rate": 4.255319148936171e-06,
+      "loss": 0.7985,
       "step": 40
     },
     {
+      "epoch": 0.04832214765100671,
+      "grad_norm": 2.367380807091964,
+      "learning_rate": 4.787234042553192e-06,
+      "loss": 0.7886,
       "step": 45
     },
     {
+      "epoch": 0.053691275167785234,
+      "grad_norm": 2.385525049214499,
+      "learning_rate": 5.319148936170213e-06,
+      "loss": 0.7777,
       "step": 50
     },
     {
+      "epoch": 0.05906040268456376,
+      "grad_norm": 2.5856947730866446,
+      "learning_rate": 5.851063829787235e-06,
+      "loss": 0.7546,
       "step": 55
     },
     {
+      "epoch": 0.06442953020134229,
+      "grad_norm": 2.4085256812489084,
+      "learning_rate": 6.382978723404256e-06,
+      "loss": 0.7524,
       "step": 60
     },
     {
+      "epoch": 0.0697986577181208,
+      "grad_norm": 2.4401920680003157,
+      "learning_rate": 6.914893617021278e-06,
+      "loss": 0.7291,
       "step": 65
     },
     {
+      "epoch": 0.07516778523489932,
+      "grad_norm": 2.3253305278745215,
+      "learning_rate": 7.446808510638298e-06,
+      "loss": 0.7272,
       "step": 70
     },
     {
+      "epoch": 0.08053691275167785,
+      "grad_norm": 2.45450168917715,
+      "learning_rate": 7.97872340425532e-06,
+      "loss": 0.7127,
       "step": 75
     },
     {
+      "epoch": 0.08590604026845637,
+      "grad_norm": 2.127447895073466,
+      "learning_rate": 8.510638297872341e-06,
+      "loss": 0.7041,
       "step": 80
     },
     {
+      "epoch": 0.0912751677852349,
+      "grad_norm": 2.3028444099891705,
+      "learning_rate": 9.042553191489362e-06,
+      "loss": 0.7037,
       "step": 85
     },
     {
+      "epoch": 0.09664429530201342,
+      "grad_norm": 2.5309118167527616,
+      "learning_rate": 9.574468085106385e-06,
+      "loss": 0.6988,
       "step": 90
     },
     {
+      "epoch": 0.10201342281879194,
+      "grad_norm": 2.4219930767340743,
+      "learning_rate": 9.999964780082996e-06,
+      "loss": 0.6903,
       "step": 95
     },
     {
+      "epoch": 0.10738255033557047,
+      "grad_norm": 2.111757405385662,
+      "learning_rate": 9.998732135085665e-06,
+      "loss": 0.6801,
       "step": 100
     },
     {
+      "epoch": 0.11275167785234899,
+      "grad_norm": 2.2202581015134077,
+      "learning_rate": 9.995738990383743e-06,
+      "loss": 0.6907,
       "step": 105
     },
     {
+      "epoch": 0.11812080536912752,
+      "grad_norm": 2.626765418959801,
+      "learning_rate": 9.990986400130607e-06,
+      "loss": 0.686,
       "step": 110
     },
     {
+      "epoch": 0.12348993288590604,
+      "grad_norm": 2.2905312730653424,
+      "learning_rate": 9.984476038137437e-06,
+      "loss": 0.6687,
       "step": 115
     },
     {
+      "epoch": 0.12885906040268458,
+      "grad_norm": 2.190663676539029,
+      "learning_rate": 9.97621019728372e-06,
+      "loss": 0.6776,
       "step": 120
     },
     {
+      "epoch": 0.1342281879194631,
+      "grad_norm": 2.2515596629136825,
+      "learning_rate": 9.966191788709716e-06,
+      "loss": 0.6694,
       "step": 125
     },
     {
+      "epoch": 0.1395973154362416,
+      "grad_norm": 2.168375563832784,
+      "learning_rate": 9.954424340791195e-06,
+      "loss": 0.6752,
       "step": 130
     },
     {
+      "epoch": 0.14496644295302014,
+      "grad_norm": 2.088272443654766,
+      "learning_rate": 9.940911997896774e-06,
+      "loss": 0.6661,
       "step": 135
     },
     {
+      "epoch": 0.15033557046979865,
+      "grad_norm": 2.173874083902431,
+      "learning_rate": 9.925659518928316e-06,
+      "loss": 0.6563,
       "step": 140
     },
     {
+      "epoch": 0.15570469798657718,
+      "grad_norm": 2.0589658736370757,
+      "learning_rate": 9.908672275644898e-06,
+      "loss": 0.6607,
       "step": 145
     },
     {
+      "epoch": 0.1610738255033557,
+      "grad_norm": 2.147351094566772,
+      "learning_rate": 9.889956250770933e-06,
+      "loss": 0.6575,
       "step": 150
     },
     {
+      "epoch": 0.16644295302013423,
+      "grad_norm": 2.3349226051109584,
+      "learning_rate": 9.86951803588912e-06,
+      "loss": 0.6527,
       "step": 155
     },
     {
+      "epoch": 0.17181208053691274,
+      "grad_norm": 2.281471579219723,
+      "learning_rate": 9.847364829118963e-06,
+      "loss": 0.6544,
       "step": 160
     },
     {
+      "epoch": 0.17718120805369128,
+      "grad_norm": 2.1332428982031715,
+      "learning_rate": 9.82350443258166e-06,
+      "loss": 0.6479,
       "step": 165
     },
     {
+      "epoch": 0.1825503355704698,
+      "grad_norm": 2.0804446588419045,
+      "learning_rate": 9.797945249652295e-06,
+      "loss": 0.6439,
       "step": 170
     },
     {
+      "epoch": 0.18791946308724833,
+      "grad_norm": 2.0585417674642636,
+      "learning_rate": 9.770696282000245e-06,
+      "loss": 0.6321,
       "step": 175
     },
     {
+      "epoch": 0.19328859060402684,
+      "grad_norm": 2.1666332876300762,
+      "learning_rate": 9.741767126418898e-06,
+      "loss": 0.6336,
       "step": 180
     },
     {
+      "epoch": 0.19865771812080538,
+      "grad_norm": 2.0205631171866543,
+      "learning_rate": 9.711167971445766e-06,
+      "loss": 0.6306,
       "step": 185
     },
     {
+      "epoch": 0.2040268456375839,
+      "grad_norm": 2.4514209883366807,
+      "learning_rate": 9.67890959377418e-06,
+      "loss": 0.624,
       "step": 190
     },
     {
+      "epoch": 0.20939597315436242,
+      "grad_norm": 2.011077236989635,
+      "learning_rate": 9.645003354457872e-06,
+      "loss": 0.6176,
       "step": 195
     },
     {
+      "epoch": 0.21476510067114093,
+      "grad_norm": 2.157067702163037,
+      "learning_rate": 9.60946119490972e-06,
+      "loss": 0.6231,
       "step": 200
     },
     {
+      "epoch": 0.22013422818791947,
+      "grad_norm": 1.851876954712529,
+      "learning_rate": 9.57229563269612e-06,
+      "loss": 0.6203,
       "step": 205
     },
     {
+      "epoch": 0.22550335570469798,
+      "grad_norm": 1.9636046456639136,
+      "learning_rate": 9.533519757128426e-06,
+      "loss": 0.6186,
       "step": 210
     },
     {
+      "epoch": 0.23087248322147652,
+      "grad_norm": 2.374396960285375,
+      "learning_rate": 9.49314722465304e-06,
+      "loss": 0.6157,
       "step": 215
     },
     {
+      "epoch": 0.23624161073825503,
+      "grad_norm": 2.072084337879624,
+      "learning_rate": 9.451192254041759e-06,
+      "loss": 0.6157,
       "step": 220
     },
     {
+      "epoch": 0.24161073825503357,
+      "grad_norm": 2.0213326681347605,
+      "learning_rate": 9.407669621384073e-06,
+      "loss": 0.619,
       "step": 225
     },
     {
+      "epoch": 0.24697986577181208,
+      "grad_norm": 2.05541918839593,
+      "learning_rate": 9.362594654883185e-06,
+      "loss": 0.6043,
       "step": 230
     },
     {
+      "epoch": 0.2523489932885906,
+      "grad_norm": 1.9647313060735423,
+      "learning_rate": 9.31598322945759e-06,
+      "loss": 0.5877,
       "step": 235
     },
     {
+      "epoch": 0.25771812080536916,
+      "grad_norm": 2.321446787508396,
+      "learning_rate": 9.267851761150092e-06,
+      "loss": 0.6038,
       "step": 240
     },
     {
+      "epoch": 0.26308724832214764,
+      "grad_norm": 1.954968634006319,
+      "learning_rate": 9.218217201346251e-06,
+      "loss": 0.5973,
       "step": 245
     },
     {
+      "epoch": 0.2684563758389262,
+      "grad_norm": 1.9588200511779104,
+      "learning_rate": 9.167097030804289e-06,
+      "loss": 0.5952,
       "step": 250
     },
     {
+      "epoch": 0.2738255033557047,
+      "grad_norm": 1.978200194451611,
+      "learning_rate": 9.114509253498554e-06,
+      "loss": 0.5956,
       "step": 255
     },
     {
+      "epoch": 0.2791946308724832,
+      "grad_norm": 1.9082868769634187,
+      "learning_rate": 9.060472390278717e-06,
+      "loss": 0.5834,
       "step": 260
     },
     {
+      "epoch": 0.28456375838926173,
+      "grad_norm": 1.9947479259010255,
+      "learning_rate": 9.005005472346923e-06,
+      "loss": 0.5878,
       "step": 265
     },
     {
+      "epoch": 0.28993288590604027,
+      "grad_norm": 2.0101949394033443,
+      "learning_rate": 8.948128034555212e-06,
+      "loss": 0.5815,
       "step": 270
     },
     {
+      "epoch": 0.2953020134228188,
+      "grad_norm": 1.855173856877532,
+      "learning_rate": 8.889860108525544e-06,
+      "loss": 0.5749,
       "step": 275
     },
     {
+      "epoch": 0.3006711409395973,
+      "grad_norm": 1.8947595029633266,
+      "learning_rate": 8.83022221559489e-06,
+      "loss": 0.5597,
       "step": 280
     },
     {
+      "epoch": 0.30604026845637583,
+      "grad_norm": 2.0225835192845825,
+      "learning_rate": 8.76923535958783e-06,
+      "loss": 0.5683,
       "step": 285
     },
     {
+      "epoch": 0.31140939597315437,
+      "grad_norm": 2.047769165835403,
+      "learning_rate": 8.706921019419237e-06,
+      "loss": 0.5693,
       "step": 290
     },
     {
+      "epoch": 0.3167785234899329,
+      "grad_norm": 1.996841996415469,
+      "learning_rate": 8.643301141529619e-06,
+      "loss": 0.576,
       "step": 295
     },
     {
+      "epoch": 0.3221476510067114,
+      "grad_norm": 2.085317513003649,
+      "learning_rate": 8.578398132155846e-06,
+      "loss": 0.5586,
       "step": 300
     },
     {
+      "epoch": 0.3275167785234899,
+      "grad_norm": 2.2696870964241342,
+      "learning_rate": 8.512234849439887e-06,
+      "loss": 0.5487,
       "step": 305
     },
     {
+      "epoch": 0.33288590604026846,
+      "grad_norm": 2.295292164613049,
+      "learning_rate": 8.444834595378434e-06,
+      "loss": 0.5657,
       "step": 310
     },
     {
+      "epoch": 0.338255033557047,
+      "grad_norm": 1.9535177447525518,
+      "learning_rate": 8.376221107616187e-06,
+      "loss": 0.5492,
       "step": 315
     },
     {
+      "epoch": 0.3436241610738255,
+      "grad_norm": 1.935697674348923,
+      "learning_rate": 8.306418551085707e-06,
+      "loss": 0.5502,
       "step": 320
     },
     {
+      "epoch": 0.348993288590604,
+      "grad_norm": 1.912637650045647,
+      "learning_rate": 8.23545150949679e-06,
+      "loss": 0.5659,
       "step": 325
472
  },
473
  {
474
+ "epoch": 0.35436241610738256,
475
+ "grad_norm": 2.0813900852367437,
476
+ "learning_rate": 8.163344976678342e-06,
477
+ "loss": 0.5512,
478
  "step": 330
479
  },
480
  {
481
+ "epoch": 0.3597315436241611,
482
+ "grad_norm": 1.9178576647529397,
483
+ "learning_rate": 8.090124347775837e-06,
484
+ "loss": 0.5399,
485
  "step": 335
486
  },
487
  {
488
+ "epoch": 0.3651006711409396,
489
+ "grad_norm": 1.9921209746990098,
490
+ "learning_rate": 8.0158154103074e-06,
491
+ "loss": 0.5335,
492
  "step": 340
493
  },
494
  {
495
+ "epoch": 0.3704697986577181,
496
+ "grad_norm": 2.0251179740048686,
497
+ "learning_rate": 7.940444335081733e-06,
498
+ "loss": 0.5412,
499
  "step": 345
500
  },
501
  {
502
+ "epoch": 0.37583892617449666,
503
+ "grad_norm": 2.0237480393180536,
504
+ "learning_rate": 7.864037666981037e-06,
505
+ "loss": 0.5318,
506
  "step": 350
507
  },
508
  {
509
+ "epoch": 0.3812080536912752,
510
+ "grad_norm": 2.062903496053255,
511
+ "learning_rate": 7.786622315612182e-06,
512
+ "loss": 0.5391,
513
  "step": 355
514
  },
515
  {
516
+ "epoch": 0.3865771812080537,
517
+ "grad_norm": 1.931828289441635,
518
+ "learning_rate": 7.708225545829446e-06,
519
+ "loss": 0.525,
520
  "step": 360
521
  },
522
  {
523
+ "epoch": 0.3919463087248322,
524
+ "grad_norm": 2.004972595205748,
525
+ "learning_rate": 7.6288749681321115e-06,
526
+ "loss": 0.5173,
527
  "step": 365
528
  },
529
  {
530
+ "epoch": 0.39731543624161075,
531
+ "grad_norm": 2.0462367199517937,
532
+ "learning_rate": 7.548598528940354e-06,
533
+ "loss": 0.5154,
534
  "step": 370
535
  },
536
  {
537
+ "epoch": 0.40268456375838924,
538
+ "grad_norm": 2.3727598847724636,
539
+ "learning_rate": 7.4674245007528135e-06,
540
+ "loss": 0.5279,
541
  "step": 375
542
  },
543
  {
544
+ "epoch": 0.4080536912751678,
545
+ "grad_norm": 1.965518745162171,
546
+ "learning_rate": 7.385381472189321e-06,
547
+ "loss": 0.5255,
548
  "step": 380
549
  },
550
  {
551
+ "epoch": 0.4134228187919463,
552
+ "grad_norm": 2.0728393681212225,
553
+ "learning_rate": 7.302498337922293e-06,
554
+ "loss": 0.5152,
555
  "step": 385
556
  },
557
  {
558
+ "epoch": 0.41879194630872485,
559
+ "grad_norm": 2.0074609174878906,
560
+ "learning_rate": 7.218804288500343e-06,
561
+ "loss": 0.5224,
562
  "step": 390
563
  },
564
  {
565
+ "epoch": 0.42416107382550333,
566
+ "grad_norm": 1.9472600550525063,
567
+ "learning_rate": 7.134328800067684e-06,
568
+ "loss": 0.5209,
569
  "step": 395
570
  },
571
  {
572
+ "epoch": 0.42953020134228187,
573
+ "grad_norm": 1.9949043902713617,
574
+ "learning_rate": 7.049101623982938e-06,
575
+ "loss": 0.5082,
576
  "step": 400
577
  },
578
  {
579
+ "epoch": 0.4348993288590604,
580
+ "grad_norm": 1.834654212769176,
581
+ "learning_rate": 6.963152776341044e-06,
582
+ "loss": 0.5149,
583
  "step": 405
584
  },
585
  {
586
+ "epoch": 0.44026845637583895,
587
+ "grad_norm": 2.0502871429630822,
588
+ "learning_rate": 6.876512527401897e-06,
589
+ "loss": 0.5131,
590
  "step": 410
591
  },
592
  {
593
+ "epoch": 0.44563758389261743,
594
+ "grad_norm": 2.000365576779467,
595
+ "learning_rate": 6.789211390929497e-06,
596
+ "loss": 0.5016,
597
  "step": 415
598
  },
599
  {
600
+ "epoch": 0.45100671140939597,
601
+ "grad_norm": 2.0157623562806353,
602
+ "learning_rate": 6.701280113445324e-06,
603
+ "loss": 0.4958,
604
  "step": 420
605
  },
606
  {
607
+ "epoch": 0.4563758389261745,
608
+ "grad_norm": 1.970116584111867,
609
+ "learning_rate": 6.6127496633997475e-06,
610
+ "loss": 0.5003,
611
+ "step": 425
612
  },
613
  {
614
+ "epoch": 0.46174496644295304,
615
+ "grad_norm": 1.9483867090820592,
616
+ "learning_rate": 6.523651220265269e-06,
617
+ "loss": 0.4942,
618
+ "step": 430
619
+ },
620
+ {
621
+ "epoch": 0.4671140939597315,
622
+ "grad_norm": 2.099467823611523,
623
+ "learning_rate": 6.434016163555452e-06,
624
+ "loss": 0.4952,
625
+ "step": 435
626
+ },
627
+ {
628
+ "epoch": 0.47248322147651006,
629
+ "grad_norm": 1.9558069283541033,
630
+ "learning_rate": 6.343876061773385e-06,
631
+ "loss": 0.4924,
632
+ "step": 440
633
+ },
634
+ {
635
+ "epoch": 0.4778523489932886,
636
+ "grad_norm": 1.9847822609941113,
637
+ "learning_rate": 6.2532626612936035e-06,
638
+ "loss": 0.4823,
639
+ "step": 445
640
+ },
641
+ {
642
+ "epoch": 0.48322147651006714,
643
+ "grad_norm": 2.0688837915005993,
644
+ "learning_rate": 6.162207875181354e-06,
645
+ "loss": 0.4913,
646
+ "step": 450
647
+ },
648
+ {
649
+ "epoch": 0.4885906040268456,
650
+ "grad_norm": 1.9342878293544008,
651
+ "learning_rate": 6.070743771953157e-06,
652
+ "loss": 0.4769,
653
+ "step": 455
654
+ },
655
+ {
656
+ "epoch": 0.49395973154362416,
657
+ "grad_norm": 2.043215000887027,
658
+ "learning_rate": 5.978902564282616e-06,
659
+ "loss": 0.4752,
660
+ "step": 460
661
+ },
662
+ {
663
+ "epoch": 0.4993288590604027,
664
+ "grad_norm": 1.8676181620631764,
665
+ "learning_rate": 5.886716597655472e-06,
666
+ "loss": 0.4701,
667
+ "step": 465
668
+ },
669
+ {
670
+ "epoch": 0.5046979865771812,
671
+ "grad_norm": 1.9595541482385936,
672
+ "learning_rate": 5.7942183389778536e-06,
673
+ "loss": 0.4696,
674
+ "step": 470
675
+ },
676
+ {
677
+ "epoch": 0.5100671140939598,
678
+ "grad_norm": 1.825167080567174,
679
+ "learning_rate": 5.701440365141799e-06,
680
+ "loss": 0.4625,
681
+ "step": 475
682
+ },
683
+ {
684
+ "epoch": 0.5154362416107383,
685
+ "grad_norm": 1.9786129201297613,
686
+ "learning_rate": 5.608415351552014e-06,
687
+ "loss": 0.4596,
688
+ "step": 480
689
+ },
690
+ {
691
+ "epoch": 0.5208053691275167,
692
+ "grad_norm": 1.9920386733808053,
693
+ "learning_rate": 5.515176060617945e-06,
694
+ "loss": 0.4684,
695
+ "step": 485
696
+ },
697
+ {
698
+ "epoch": 0.5261744966442953,
699
+ "grad_norm": 1.9116799488281817,
700
+ "learning_rate": 5.421755330215223e-06,
701
+ "loss": 0.4659,
702
+ "step": 490
703
+ },
704
+ {
705
+ "epoch": 0.5315436241610738,
706
+ "grad_norm": 1.948129234992053,
707
+ "learning_rate": 5.328186062120509e-06,
708
+ "loss": 0.4436,
709
+ "step": 495
710
+ },
711
+ {
712
+ "epoch": 0.5369127516778524,
713
+ "grad_norm": 2.078748185289108,
714
+ "learning_rate": 5.23450121042383e-06,
715
+ "loss": 0.4548,
716
+ "step": 500
717
+ },
718
+ {
719
+ "epoch": 0.5422818791946309,
720
+ "grad_norm": 1.9444657978297817,
721
+ "learning_rate": 5.140733769922525e-06,
722
+ "loss": 0.4533,
723
+ "step": 505
724
+ },
725
+ {
726
+ "epoch": 0.5476510067114094,
727
+ "grad_norm": 1.9186109778492548,
728
+ "learning_rate": 5.0469167645008245e-06,
729
+ "loss": 0.4484,
730
+ "step": 510
731
+ },
732
+ {
733
+ "epoch": 0.553020134228188,
734
+ "grad_norm": 2.115910772434904,
735
+ "learning_rate": 4.953083235499177e-06,
736
+ "loss": 0.4557,
737
+ "step": 515
738
+ },
739
+ {
740
+ "epoch": 0.5583892617449664,
741
+ "grad_norm": 1.9469832075906568,
742
+ "learning_rate": 4.859266230077474e-06,
743
+ "loss": 0.4439,
744
+ "step": 520
745
+ },
746
+ {
747
+ "epoch": 0.5637583892617449,
748
+ "grad_norm": 1.90470271131317,
749
+ "learning_rate": 4.7654987895761705e-06,
750
+ "loss": 0.4431,
751
+ "step": 525
752
+ },
753
+ {
754
+ "epoch": 0.5691275167785235,
755
+ "grad_norm": 1.941709380965833,
756
+ "learning_rate": 4.671813937879494e-06,
757
+ "loss": 0.4517,
758
+ "step": 530
759
+ },
760
+ {
761
+ "epoch": 0.574496644295302,
762
+ "grad_norm": 1.9633282455666656,
763
+ "learning_rate": 4.5782446697847775e-06,
764
+ "loss": 0.4469,
765
+ "step": 535
766
+ },
767
+ {
768
+ "epoch": 0.5798657718120805,
769
+ "grad_norm": 1.8845576495595482,
770
+ "learning_rate": 4.484823939382056e-06,
771
+ "loss": 0.4316,
772
+ "step": 540
773
+ },
774
+ {
775
+ "epoch": 0.5852348993288591,
776
+ "grad_norm": 1.8811789086409214,
777
+ "learning_rate": 4.391584648447989e-06,
778
+ "loss": 0.4423,
779
+ "step": 545
780
+ },
781
+ {
782
+ "epoch": 0.5906040268456376,
783
+ "grad_norm": 1.9555309537231733,
784
+ "learning_rate": 4.298559634858202e-06,
785
+ "loss": 0.4368,
786
+ "step": 550
787
+ },
788
+ {
789
+ "epoch": 0.5959731543624162,
790
+ "grad_norm": 2.0555513727023578,
791
+ "learning_rate": 4.205781661022146e-06,
792
+ "loss": 0.4298,
793
+ "step": 555
794
+ },
795
+ {
796
+ "epoch": 0.6013422818791946,
797
+ "grad_norm": 1.8777742161685087,
798
+ "learning_rate": 4.1132834023445304e-06,
799
+ "loss": 0.426,
800
+ "step": 560
801
+ },
802
+ {
803
+ "epoch": 0.6067114093959731,
804
+ "grad_norm": 1.939266556920773,
805
+ "learning_rate": 4.021097435717386e-06,
806
+ "loss": 0.4216,
807
+ "step": 565
808
+ },
809
+ {
810
+ "epoch": 0.6120805369127517,
811
+ "grad_norm": 1.9261147196325419,
812
+ "learning_rate": 3.929256228046845e-06,
813
+ "loss": 0.4285,
814
+ "step": 570
815
+ },
816
+ {
817
+ "epoch": 0.6174496644295302,
818
+ "grad_norm": 1.805107172046572,
819
+ "learning_rate": 3.837792124818647e-06,
820
+ "loss": 0.416,
821
+ "step": 575
822
+ },
823
+ {
824
+ "epoch": 0.6228187919463087,
825
+ "grad_norm": 1.8704231334410426,
826
+ "learning_rate": 3.7467373387063973e-06,
827
+ "loss": 0.4191,
828
+ "step": 580
829
+ },
830
+ {
831
+ "epoch": 0.6281879194630873,
832
+ "grad_norm": 1.9935959039830586,
833
+ "learning_rate": 3.656123938226618e-06,
834
+ "loss": 0.4183,
835
+ "step": 585
836
+ },
837
+ {
838
+ "epoch": 0.6335570469798658,
839
+ "grad_norm": 1.9248727434625008,
840
+ "learning_rate": 3.5659838364445505e-06,
841
+ "loss": 0.4239,
842
+ "step": 590
843
+ },
844
+ {
845
+ "epoch": 0.6389261744966444,
846
+ "grad_norm": 1.9448194253706668,
847
+ "learning_rate": 3.476348779734732e-06,
848
+ "loss": 0.4118,
849
+ "step": 595
850
+ },
851
+ {
852
+ "epoch": 0.6442953020134228,
853
+ "grad_norm": 1.8244567974798083,
854
+ "learning_rate": 3.387250336600254e-06,
855
+ "loss": 0.4108,
856
+ "step": 600
857
+ },
858
+ {
859
+ "epoch": 0.6496644295302013,
860
+ "grad_norm": 1.8766359486725541,
861
+ "learning_rate": 3.298719886554677e-06,
862
+ "loss": 0.4094,
863
+ "step": 605
864
+ },
865
+ {
866
+ "epoch": 0.6550335570469799,
867
+ "grad_norm": 1.9861783019472632,
868
+ "learning_rate": 3.2107886090705035e-06,
869
+ "loss": 0.4024,
870
+ "step": 610
871
+ },
872
+ {
873
+ "epoch": 0.6604026845637584,
874
+ "grad_norm": 1.8918824877700715,
875
+ "learning_rate": 3.1234874725981045e-06,
876
+ "loss": 0.4086,
877
+ "step": 615
878
+ },
879
+ {
880
+ "epoch": 0.6657718120805369,
881
+ "grad_norm": 2.0714978550264003,
882
+ "learning_rate": 3.036847223658958e-06,
883
+ "loss": 0.396,
884
+ "step": 620
885
+ },
886
+ {
887
+ "epoch": 0.6711409395973155,
888
+ "grad_norm": 1.9560452224763145,
889
+ "learning_rate": 2.950898376017064e-06,
890
+ "loss": 0.4035,
891
+ "step": 625
892
+ },
893
+ {
894
+ "epoch": 0.676510067114094,
895
+ "grad_norm": 2.043363405835207,
896
+ "learning_rate": 2.865671199932318e-06,
897
+ "loss": 0.4068,
898
+ "step": 630
899
+ },
900
+ {
901
+ "epoch": 0.6818791946308724,
902
+ "grad_norm": 1.8816775523611597,
903
+ "learning_rate": 2.781195711499658e-06,
904
+ "loss": 0.393,
905
+ "step": 635
906
+ },
907
+ {
908
+ "epoch": 0.687248322147651,
909
+ "grad_norm": 1.8488910820957973,
910
+ "learning_rate": 2.697501662077707e-06,
911
+ "loss": 0.4013,
912
+ "step": 640
913
+ },
914
+ {
915
+ "epoch": 0.6926174496644295,
916
+ "grad_norm": 1.862219172494916,
917
+ "learning_rate": 2.6146185278106807e-06,
918
+ "loss": 0.3909,
919
+ "step": 645
920
+ },
921
+ {
922
+ "epoch": 0.697986577181208,
923
+ "grad_norm": 2.012779150472371,
924
+ "learning_rate": 2.5325754992471886e-06,
925
+ "loss": 0.3933,
926
+ "step": 650
927
+ },
928
+ {
929
+ "epoch": 0.7033557046979866,
930
+ "grad_norm": 1.925776837174688,
931
+ "learning_rate": 2.4514014710596467e-06,
932
+ "loss": 0.3794,
933
+ "step": 655
934
+ },
935
+ {
936
+ "epoch": 0.7087248322147651,
937
+ "grad_norm": 1.8323121691067996,
938
+ "learning_rate": 2.3711250318678906e-06,
939
+ "loss": 0.3906,
940
+ "step": 660
941
+ },
942
+ {
943
+ "epoch": 0.7140939597315437,
944
+ "grad_norm": 1.8449642618290978,
945
+ "learning_rate": 2.2917744541705544e-06,
946
+ "loss": 0.3804,
947
+ "step": 665
948
+ },
949
+ {
950
+ "epoch": 0.7194630872483222,
951
+ "grad_norm": 1.8385577849219443,
952
+ "learning_rate": 2.2133776843878185e-06,
953
+ "loss": 0.3896,
954
+ "step": 670
955
+ },
956
+ {
957
+ "epoch": 0.7248322147651006,
958
+ "grad_norm": 1.8863509359096609,
959
+ "learning_rate": 2.1359623330189655e-06,
960
+ "loss": 0.3906,
961
+ "step": 675
962
+ },
963
+ {
964
+ "epoch": 0.7302013422818792,
965
+ "grad_norm": 1.9412331601540616,
966
+ "learning_rate": 2.059555664918268e-06,
967
+ "loss": 0.3872,
968
+ "step": 680
969
+ },
970
+ {
971
+ "epoch": 0.7355704697986577,
972
+ "grad_norm": 1.795387975158009,
973
+ "learning_rate": 1.9841845896926022e-06,
974
+ "loss": 0.3798,
975
+ "step": 685
976
+ },
977
+ {
978
+ "epoch": 0.7409395973154362,
979
+ "grad_norm": 1.843173352636606,
980
+ "learning_rate": 1.9098756522241634e-06,
981
+ "loss": 0.3788,
982
+ "step": 690
983
+ },
984
+ {
985
+ "epoch": 0.7463087248322148,
986
+ "grad_norm": 1.912174039399666,
987
+ "learning_rate": 1.8366550233216584e-06,
988
+ "loss": 0.3695,
989
+ "step": 695
990
+ },
991
+ {
992
+ "epoch": 0.7516778523489933,
993
+ "grad_norm": 1.8237677647091424,
994
+ "learning_rate": 1.7645484905032129e-06,
995
+ "loss": 0.3933,
996
+ "step": 700
997
+ },
998
+ {
999
+ "epoch": 0.7570469798657719,
1000
+ "grad_norm": 1.7822160523413584,
1001
+ "learning_rate": 1.6935814489142937e-06,
1002
+ "loss": 0.3781,
1003
+ "step": 705
1004
+ },
1005
+ {
1006
+ "epoch": 0.7624161073825504,
1007
+ "grad_norm": 1.8878271721301074,
1008
+ "learning_rate": 1.6237788923838149e-06,
1009
+ "loss": 0.3751,
1010
+ "step": 710
1011
+ },
1012
+ {
1013
+ "epoch": 0.7677852348993288,
1014
+ "grad_norm": 1.9082160841545055,
1015
+ "learning_rate": 1.555165404621567e-06,
1016
+ "loss": 0.3728,
1017
+ "step": 715
1018
+ },
1019
+ {
1020
+ "epoch": 0.7731543624161074,
1021
+ "grad_norm": 1.816281209572928,
1022
+ "learning_rate": 1.487765150560116e-06,
1023
+ "loss": 0.3777,
1024
+ "step": 720
1025
+ },
1026
+ {
1027
+ "epoch": 0.7785234899328859,
1028
+ "grad_norm": 1.91354765663735,
1029
+ "learning_rate": 1.4216018678441558e-06,
1030
+ "loss": 0.3699,
1031
+ "step": 725
1032
+ },
1033
+ {
1034
+ "epoch": 0.7838926174496644,
1035
+ "grad_norm": 1.9355520287879238,
1036
+ "learning_rate": 1.3566988584703817e-06,
1037
+ "loss": 0.371,
1038
+ "step": 730
1039
+ },
1040
+ {
1041
+ "epoch": 0.789261744966443,
1042
+ "grad_norm": 1.7280419120289419,
1043
+ "learning_rate": 1.293078980580766e-06,
1044
+ "loss": 0.3764,
1045
+ "step": 735
1046
+ },
1047
+ {
1048
+ "epoch": 0.7946308724832215,
1049
+ "grad_norm": 1.9044555736805637,
1050
+ "learning_rate": 1.2307646404121692e-06,
1051
+ "loss": 0.3664,
1052
+ "step": 740
1053
+ },
1054
+ {
1055
+ "epoch": 0.8,
1056
+ "grad_norm": 1.891432724737264,
1057
+ "learning_rate": 1.1697777844051105e-06,
1058
+ "loss": 0.3664,
1059
+ "step": 745
1060
+ },
1061
+ {
1062
+ "epoch": 0.8053691275167785,
1063
+ "grad_norm": 1.6814136447042585,
1064
+ "learning_rate": 1.1101398914744565e-06,
1065
+ "loss": 0.3574,
1066
+ "step": 750
1067
+ },
1068
+ {
1069
+ "epoch": 0.810738255033557,
1070
+ "grad_norm": 1.8225153893838018,
1071
+ "learning_rate": 1.0518719654447896e-06,
1072
+ "loss": 0.3684,
1073
+ "step": 755
1074
+ },
1075
+ {
1076
+ "epoch": 0.8161073825503355,
1077
+ "grad_norm": 1.8275415575673986,
1078
+ "learning_rate": 9.949945276530782e-07,
1079
+ "loss": 0.3603,
1080
+ "step": 760
1081
+ },
1082
+ {
1083
+ "epoch": 0.8214765100671141,
1084
+ "grad_norm": 1.853253611406343,
1085
+ "learning_rate": 9.395276097212841e-07,
1086
+ "loss": 0.3651,
1087
+ "step": 765
1088
+ },
1089
+ {
1090
+ "epoch": 0.8268456375838926,
1091
+ "grad_norm": 1.835073747374209,
1092
+ "learning_rate": 8.854907465014479e-07,
1093
+ "loss": 0.3677,
1094
+ "step": 770
1095
+ },
1096
+ {
1097
+ "epoch": 0.8322147651006712,
1098
+ "grad_norm": 1.7695339228605491,
1099
+ "learning_rate": 8.329029691957124e-07,
1100
+ "loss": 0.3606,
1101
+ "step": 775
1102
+ },
1103
+ {
1104
+ "epoch": 0.8375838926174497,
1105
+ "grad_norm": 1.749975183558492,
1106
+ "learning_rate": 7.817827986537508e-07,
1107
+ "loss": 0.3511,
1108
+ "step": 780
1109
+ },
1110
+ {
1111
+ "epoch": 0.8429530201342282,
1112
+ "grad_norm": 1.9426041133261787,
1113
+ "learning_rate": 7.321482388499096e-07,
1114
+ "loss": 0.358,
1115
+ "step": 785
1116
+ },
1117
+ {
1118
+ "epoch": 0.8483221476510067,
1119
+ "grad_norm": 1.8171481735775137,
1120
+ "learning_rate": 6.840167705424106e-07,
1121
+ "loss": 0.3544,
1122
+ "step": 790
1123
+ },
1124
+ {
1125
+ "epoch": 0.8536912751677852,
1126
+ "grad_norm": 1.8412832647944588,
1127
+ "learning_rate": 6.374053451168166e-07,
1128
+ "loss": 0.3661,
1129
+ "step": 795
1130
+ },
1131
+ {
1132
+ "epoch": 0.8590604026845637,
1133
+ "grad_norm": 1.8350930759774366,
1134
+ "learning_rate": 5.92330378615929e-07,
1135
+ "loss": 0.3537,
1136
+ "step": 800
1137
+ },
1138
+ {
1139
+ "epoch": 0.8644295302013423,
1140
+ "grad_norm": 1.723224003576655,
1141
+ "learning_rate": 5.488077459582425e-07,
1142
+ "loss": 0.3553,
1143
+ "step": 805
1144
+ },
1145
+ {
1146
+ "epoch": 0.8697986577181208,
1147
+ "grad_norm": 1.8473917985800616,
1148
+ "learning_rate": 5.068527753469604e-07,
1149
+ "loss": 0.3603,
1150
+ "step": 810
1151
+ },
1152
+ {
1153
+ "epoch": 0.8751677852348994,
1154
+ "grad_norm": 1.8325217665657294,
1155
+ "learning_rate": 4.664802428715753e-07,
1156
+ "loss": 0.348,
1157
+ "step": 815
1158
+ },
1159
+ {
1160
+ "epoch": 0.8805369127516779,
1161
+ "grad_norm": 1.7346304200820148,
1162
+ "learning_rate": 4.2770436730388166e-07,
1163
+ "loss": 0.3498,
1164
+ "step": 820
1165
+ },
1166
+ {
1167
+ "epoch": 0.8859060402684564,
1168
+ "grad_norm": 1.7575151506469913,
1169
+ "learning_rate": 3.9053880509028086e-07,
1170
+ "loss": 0.3616,
1171
+ "step": 825
1172
+ },
1173
+ {
1174
+ "epoch": 0.8912751677852349,
1175
+ "grad_norm": 1.800873655206166,
1176
+ "learning_rate": 3.549966455421305e-07,
1177
+ "loss": 0.3507,
1178
+ "step": 830
1179
+ },
1180
+ {
1181
+ "epoch": 0.8966442953020134,
1182
+ "grad_norm": 1.710054226395714,
1183
+ "learning_rate": 3.2109040622582186e-07,
1184
+ "loss": 0.3527,
1185
+ "step": 835
1186
+ },
1187
+ {
1188
+ "epoch": 0.9020134228187919,
1189
+ "grad_norm": 1.6705083298206793,
1190
+ "learning_rate": 2.8883202855423676e-07,
1191
+ "loss": 0.3516,
1192
+ "step": 840
1193
+ },
1194
+ {
1195
+ "epoch": 0.9073825503355705,
1196
+ "grad_norm": 1.7474504216215188,
1197
+ "learning_rate": 2.582328735811029e-07,
1198
+ "loss": 0.3452,
1199
+ "step": 845
1200
+ },
1201
+ {
1202
+ "epoch": 0.912751677852349,
1203
+ "grad_norm": 1.7463913866112468,
1204
+ "learning_rate": 2.2930371799975593e-07,
1205
+ "loss": 0.3471,
1206
+ "step": 850
1207
+ },
1208
+ {
1209
+ "epoch": 0.9181208053691275,
1210
+ "grad_norm": 1.7297820385087201,
1211
+ "learning_rate": 2.0205475034770606e-07,
1212
+ "loss": 0.3468,
1213
+ "step": 855
1214
+ },
1215
+ {
1216
+ "epoch": 0.9234899328859061,
1217
+ "grad_norm": 1.7482101229826381,
1218
+ "learning_rate": 1.7649556741833995e-07,
1219
+ "loss": 0.3478,
1220
+ "step": 860
1221
+ },
1222
+ {
1223
+ "epoch": 0.9288590604026845,
1224
+ "grad_norm": 1.8150110385873126,
1225
+ "learning_rate": 1.5263517088103862e-07,
1226
+ "loss": 0.3586,
1227
+ "step": 865
1228
+ },
1229
+ {
1230
+ "epoch": 0.934228187919463,
1231
+ "grad_norm": 1.8153438746846808,
1232
+ "learning_rate": 1.304819641108801e-07,
1233
+ "loss": 0.3555,
1234
+ "step": 870
1235
+ },
1236
+ {
1237
+ "epoch": 0.9395973154362416,
1238
+ "grad_norm": 1.8087296470250676,
1239
+ "learning_rate": 1.1004374922906846e-07,
1240
+ "loss": 0.3457,
1241
+ "step": 875
1242
+ },
1243
+ {
1244
+ "epoch": 0.9449664429530201,
1245
+ "grad_norm": 1.7195355098862968,
1246
+ "learning_rate": 9.132772435510362e-08,
1247
+ "loss": 0.349,
1248
+ "step": 880
1249
+ },
1250
+ {
1251
+ "epoch": 0.9503355704697987,
1252
+ "grad_norm": 1.7647364179507874,
1253
+ "learning_rate": 7.434048107168523e-08,
1254
+ "loss": 0.3466,
1255
+ "step": 885
1256
+ },
1257
+ {
1258
+ "epoch": 0.9557046979865772,
1259
+ "grad_norm": 1.9088484806569315,
1260
+ "learning_rate": 5.908800210322696e-08,
1261
+ "loss": 0.3411,
1262
+ "step": 890
1263
+ },
1264
+ {
1265
+ "epoch": 0.9610738255033557,
1266
+ "grad_norm": 1.7271817479075642,
1267
+ "learning_rate": 4.55756592088058e-08,
1268
+ "loss": 0.3484,
1269
+ "step": 895
1270
+ },
1271
+ {
1272
+ "epoch": 0.9664429530201343,
1273
+ "grad_norm": 1.7372584442728163,
1274
+ "learning_rate": 3.3808211290284886e-08,
1275
+ "loss": 0.3433,
1276
+ "step": 900
1277
+ },
1278
+ {
1279
+ "epoch": 0.9718120805369127,
1280
+ "grad_norm": 1.808380468788094,
1281
+ "learning_rate": 2.378980271628195e-08,
1282
+ "loss": 0.3528,
1283
+ "step": 905
1284
+ },
1285
+ {
1286
+ "epoch": 0.9771812080536912,
1287
+ "grad_norm": 1.7605143851131846,
1288
+ "learning_rate": 1.552396186256411e-08,
1289
+ "loss": 0.3445,
1290
+ "step": 910
1291
+ },
1292
+ {
1293
+ "epoch": 0.9825503355704698,
1294
+ "grad_norm": 1.8047618073453124,
1295
+ "learning_rate": 9.013599869394096e-09,
1296
+ "loss": 0.3497,
1297
+ "step": 915
1298
+ },
1299
+ {
1300
+ "epoch": 0.9879194630872483,
1301
+ "grad_norm": 1.7837820069864043,
1302
+ "learning_rate": 4.261009616257638e-09,
1303
+ "loss": 0.3452,
1304
+ "step": 920
1305
+ },
1306
+ {
1307
+ "epoch": 0.9932885906040269,
1308
+ "grad_norm": 1.7898194618346237,
1309
+ "learning_rate": 1.2678649143349485e-09,
1310
+ "loss": 0.3497,
1311
+ "step": 925
1312
+ },
1313
+ {
1314
+ "epoch": 0.9986577181208054,
1315
+ "grad_norm": 1.8036764007244224,
1316
+ "learning_rate": 3.5219917003948003e-11,
1317
+ "loss": 0.346,
1318
+ "step": 930
1319
+ },
1320
+ {
1321
+ "epoch": 0.9997315436241611,
1322
+ "eval_loss": 0.2977491617202759,
1323
+ "eval_runtime": 1.3637,
1324
+ "eval_samples_per_second": 1.467,
1325
+ "eval_steps_per_second": 0.733,
1326
+ "step": 931
1327
+ },
1328
+ {
1329
+ "epoch": 0.9997315436241611,
1330
+ "step": 931,
1331
+ "total_flos": 194880114524160.0,
1332
  "train_loss": 0.0,
1333
+ "train_runtime": 0.0085,
1334
+ "train_samples_per_second": 3497894.361,
1335
+ "train_steps_per_second": 109279.854
1336
  }
1337
  ],
1338
  "logging_steps": 5,
1339
+ "max_steps": 931,
1340
  "num_input_tokens_seen": 0,
1341
  "num_train_epochs": 1,
1342
  "save_steps": 100,
 
1352
  "attributes": {}
1353
  }
1354
  },
1355
+ "total_flos": 194880114524160.0,
1356
+ "train_batch_size": 2,
1357
  "trial_name": null,
1358
  "trial_params": null
1359
  }
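The `log_history` records in the trainer state above all share one schema: `epoch`, `grad_norm`, `learning_rate`, `loss`, and `step` for each logging event (one record every 5 steps, per `logging_steps`), followed by a final eval record and a run summary. As a quick sanity check of a checkpoint, a minimal sketch like the one below loads the saved `trainer_state.json` and plots the loss curve alongside the cosine-shaped learning-rate decay visible in the entries; the local file path and the use of `matplotlib` are assumptions, not part of this repository.

```python
import json

import matplotlib.pyplot as plt  # assumed available; any plotting library works

# Assumed path: trainer_state.json sits next to the checkpoint files.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step training records; the eval and summary records
# use "eval_loss"/"train_loss" instead of "loss" and are skipped here.
logs = [r for r in state["log_history"] if "loss" in r and "learning_rate" in r]
steps = [r["step"] for r in logs]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, [r["loss"] for r in logs])
ax_loss.set(xlabel="step", ylabel="train loss")
ax_lr.plot(steps, [r["learning_rate"] for r in logs])
ax_lr.set(xlabel="step", ylabel="learning rate")
fig.tight_layout()
plt.show()
```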
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a6c12087aca541aaa2c9502933a655e7e92228785f5ba9e1ce617b2a1d3bde19
2
+ oid sha256:5c30d56d1e5b3de7f5358c607038a976c0e55b139526af0ae83b39ad89800afe
3
  size 6520
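Only the Git LFS pointer changed here: the `oid` is the SHA-256 digest of the new binary's contents, and `size` is its byte count. After fetching the real file (e.g. with `git lfs pull`), the pointer can be verified with a short sketch like this; the local path is an assumption:

```python
import hashlib

# Assumption: training_args.bin has been materialized by `git lfs pull`
# and is the 6520-byte binary, not the pointer text itself.
with open("training_args.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

expected = "5c30d56d1e5b3de7f5358c607038a976c0e55b139526af0ae83b39ad89800afe"
print("pointer matches file:", digest == expected)
```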