LitterBrother-Xiao committed
Commit 4318673
1 Parent(s): fd14bf8
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 100000,
+   "eos_token_id": 100001,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 4096,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "moe_intermediate_size": 11008,
+   "num_attention_heads": 32,
+   "num_hidden_layers": 30,
+   "num_key_value_heads": 32,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.42.4",
+   "use_cache": false,
+   "vocab_size": 102400
+ }
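config.json declares a 30-layer Llama-style decoder (hidden size 4096, 32 attention heads, 102400-token vocabulary) stored in bfloat16. A minimal sketch, assuming the files from this commit sit in a hypothetical local directory "./checkpoint": load the config with transformers and read those hyperparameters back.

```python
# Minimal sketch: load config.json via transformers (local path is hypothetical).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./checkpoint")  # directory holding config.json
print(config.model_type)         # "llama"
print(config.hidden_size)        # 4096
print(config.num_hidden_layers)  # 30
print(config.vocab_size)         # 102400
```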
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 100000,
+   "eos_token_id": 100001,
+   "transformers_version": "4.42.4"
+ }
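generation_config.json only pins the special-token ids used at inference time; "_from_model_config": true indicates it was derived from config.json. A minimal sketch of reading it back (same hypothetical path as above):

```python
# Minimal sketch: GenerationConfig reads generation_config.json directly.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./checkpoint")  # hypothetical path
assert gen_cfg.bos_token_id == 100000
assert gen_cfg.eos_token_id == 100001
```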
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e2db620bab6614ed36ce6415237c0569bdae6ded58539fc2c23749e6ea3e608
+ size 4987202208
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3b893e4733ec2bccb30fc81678c8dd6c901298c726643307757a288a7c8ddfa
+ size 4980945440
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41cc5b152df9a75f8730f9a0b9a707e6bf88975fe81926224c102c3c6c96f73b
+ size 3852615520
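Each model-*.safetensors entry above is a Git LFS pointer, not the weights themselves: the actual shard is fetched through LFS, and its SHA-256 should match the pointer's oid. A minimal verification sketch (local filename assumed):

```python
# Minimal sketch: check a downloaded shard against the oid in its LFS pointer.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "41cc5b152df9a75f8730f9a0b9a707e6bf88975fe81926224c102c3c6c96f73b"
assert sha256_of("model-00003-of-00003.safetensors") == expected
```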
model.safetensors.index.json ADDED
@@ -0,0 +1,280 @@
+ {
+   "metadata": {
+     "total_size": 13820731392
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
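model.safetensors.index.json maps every tensor name to the shard that stores it (13,820,731,392 bytes in total across the three files). A minimal sketch of using the weight_map to load one tensor lazily with the safetensors library:

```python
# Minimal sketch: route a single tensor load through the index's weight_map.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # "model-00001-of-00003.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)    # only this tensor is read from disk
print(tensor.shape, tensor.dtype)
```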
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "bos_token": {
+     "content": "<|begin▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|end▁of▁sentence|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "100000": {
+       "content": "<|begin▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100001": {
+       "content": "<|end▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|begin▁of▁sentence|>",
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "legacy": true,
+   "model_max_length": 1024,
+   "pad_token": "<|end▁of▁sentence|>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": null,
+   "use_default_system_prompt": false
+ }
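The chat_template above renders conversations as a plain "User:/Assistant:" transcript prefixed with the BOS token. A minimal sketch, assuming the tokenizer files from this commit are loadable from the same hypothetical local directory:

```python
# Minimal sketch: render the chat_template defined in tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint")  # hypothetical path
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# Expected, per the template above:
# "<|begin▁of▁sentence|>You are a helpful assistant.\n\nUser: Hello!\n\nAssistant:"
print(prompt)
```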
trainer_state.json ADDED
@@ -0,0 +1,603 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9967637540453075,
+   "eval_steps": 100,
+   "global_step": 77,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.025889967637540454,
+       "grad_norm": 9.941023016098997,
+       "learning_rate": 5e-08,
+       "logits/chosen": 34.564327239990234,
+       "logits/rejected": 21.430946350097656,
+       "logps/chosen": -193.26638793945312,
+       "logps/rejected": -169.8800506591797,
+       "loss": 0.7032,
+       "rewards/accuracies": 0.46875,
+       "rewards/chosen": 0.005037403665482998,
+       "rewards/margins": -0.0025963345542550087,
+       "rewards/rejected": 0.007633737754076719,
+       "step": 2
+     },
+     {
+       "epoch": 0.05177993527508091,
+       "grad_norm": 9.84426167557295,
+       "learning_rate": 1e-07,
+       "logits/chosen": 34.75300598144531,
+       "logits/rejected": 25.311954498291016,
+       "logps/chosen": -189.80618286132812,
+       "logps/rejected": -129.24330139160156,
+       "loss": 0.6961,
+       "rewards/accuracies": 0.484375,
+       "rewards/chosen": -0.003958990331739187,
+       "rewards/margins": 0.0072118965908885,
+       "rewards/rejected": -0.011170885525643826,
+       "step": 4
+     },
+     {
+       "epoch": 0.07766990291262135,
+       "grad_norm": 9.460230767868643,
+       "learning_rate": 1.5e-07,
+       "logits/chosen": 37.28773498535156,
+       "logits/rejected": 27.513395309448242,
+       "logps/chosen": -206.36117553710938,
+       "logps/rejected": -171.872802734375,
+       "loss": 0.6974,
+       "rewards/accuracies": 0.53125,
+       "rewards/chosen": -0.034006692469120026,
+       "rewards/margins": -0.018697379156947136,
+       "rewards/rejected": -0.015309315174818039,
+       "step": 6
+     },
+     {
+       "epoch": 0.10355987055016182,
+       "grad_norm": 10.186609614004015,
+       "learning_rate": 2e-07,
+       "logits/chosen": 36.74854278564453,
+       "logits/rejected": 31.93742561340332,
+       "logps/chosen": -184.870361328125,
+       "logps/rejected": -124.89741516113281,
+       "loss": 0.6975,
+       "rewards/accuracies": 0.46875,
+       "rewards/chosen": -0.013519443571567535,
+       "rewards/margins": -0.024582354351878166,
+       "rewards/rejected": 0.011062909848988056,
+       "step": 8
+     },
+     {
+       "epoch": 0.12944983818770225,
+       "grad_norm": 9.457766196917632,
+       "learning_rate": 2.5e-07,
+       "logits/chosen": 36.366363525390625,
+       "logits/rejected": 29.217514038085938,
+       "logps/chosen": -189.72547912597656,
+       "logps/rejected": -133.34400939941406,
+       "loss": 0.6916,
+       "rewards/accuracies": 0.484375,
+       "rewards/chosen": 0.003550291759893298,
+       "rewards/margins": -0.00579411955550313,
+       "rewards/rejected": 0.009344411082565784,
+       "step": 10
+     },
+     {
+       "epoch": 0.1553398058252427,
+       "grad_norm": 9.820715630245564,
+       "learning_rate": 3e-07,
+       "logits/chosen": 35.559329986572266,
+       "logits/rejected": 24.949851989746094,
+       "logps/chosen": -191.26722717285156,
+       "logps/rejected": -136.72520446777344,
+       "loss": 0.6927,
+       "rewards/accuracies": 0.546875,
+       "rewards/chosen": 0.020757507532835007,
+       "rewards/margins": 0.039941225200891495,
+       "rewards/rejected": -0.019183719530701637,
+       "step": 12
+     },
+     {
+       "epoch": 0.18122977346278318,
+       "grad_norm": 10.000904242116592,
+       "learning_rate": 3.5e-07,
+       "logits/chosen": 36.79935073852539,
+       "logits/rejected": 26.168617248535156,
+       "logps/chosen": -189.4495849609375,
+       "logps/rejected": -138.61245727539062,
+       "loss": 0.7015,
+       "rewards/accuracies": 0.453125,
+       "rewards/chosen": 0.0028548368718475103,
+       "rewards/margins": 0.00897835474461317,
+       "rewards/rejected": -0.006123516708612442,
+       "step": 14
+     },
+     {
+       "epoch": 0.20711974110032363,
+       "grad_norm": 9.798236588997984,
+       "learning_rate": 4e-07,
+       "logits/chosen": 38.9239387512207,
+       "logits/rejected": 30.127975463867188,
+       "logps/chosen": -193.46170043945312,
+       "logps/rejected": -136.24960327148438,
+       "loss": 0.699,
+       "rewards/accuracies": 0.40625,
+       "rewards/chosen": -0.002874697558581829,
+       "rewards/margins": -0.022318758070468903,
+       "rewards/rejected": 0.0194440595805645,
+       "step": 16
+     },
+     {
+       "epoch": 0.23300970873786409,
+       "grad_norm": 9.959562684371269,
+       "learning_rate": 4.5e-07,
+       "logits/chosen": 34.25529479980469,
+       "logits/rejected": 25.71901512145996,
+       "logps/chosen": -204.4850616455078,
+       "logps/rejected": -169.4137725830078,
+       "loss": 0.6928,
+       "rewards/accuracies": 0.453125,
+       "rewards/chosen": 0.02741333469748497,
+       "rewards/margins": 0.01749654859304428,
+       "rewards/rejected": 0.009916786104440689,
+       "step": 18
+     },
+     {
+       "epoch": 0.2588996763754045,
+       "grad_norm": 9.793123390682076,
+       "learning_rate": 5e-07,
+       "logits/chosen": 36.560420989990234,
+       "logits/rejected": 24.802711486816406,
+       "logps/chosen": -207.83480834960938,
+       "logps/rejected": -159.98675537109375,
+       "loss": 0.692,
+       "rewards/accuracies": 0.546875,
+       "rewards/chosen": 0.004708289634436369,
+       "rewards/margins": 0.004980792291462421,
+       "rewards/rejected": -0.00027250126004219055,
+       "step": 20
+     },
+     {
+       "epoch": 0.284789644012945,
+       "grad_norm": 10.276248281197612,
+       "learning_rate": 4.997252228714278e-07,
+       "logits/chosen": 36.67436981201172,
+       "logits/rejected": 25.816286087036133,
+       "logps/chosen": -203.99571228027344,
+       "logps/rejected": -157.78639221191406,
+       "loss": 0.689,
+       "rewards/accuracies": 0.578125,
+       "rewards/chosen": 0.04165205731987953,
+       "rewards/margins": 0.02663404308259487,
+       "rewards/rejected": 0.015018017962574959,
+       "step": 22
+     },
+     {
+       "epoch": 0.3106796116504854,
+       "grad_norm": 9.36691332286361,
+       "learning_rate": 4.989014955054745e-07,
+       "logits/chosen": 32.67152404785156,
+       "logits/rejected": 26.35110092163086,
+       "logps/chosen": -222.406982421875,
+       "logps/rejected": -174.97457885742188,
+       "loss": 0.6728,
+       "rewards/accuracies": 0.515625,
+       "rewards/chosen": 0.034607432782649994,
+       "rewards/margins": 0.0227619968354702,
+       "rewards/rejected": 0.011845439672470093,
+       "step": 24
+     },
+     {
+       "epoch": 0.3365695792880259,
+       "grad_norm": 10.075140623553729,
+       "learning_rate": 4.975306286336627e-07,
+       "logits/chosen": 34.61298751831055,
+       "logits/rejected": 22.848501205444336,
+       "logps/chosen": -190.12135314941406,
+       "logps/rejected": -169.7635955810547,
+       "loss": 0.6757,
+       "rewards/accuracies": 0.609375,
+       "rewards/chosen": 0.05975666269659996,
+       "rewards/margins": 0.05689358711242676,
+       "rewards/rejected": 0.002863079309463501,
+       "step": 26
+     },
+     {
+       "epoch": 0.36245954692556637,
+       "grad_norm": 9.766651095371804,
+       "learning_rate": 4.956156357188939e-07,
+       "logits/chosen": 37.388824462890625,
+       "logits/rejected": 24.61412239074707,
+       "logps/chosen": -183.52816772460938,
+       "logps/rejected": -142.87033081054688,
+       "loss": 0.6649,
+       "rewards/accuracies": 0.703125,
+       "rewards/chosen": 0.09949050843715668,
+       "rewards/margins": 0.07790307700634003,
+       "rewards/rejected": 0.0215874332934618,
+       "step": 28
+     },
+     {
+       "epoch": 0.3883495145631068,
+       "grad_norm": 9.534993155007124,
+       "learning_rate": 4.931607263312032e-07,
+       "logits/chosen": 35.072879791259766,
+       "logits/rejected": 23.343503952026367,
+       "logps/chosen": -201.98580932617188,
+       "logps/rejected": -145.196044921875,
+       "loss": 0.6501,
+       "rewards/accuracies": 0.71875,
+       "rewards/chosen": 0.16385386884212494,
+       "rewards/margins": 0.10670199245214462,
+       "rewards/rejected": 0.05715188756585121,
+       "step": 30
+     },
+     {
+       "epoch": 0.41423948220064727,
+       "grad_norm": 9.757243010304897,
+       "learning_rate": 4.9017129689421e-07,
+       "logits/chosen": 38.24150466918945,
+       "logits/rejected": 25.80931854248047,
+       "logps/chosen": -182.25128173828125,
+       "logps/rejected": -137.34368896484375,
+       "loss": 0.65,
+       "rewards/accuracies": 0.734375,
+       "rewards/chosen": 0.15896882116794586,
+       "rewards/margins": 0.09183251112699509,
+       "rewards/rejected": 0.06713630259037018,
+       "step": 32
+     },
+     {
+       "epoch": 0.4401294498381877,
+       "grad_norm": 9.351248951840846,
+       "learning_rate": 4.866539188226085e-07,
+       "logits/chosen": 36.365047454833984,
+       "logits/rejected": 26.499774932861328,
+       "logps/chosen": -186.34649658203125,
+       "logps/rejected": -148.74656677246094,
+       "loss": 0.6368,
+       "rewards/accuracies": 0.734375,
+       "rewards/chosen": 0.16049998998641968,
+       "rewards/margins": 0.12190230935811996,
+       "rewards/rejected": 0.03859768062829971,
+       "step": 34
+     },
+     {
+       "epoch": 0.46601941747572817,
+       "grad_norm": 9.747161361199119,
+       "learning_rate": 4.826163240767716e-07,
+       "logits/chosen": 32.18162155151367,
+       "logits/rejected": 21.5031681060791,
+       "logps/chosen": -211.55101013183594,
+       "logps/rejected": -170.30624389648438,
+       "loss": 0.6312,
+       "rewards/accuracies": 0.8125,
+       "rewards/chosen": 0.20323500037193298,
+       "rewards/margins": 0.14077112078666687,
+       "rewards/rejected": 0.06246389076113701,
+       "step": 36
+     },
+     {
+       "epoch": 0.4919093851132686,
+       "grad_norm": 8.77950594997527,
+       "learning_rate": 4.780673881662242e-07,
+       "logits/chosen": 36.65592956542969,
+       "logits/rejected": 28.596372604370117,
+       "logps/chosen": -178.170166015625,
+       "logps/rejected": -115.30381774902344,
+       "loss": 0.6193,
+       "rewards/accuracies": 0.828125,
+       "rewards/chosen": 0.2410201132297516,
+       "rewards/margins": 0.1858924776315689,
+       "rewards/rejected": 0.05512763559818268,
+       "step": 38
+     },
+     {
+       "epoch": 0.517799352750809,
+       "grad_norm": 8.94781303872067,
+       "learning_rate": 4.730171106393466e-07,
+       "logits/chosen": 31.84834098815918,
+       "logits/rejected": 25.41543197631836,
+       "logps/chosen": -211.7396240234375,
+       "logps/rejected": -159.92942810058594,
+       "loss": 0.6189,
+       "rewards/accuracies": 0.796875,
+       "rewards/chosen": 0.2539159655570984,
+       "rewards/margins": 0.1977871209383011,
+       "rewards/rejected": 0.0561288520693779,
+       "step": 40
+     },
+     {
+       "epoch": 0.5436893203883495,
+       "grad_norm": 9.230235665250099,
+       "learning_rate": 4.6747659310219757e-07,
+       "logits/chosen": 39.43976593017578,
+       "logits/rejected": 28.925722122192383,
+       "logps/chosen": -206.06393432617188,
+       "logps/rejected": -152.7125244140625,
+       "loss": 0.6154,
+       "rewards/accuracies": 0.828125,
+       "rewards/chosen": 0.2500843405723572,
+       "rewards/margins": 0.19579222798347473,
+       "rewards/rejected": 0.05429207533597946,
+       "step": 42
+     },
+     {
+       "epoch": 0.56957928802589,
+       "grad_norm": 9.122707874165032,
+       "learning_rate": 4.6145801481477433e-07,
+       "logits/chosen": 31.554033279418945,
+       "logits/rejected": 22.213409423828125,
+       "logps/chosen": -214.81248474121094,
+       "logps/rejected": -200.99317932128906,
+       "loss": 0.5942,
+       "rewards/accuracies": 0.828125,
+       "rewards/chosen": 0.3934122622013092,
+       "rewards/margins": 0.2697424292564392,
+       "rewards/rejected": 0.12366984784603119,
+       "step": 44
+     },
+     {
+       "epoch": 0.5954692556634305,
+       "grad_norm": 8.293775972177585,
+       "learning_rate": 4.549746059183561e-07,
+       "logits/chosen": 40.96059799194336,
+       "logits/rejected": 31.219024658203125,
+       "logps/chosen": -186.52183532714844,
+       "logps/rejected": -130.83743286132812,
+       "loss": 0.5531,
+       "rewards/accuracies": 0.84375,
+       "rewards/chosen": 0.4351516366004944,
+       "rewards/margins": 0.29554393887519836,
+       "rewards/rejected": 0.13960765302181244,
+       "step": 46
+     },
+     {
+       "epoch": 0.6213592233009708,
+       "grad_norm": 8.320484480615326,
+       "learning_rate": 4.480406183527823e-07,
+       "logits/chosen": 34.98313522338867,
+       "logits/rejected": 23.058082580566406,
+       "logps/chosen": -194.0576934814453,
+       "logps/rejected": -143.09188842773438,
+       "loss": 0.5291,
+       "rewards/accuracies": 0.90625,
+       "rewards/chosen": 0.5364688634872437,
+       "rewards/margins": 0.3940187096595764,
+       "rewards/rejected": 0.14245013892650604,
+       "step": 48
+     },
+     {
+       "epoch": 0.6472491909385113,
+       "grad_norm": 7.915902357485634,
+       "learning_rate": 4.4067129452759546e-07,
+       "logits/chosen": 39.90618133544922,
+       "logits/rejected": 27.514339447021484,
+       "logps/chosen": -188.83193969726562,
+       "logps/rejected": -126.44863891601562,
+       "loss": 0.5153,
+       "rewards/accuracies": 0.921875,
+       "rewards/chosen": 0.5586945414543152,
+       "rewards/margins": 0.3602239787578583,
+       "rewards/rejected": 0.19847053289413452,
+       "step": 50
+     },
+     {
+       "epoch": 0.6731391585760518,
+       "grad_norm": 7.903680790440658,
+       "learning_rate": 4.3288283381591725e-07,
+       "logits/chosen": 33.0372428894043,
+       "logits/rejected": 21.942291259765625,
+       "logps/chosen": -183.81349182128906,
+       "logps/rejected": -142.9326934814453,
+       "loss": 0.5032,
+       "rewards/accuracies": 0.859375,
+       "rewards/chosen": 0.6302269697189331,
+       "rewards/margins": 0.4071354568004608,
+       "rewards/rejected": 0.22309155762195587,
+       "step": 52
+     },
+     {
+       "epoch": 0.6990291262135923,
+       "grad_norm": 7.297509807618061,
+       "learning_rate": 4.246923569447104e-07,
+       "logits/chosen": 34.447837829589844,
+       "logits/rejected": 20.60545539855957,
+       "logps/chosen": -179.7378692626953,
+       "logps/rejected": -145.105712890625,
+       "loss": 0.4965,
+       "rewards/accuracies": 0.9375,
+       "rewards/chosen": 0.727641224861145,
+       "rewards/margins": 0.47567129135131836,
+       "rewards/rejected": 0.25196999311447144,
+       "step": 54
+     },
+     {
+       "epoch": 0.7249190938511327,
+       "grad_norm": 7.283409160445081,
+       "learning_rate": 4.161178683597054e-07,
+       "logits/chosen": 35.52771759033203,
+       "logits/rejected": 30.491806030273438,
+       "logps/chosen": -211.5355224609375,
+       "logps/rejected": -149.41921997070312,
+       "loss": 0.4999,
+       "rewards/accuracies": 0.96875,
+       "rewards/chosen": 0.752875804901123,
+       "rewards/margins": 0.49092933535575867,
+       "rewards/rejected": 0.261946439743042,
+       "step": 56
+     },
+     {
+       "epoch": 0.7508090614886731,
+       "grad_norm": 7.796223292724318,
+       "learning_rate": 4.0717821664772124e-07,
+       "logits/chosen": 40.88484573364258,
+       "logits/rejected": 29.62355613708496,
+       "logps/chosen": -196.11395263671875,
+       "logps/rejected": -143.0408935546875,
+       "loss": 0.491,
+       "rewards/accuracies": 0.90625,
+       "rewards/chosen": 0.7662146091461182,
+       "rewards/margins": 0.5029052495956421,
+       "rewards/rejected": 0.26330941915512085,
+       "step": 58
+     },
+     {
+       "epoch": 0.7766990291262136,
+       "grad_norm": 7.057826513245534,
+       "learning_rate": 3.978930531033806e-07,
+       "logits/chosen": 40.54041290283203,
+       "logits/rejected": 30.305131912231445,
+       "logps/chosen": -174.91925048828125,
+       "logps/rejected": -112.93276977539062,
+       "loss": 0.4852,
+       "rewards/accuracies": 0.90625,
+       "rewards/chosen": 0.7946022748947144,
+       "rewards/margins": 0.5570634603500366,
+       "rewards/rejected": 0.23753879964351654,
+       "step": 60
+     },
+     {
+       "epoch": 0.8025889967637541,
+       "grad_norm": 7.4086734288874645,
+       "learning_rate": 3.882827885312998e-07,
+       "logits/chosen": 39.428497314453125,
+       "logits/rejected": 29.150917053222656,
+       "logps/chosen": -191.66470336914062,
+       "logps/rejected": -136.3485107421875,
+       "loss": 0.4617,
+       "rewards/accuracies": 0.984375,
+       "rewards/chosen": 0.7605564594268799,
+       "rewards/margins": 0.5766815543174744,
+       "rewards/rejected": 0.1838749349117279,
+       "step": 62
+     },
+     {
+       "epoch": 0.8284789644012945,
+       "grad_norm": 6.8861010004738095,
+       "learning_rate": 3.7836854837871044e-07,
+       "logits/chosen": 35.57158279418945,
+       "logits/rejected": 25.836395263671875,
+       "logps/chosen": -196.1132049560547,
+       "logps/rejected": -142.07156372070312,
+       "loss": 0.4596,
+       "rewards/accuracies": 0.96875,
+       "rewards/chosen": 0.861929714679718,
+       "rewards/margins": 0.6038858294487,
+       "rewards/rejected": 0.2580438554286957,
+       "step": 64
+     },
+     {
+       "epoch": 0.8543689320388349,
+       "grad_norm": 6.501368437260124,
+       "learning_rate": 3.681721262971413e-07,
+       "logits/chosen": 37.78006362915039,
+       "logits/rejected": 27.594064712524414,
+       "logps/chosen": -164.67950439453125,
+       "logps/rejected": -110.48625183105469,
+       "loss": 0.4547,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": 0.8753312826156616,
+       "rewards/margins": 0.6975246071815491,
+       "rewards/rejected": 0.17780669033527374,
+       "step": 66
+     },
+     {
+       "epoch": 0.8802588996763754,
+       "grad_norm": 6.543034078515488,
+       "learning_rate": 3.577159362352426e-07,
+       "logits/chosen": 37.94480514526367,
+       "logits/rejected": 27.400300979614258,
+       "logps/chosen": -161.64242553710938,
+       "logps/rejected": -118.77005004882812,
+       "loss": 0.4531,
+       "rewards/accuracies": 0.9375,
+       "rewards/chosen": 0.8563450574874878,
+       "rewards/margins": 0.6019262075424194,
+       "rewards/rejected": 0.25441890954971313,
+       "step": 68
+     },
+     {
+       "epoch": 0.9061488673139159,
+       "grad_norm": 6.720711950688133,
+       "learning_rate": 3.470229631680624e-07,
+       "logits/chosen": 38.4688835144043,
+       "logits/rejected": 28.796794891357422,
+       "logps/chosen": -184.064697265625,
+       "logps/rejected": -134.16714477539062,
+       "loss": 0.4334,
+       "rewards/accuracies": 0.921875,
+       "rewards/chosen": 0.8126658201217651,
+       "rewards/margins": 0.6111183166503906,
+       "rewards/rejected": 0.2015475183725357,
+       "step": 70
+     },
+     {
+       "epoch": 0.9320388349514563,
+       "grad_norm": 6.228494469432083,
+       "learning_rate": 3.361167125710832e-07,
+       "logits/chosen": 42.73847579956055,
+       "logits/rejected": 31.657482147216797,
+       "logps/chosen": -183.9188232421875,
+       "logps/rejected": -129.78868103027344,
+       "loss": 0.4365,
+       "rewards/accuracies": 0.984375,
+       "rewards/chosen": 0.8676687479019165,
+       "rewards/margins": 0.6447817087173462,
+       "rewards/rejected": 0.2228870689868927,
+       "step": 72
+     },
+     {
+       "epoch": 0.9579288025889967,
+       "grad_norm": 6.71909319850701,
+       "learning_rate": 3.2502115875008516e-07,
+       "logits/chosen": 34.51922607421875,
+       "logits/rejected": 20.536556243896484,
+       "logps/chosen": -169.9495391845703,
+       "logps/rejected": -114.22787475585938,
+       "loss": 0.4202,
+       "rewards/accuracies": 0.96875,
+       "rewards/chosen": 0.9300148487091064,
+       "rewards/margins": 0.7187902331352234,
+       "rewards/rejected": 0.21122469007968903,
+       "step": 74
+     },
+     {
+       "epoch": 0.9838187702265372,
+       "grad_norm": 6.3858630141282795,
+       "learning_rate": 3.137606921404191e-07,
+       "logits/chosen": 32.11302185058594,
+       "logits/rejected": 20.860279083251953,
+       "logps/chosen": -204.4998779296875,
+       "logps/rejected": -183.44058227539062,
+       "loss": 0.4167,
+       "rewards/accuracies": 0.96875,
+       "rewards/chosen": 0.9314306378364563,
+       "rewards/margins": 0.751998782157898,
+       "rewards/rejected": 0.17943182587623596,
+       "step": 76
+     }
+   ],
+   "logging_steps": 2,
+   "max_steps": 154,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 50000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
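The log_history entries carry preference-optimization metrics (rewards/chosen, rewards/rejected, rewards/margins, logps/*), consistent with TRL's DPO-style trainers: loss falls from about 0.70 to 0.42 and rewards/accuracies climbs from roughly 0.47 toward 1.0 over the first epoch (global_step 77 of max_steps 154). A minimal sketch of pulling those curves out of the file:

```python
# Minimal sketch: extract loss and reward-margin curves from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  "
          f"margin {entry['rewards/margins']:+.4f}  "
          f"acc {entry['rewards/accuracies']:.3f}")
```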
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57f28f90efd8dbef6a46731681c86c48389058d148a59acf0e68abe0559cd695
+ size 7160
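training_args.bin is a small (7,160-byte) LFS-tracked binary. By the usual Trainer convention it is the pickled training-arguments object, so it can be inspected with torch.load; that convention is an assumption here, since the file itself is opaque in this diff.

```python
# Minimal sketch (assumes the usual Trainer convention of pickling the
# training arguments into training_args.bin): inspect it with torch.load.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)               # e.g. "TrainingArguments"
print(args.per_device_train_batch_size)  # should match train_batch_size above
```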