yaya-sy committed on
Commit edc3bc2
1 Parent(s): 727b131

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,273 @@
1
+ {
2
+ "_name_or_path": "/gpfsdswork/dataset/HuggingFace_Models/mistralai/Mixtral-8x7B-v0.1/",
3
+ "architectures": [
4
+ "MixtralForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 4096,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 14336,
13
+ "max_position_embeddings": 32768,
14
+ "model_type": "mixtral",
15
+ "num_attention_heads": 32,
16
+ "num_experts_per_tok": 2,
17
+ "num_hidden_layers": 32,
18
+ "num_key_value_heads": 8,
19
+ "num_local_experts": 8,
20
+ "output_router_logits": false,
21
+ "ranks": {
22
+ "model.layers.0.block_sparse_moe.experts.0.w1": 1024,
23
+ "model.layers.0.block_sparse_moe.experts.0.w2": 1024,
24
+ "model.layers.0.block_sparse_moe.experts.0.w3": 1024,
25
+ "model.layers.0.block_sparse_moe.experts.1.w1": 1024,
26
+ "model.layers.0.block_sparse_moe.experts.1.w2": 1024,
27
+ "model.layers.0.block_sparse_moe.experts.1.w3": 1024,
28
+ "model.layers.0.block_sparse_moe.experts.2.w1": 1024,
29
+ "model.layers.0.block_sparse_moe.experts.2.w2": 1024,
30
+ "model.layers.0.block_sparse_moe.experts.2.w3": 1024,
31
+ "model.layers.0.block_sparse_moe.experts.3.w1": 1024,
32
+ "model.layers.0.block_sparse_moe.experts.3.w2": 1024,
33
+ "model.layers.0.block_sparse_moe.experts.3.w3": 1024,
34
+ "model.layers.0.block_sparse_moe.experts.4.w1": 1024,
35
+ "model.layers.0.block_sparse_moe.experts.4.w2": 1024,
36
+ "model.layers.0.block_sparse_moe.experts.4.w3": 1024,
37
+ "model.layers.0.block_sparse_moe.experts.5.w1": 1024,
38
+ "model.layers.0.block_sparse_moe.experts.5.w2": 1024,
39
+ "model.layers.0.block_sparse_moe.experts.5.w3": 1024,
40
+ "model.layers.0.block_sparse_moe.experts.6.w1": 1024,
41
+ "model.layers.0.block_sparse_moe.experts.6.w2": 1024,
42
+ "model.layers.0.block_sparse_moe.experts.6.w3": 1024,
43
+ "model.layers.0.block_sparse_moe.experts.7.w1": 1024,
44
+ "model.layers.0.block_sparse_moe.experts.7.w2": 1024,
45
+ "model.layers.0.block_sparse_moe.experts.7.w3": 1024,
46
+ "model.layers.1.block_sparse_moe.experts.0.w1": 1024,
47
+ "model.layers.1.block_sparse_moe.experts.0.w2": 1024,
48
+ "model.layers.1.block_sparse_moe.experts.0.w3": 1024,
49
+ "model.layers.1.block_sparse_moe.experts.1.w1": 1024,
50
+ "model.layers.1.block_sparse_moe.experts.1.w2": 1024,
51
+ "model.layers.1.block_sparse_moe.experts.1.w3": 1024,
52
+ "model.layers.1.block_sparse_moe.experts.2.w1": 1024,
53
+ "model.layers.1.block_sparse_moe.experts.2.w2": 1024,
54
+ "model.layers.1.block_sparse_moe.experts.2.w3": 1024,
55
+ "model.layers.1.block_sparse_moe.experts.3.w1": 1024,
56
+ "model.layers.1.block_sparse_moe.experts.3.w2": 1024,
57
+ "model.layers.1.block_sparse_moe.experts.3.w3": 1024,
58
+ "model.layers.1.block_sparse_moe.experts.4.w1": 1024,
59
+ "model.layers.1.block_sparse_moe.experts.4.w2": 1024,
60
+ "model.layers.1.block_sparse_moe.experts.4.w3": 1024,
61
+ "model.layers.1.block_sparse_moe.experts.5.w1": 1024,
62
+ "model.layers.1.block_sparse_moe.experts.5.w2": 1024,
63
+ "model.layers.1.block_sparse_moe.experts.5.w3": 1024,
64
+ "model.layers.1.block_sparse_moe.experts.6.w1": 1024,
65
+ "model.layers.1.block_sparse_moe.experts.6.w2": 1024,
66
+ "model.layers.1.block_sparse_moe.experts.6.w3": 1024,
67
+ "model.layers.1.block_sparse_moe.experts.7.w1": 1024,
68
+ "model.layers.1.block_sparse_moe.experts.7.w2": 1024,
69
+ "model.layers.1.block_sparse_moe.experts.7.w3": 1024,
70
+ "model.layers.2.block_sparse_moe.experts.0.w1": 1024,
71
+ "model.layers.2.block_sparse_moe.experts.0.w2": 1024,
72
+ "model.layers.2.block_sparse_moe.experts.0.w3": 1024,
73
+ "model.layers.2.block_sparse_moe.experts.1.w1": 1024,
74
+ "model.layers.2.block_sparse_moe.experts.1.w2": 1024,
75
+ "model.layers.2.block_sparse_moe.experts.1.w3": 1024,
76
+ "model.layers.2.block_sparse_moe.experts.2.w1": 1024,
77
+ "model.layers.2.block_sparse_moe.experts.2.w2": 1024,
78
+ "model.layers.2.block_sparse_moe.experts.2.w3": 1024,
79
+ "model.layers.2.block_sparse_moe.experts.3.w1": 1024,
80
+ "model.layers.2.block_sparse_moe.experts.3.w2": 1024,
81
+ "model.layers.2.block_sparse_moe.experts.3.w3": 1024,
82
+ "model.layers.2.block_sparse_moe.experts.4.w1": 1024,
83
+ "model.layers.2.block_sparse_moe.experts.4.w2": 1024,
84
+ "model.layers.2.block_sparse_moe.experts.4.w3": 1024,
85
+ "model.layers.2.block_sparse_moe.experts.5.w1": 1024,
86
+ "model.layers.2.block_sparse_moe.experts.5.w2": 1024,
87
+ "model.layers.2.block_sparse_moe.experts.5.w3": 1024,
88
+ "model.layers.2.block_sparse_moe.experts.6.w1": 1024,
89
+ "model.layers.2.block_sparse_moe.experts.6.w2": 1024,
90
+ "model.layers.2.block_sparse_moe.experts.6.w3": 1024,
91
+ "model.layers.2.block_sparse_moe.experts.7.w1": 1024,
92
+ "model.layers.2.block_sparse_moe.experts.7.w2": 1024,
93
+ "model.layers.2.block_sparse_moe.experts.7.w3": 1024,
94
+ "model.layers.3.block_sparse_moe.experts.0.w1": 1024,
95
+ "model.layers.3.block_sparse_moe.experts.0.w2": 1024,
96
+ "model.layers.3.block_sparse_moe.experts.0.w3": 1024,
97
+ "model.layers.3.block_sparse_moe.experts.1.w1": 1024,
98
+ "model.layers.3.block_sparse_moe.experts.1.w2": 1024,
99
+ "model.layers.3.block_sparse_moe.experts.1.w3": 1024,
100
+ "model.layers.3.block_sparse_moe.experts.2.w1": 1024,
101
+ "model.layers.3.block_sparse_moe.experts.2.w2": 1024,
102
+ "model.layers.3.block_sparse_moe.experts.2.w3": 1024,
103
+ "model.layers.3.block_sparse_moe.experts.3.w1": 1024,
104
+ "model.layers.3.block_sparse_moe.experts.3.w2": 1024,
105
+ "model.layers.3.block_sparse_moe.experts.3.w3": 1024,
106
+ "model.layers.3.block_sparse_moe.experts.4.w1": 1024,
107
+ "model.layers.3.block_sparse_moe.experts.4.w2": 1024,
108
+ "model.layers.3.block_sparse_moe.experts.4.w3": 1024,
109
+ "model.layers.3.block_sparse_moe.experts.5.w1": 1024,
110
+ "model.layers.3.block_sparse_moe.experts.5.w2": 1024,
111
+ "model.layers.3.block_sparse_moe.experts.5.w3": 1024,
112
+ "model.layers.3.block_sparse_moe.experts.6.w1": 1024,
113
+ "model.layers.3.block_sparse_moe.experts.6.w2": 1024,
114
+ "model.layers.3.block_sparse_moe.experts.6.w3": 1024,
115
+ "model.layers.3.block_sparse_moe.experts.7.w1": 1024,
116
+ "model.layers.3.block_sparse_moe.experts.7.w2": 1024,
117
+ "model.layers.3.block_sparse_moe.experts.7.w3": 1024,
118
+ "model.layers.4.block_sparse_moe.experts.0.w1": 1024,
119
+ "model.layers.4.block_sparse_moe.experts.0.w2": 1024,
120
+ "model.layers.4.block_sparse_moe.experts.0.w3": 1024,
121
+ "model.layers.4.block_sparse_moe.experts.1.w1": 1024,
122
+ "model.layers.4.block_sparse_moe.experts.1.w2": 1024,
123
+ "model.layers.4.block_sparse_moe.experts.1.w3": 1024,
124
+ "model.layers.4.block_sparse_moe.experts.2.w1": 1024,
125
+ "model.layers.4.block_sparse_moe.experts.2.w2": 1024,
126
+ "model.layers.4.block_sparse_moe.experts.2.w3": 1024,
127
+ "model.layers.4.block_sparse_moe.experts.3.w1": 1024,
128
+ "model.layers.4.block_sparse_moe.experts.3.w2": 1024,
129
+ "model.layers.4.block_sparse_moe.experts.3.w3": 1024,
130
+ "model.layers.4.block_sparse_moe.experts.4.w1": 1024,
131
+ "model.layers.4.block_sparse_moe.experts.4.w2": 1024,
132
+ "model.layers.4.block_sparse_moe.experts.4.w3": 1024,
133
+ "model.layers.4.block_sparse_moe.experts.5.w1": 1024,
134
+ "model.layers.4.block_sparse_moe.experts.5.w2": 1024,
135
+ "model.layers.4.block_sparse_moe.experts.5.w3": 1024,
136
+ "model.layers.4.block_sparse_moe.experts.6.w1": 1024,
137
+ "model.layers.4.block_sparse_moe.experts.6.w2": 1024,
138
+ "model.layers.4.block_sparse_moe.experts.6.w3": 1024,
139
+ "model.layers.4.block_sparse_moe.experts.7.w1": 1024,
140
+ "model.layers.4.block_sparse_moe.experts.7.w2": 1024,
141
+ "model.layers.4.block_sparse_moe.experts.7.w3": 1024,
142
+ "model.layers.5.block_sparse_moe.experts.0.w1": 1024,
143
+ "model.layers.5.block_sparse_moe.experts.0.w2": 1024,
144
+ "model.layers.5.block_sparse_moe.experts.0.w3": 1024,
145
+ "model.layers.5.block_sparse_moe.experts.1.w1": 1024,
146
+ "model.layers.5.block_sparse_moe.experts.1.w2": 1024,
147
+ "model.layers.5.block_sparse_moe.experts.1.w3": 1024,
148
+ "model.layers.5.block_sparse_moe.experts.2.w1": 1024,
149
+ "model.layers.5.block_sparse_moe.experts.2.w2": 1024,
150
+ "model.layers.5.block_sparse_moe.experts.2.w3": 1024,
151
+ "model.layers.5.block_sparse_moe.experts.3.w1": 1024,
152
+ "model.layers.5.block_sparse_moe.experts.3.w2": 1024,
153
+ "model.layers.5.block_sparse_moe.experts.3.w3": 1024,
154
+ "model.layers.5.block_sparse_moe.experts.4.w1": 1024,
155
+ "model.layers.5.block_sparse_moe.experts.4.w2": 1024,
156
+ "model.layers.5.block_sparse_moe.experts.4.w3": 1024,
157
+ "model.layers.5.block_sparse_moe.experts.5.w1": 1024,
158
+ "model.layers.5.block_sparse_moe.experts.5.w2": 1024,
159
+ "model.layers.5.block_sparse_moe.experts.5.w3": 1024,
160
+ "model.layers.5.block_sparse_moe.experts.6.w1": 1024,
161
+ "model.layers.5.block_sparse_moe.experts.6.w2": 1024,
162
+ "model.layers.5.block_sparse_moe.experts.6.w3": 1024,
163
+ "model.layers.5.block_sparse_moe.experts.7.w1": 1024,
164
+ "model.layers.5.block_sparse_moe.experts.7.w2": 1024,
165
+ "model.layers.5.block_sparse_moe.experts.7.w3": 1024,
166
+ "model.layers.6.block_sparse_moe.experts.0.w1": 1024,
167
+ "model.layers.6.block_sparse_moe.experts.0.w2": 1024,
168
+ "model.layers.6.block_sparse_moe.experts.0.w3": 1024,
169
+ "model.layers.6.block_sparse_moe.experts.1.w1": 1024,
170
+ "model.layers.6.block_sparse_moe.experts.1.w2": 1024,
171
+ "model.layers.6.block_sparse_moe.experts.1.w3": 1024,
172
+ "model.layers.6.block_sparse_moe.experts.2.w1": 1024,
173
+ "model.layers.6.block_sparse_moe.experts.2.w2": 1024,
174
+ "model.layers.6.block_sparse_moe.experts.2.w3": 1024,
175
+ "model.layers.6.block_sparse_moe.experts.3.w1": 1024,
176
+ "model.layers.6.block_sparse_moe.experts.3.w2": 1024,
177
+ "model.layers.6.block_sparse_moe.experts.3.w3": 1024,
178
+ "model.layers.6.block_sparse_moe.experts.4.w1": 1024,
179
+ "model.layers.6.block_sparse_moe.experts.4.w2": 1024,
180
+ "model.layers.6.block_sparse_moe.experts.4.w3": 1024,
181
+ "model.layers.6.block_sparse_moe.experts.5.w1": 1024,
182
+ "model.layers.6.block_sparse_moe.experts.5.w2": 1024,
183
+ "model.layers.6.block_sparse_moe.experts.5.w3": 1024,
184
+ "model.layers.6.block_sparse_moe.experts.6.w1": 1024,
185
+ "model.layers.6.block_sparse_moe.experts.6.w2": 1024,
186
+ "model.layers.6.block_sparse_moe.experts.6.w3": 1024,
187
+ "model.layers.6.block_sparse_moe.experts.7.w1": 1024,
188
+ "model.layers.6.block_sparse_moe.experts.7.w2": 1024,
189
+ "model.layers.6.block_sparse_moe.experts.7.w3": 1024,
190
+ "model.layers.7.block_sparse_moe.experts.0.w1": 1024,
191
+ "model.layers.7.block_sparse_moe.experts.0.w2": 1024,
192
+ "model.layers.7.block_sparse_moe.experts.0.w3": 1024,
193
+ "model.layers.7.block_sparse_moe.experts.1.w1": 1024,
194
+ "model.layers.7.block_sparse_moe.experts.1.w2": 1024,
195
+ "model.layers.7.block_sparse_moe.experts.1.w3": 1024,
196
+ "model.layers.7.block_sparse_moe.experts.2.w1": 1024,
197
+ "model.layers.7.block_sparse_moe.experts.2.w2": 1024,
198
+ "model.layers.7.block_sparse_moe.experts.2.w3": 1024,
199
+ "model.layers.7.block_sparse_moe.experts.3.w1": 1024,
200
+ "model.layers.7.block_sparse_moe.experts.3.w2": 1024,
201
+ "model.layers.7.block_sparse_moe.experts.3.w3": 1024,
202
+ "model.layers.7.block_sparse_moe.experts.4.w1": 1024,
203
+ "model.layers.7.block_sparse_moe.experts.4.w2": 1024,
204
+ "model.layers.7.block_sparse_moe.experts.4.w3": 1024,
205
+ "model.layers.7.block_sparse_moe.experts.5.w1": 1024,
206
+ "model.layers.7.block_sparse_moe.experts.5.w2": 1024,
207
+ "model.layers.7.block_sparse_moe.experts.5.w3": 1024,
208
+ "model.layers.7.block_sparse_moe.experts.6.w1": 1024,
209
+ "model.layers.7.block_sparse_moe.experts.6.w2": 1024,
210
+ "model.layers.7.block_sparse_moe.experts.6.w3": 1024,
211
+ "model.layers.7.block_sparse_moe.experts.7.w1": 1024,
212
+ "model.layers.7.block_sparse_moe.experts.7.w2": 1024,
213
+ "model.layers.7.block_sparse_moe.experts.7.w3": 1024,
214
+ "model.layers.8.block_sparse_moe.experts.0.w1": 1024,
215
+ "model.layers.8.block_sparse_moe.experts.0.w2": 1024,
216
+ "model.layers.8.block_sparse_moe.experts.0.w3": 1024,
217
+ "model.layers.8.block_sparse_moe.experts.1.w1": 1024,
218
+ "model.layers.8.block_sparse_moe.experts.1.w2": 1024,
219
+ "model.layers.8.block_sparse_moe.experts.1.w3": 1024,
220
+ "model.layers.8.block_sparse_moe.experts.2.w1": 1024,
221
+ "model.layers.8.block_sparse_moe.experts.2.w2": 1024,
222
+ "model.layers.8.block_sparse_moe.experts.2.w3": 1024,
223
+ "model.layers.8.block_sparse_moe.experts.3.w1": 1024,
224
+ "model.layers.8.block_sparse_moe.experts.3.w2": 1024,
225
+ "model.layers.8.block_sparse_moe.experts.3.w3": 1024,
226
+ "model.layers.8.block_sparse_moe.experts.4.w1": 1024,
227
+ "model.layers.8.block_sparse_moe.experts.4.w2": 1024,
228
+ "model.layers.8.block_sparse_moe.experts.4.w3": 1024,
229
+ "model.layers.8.block_sparse_moe.experts.5.w1": 1024,
230
+ "model.layers.8.block_sparse_moe.experts.5.w2": 1024,
231
+ "model.layers.8.block_sparse_moe.experts.5.w3": 1024,
232
+ "model.layers.8.block_sparse_moe.experts.6.w1": 1024,
233
+ "model.layers.8.block_sparse_moe.experts.6.w2": 1024,
234
+ "model.layers.8.block_sparse_moe.experts.6.w3": 1024,
235
+ "model.layers.8.block_sparse_moe.experts.7.w1": 1024,
236
+ "model.layers.8.block_sparse_moe.experts.7.w2": 1024,
237
+ "model.layers.8.block_sparse_moe.experts.7.w3": 1024,
238
+ "model.layers.9.block_sparse_moe.experts.0.w1": 1280,
239
+ "model.layers.9.block_sparse_moe.experts.0.w2": 1536,
240
+ "model.layers.9.block_sparse_moe.experts.0.w3": 1536,
241
+ "model.layers.9.block_sparse_moe.experts.1.w1": 1536,
242
+ "model.layers.9.block_sparse_moe.experts.1.w2": 1536,
243
+ "model.layers.9.block_sparse_moe.experts.1.w3": 1536,
244
+ "model.layers.9.block_sparse_moe.experts.2.w1": 1536,
245
+ "model.layers.9.block_sparse_moe.experts.2.w2": 1536,
246
+ "model.layers.9.block_sparse_moe.experts.2.w3": 1536,
247
+ "model.layers.9.block_sparse_moe.experts.3.w1": 1536,
248
+ "model.layers.9.block_sparse_moe.experts.3.w2": 1536,
249
+ "model.layers.9.block_sparse_moe.experts.3.w3": 1536,
250
+ "model.layers.9.block_sparse_moe.experts.4.w1": 1536,
251
+ "model.layers.9.block_sparse_moe.experts.4.w2": 1536,
252
+ "model.layers.9.block_sparse_moe.experts.4.w3": 1536,
253
+ "model.layers.9.block_sparse_moe.experts.5.w1": 1536,
254
+ "model.layers.9.block_sparse_moe.experts.5.w2": 1536,
255
+ "model.layers.9.block_sparse_moe.experts.5.w3": 1536,
256
+ "model.layers.9.block_sparse_moe.experts.6.w1": 1536,
257
+ "model.layers.9.block_sparse_moe.experts.6.w2": 1536,
258
+ "model.layers.9.block_sparse_moe.experts.6.w3": 1536,
259
+ "model.layers.9.block_sparse_moe.experts.7.w1": 1536,
260
+ "model.layers.9.block_sparse_moe.experts.7.w2": 1536,
261
+ "model.layers.9.block_sparse_moe.experts.7.w3": 1536
262
+ },
263
+ "rms_norm_eps": 1e-05,
264
+ "rope_theta": 1000000.0,
265
+ "router_aux_loss_coef": 0.02,
266
+ "router_jitter_noise": 0.0,
267
+ "sliding_window": null,
268
+ "tie_word_embeddings": false,
269
+ "torch_dtype": "bfloat16",
270
+ "transformers_version": "4.45.2",
271
+ "use_cache": true,
272
+ "vocab_size": 32000
273
+ }
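The only non-standard field in this configuration is `ranks`, which maps expert weight names (the `w1`/`w2`/`w3` projections of each `block_sparse_moe` expert) to the inner dimension of a low-rank factorization: 1024 for layers 0-8 and mostly 1536 for layer 9. The sketch below shows what one such entry implies, building a factorized replacement for a single expert matrix in the same `nn.Sequential(nn.Linear(..., rank), nn.Linear(rank, ...))` style that the custom attention code in modeling_mixtral.py uses for its projections. Treat it as an illustration only: the exact way the experts are factorized is an assumption here, and the config path is a placeholder.

```python
import json
import torch.nn as nn

# Illustrative only: derive a low-rank factorization for one expert weight
# from the "ranks" mapping in config.json (path is a placeholder).
with open("config.json") as f:
    cfg = json.load(f)

hidden_size = cfg["hidden_size"]              # 4096
intermediate_size = cfg["intermediate_size"]  # 14336
rank = cfg["ranks"]["model.layers.0.block_sparse_moe.experts.0.w1"]  # 1024

# A single hidden_size x intermediate_size matrix becomes two thinner ones
# (4096 -> 1024 -> 14336), which is where the parameter savings come from.
w1_factored = nn.Sequential(
    nn.Linear(hidden_size, rank, bias=False),
    nn.Linear(rank, intermediate_size, bias=False),
)
print(sum(p.numel() for p in w1_factored.parameters()))  # vs. 4096 * 14336
```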
configuration_mixtral.py ADDED
@@ -0,0 +1,171 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Mixtral AI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Mixtral model configuration"""
16
+
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers.utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ class MixtralConfig(PretrainedConfig):
25
+ r"""
26
+ This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate a
27
+ Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration
28
+ with the defaults will yield a configuration similar to that of Mixtral-8x7B-v0.1 or Mixtral-8x7B-Instruct-v0.1.
29
+
30
+ [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)
31
+ [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 32000):
39
+ Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the
40
+ `inputs_ids` passed when calling [`MixtralModel`]
41
+ hidden_size (`int`, *optional*, defaults to 4096):
42
+ Dimension of the hidden representations.
43
+ intermediate_size (`int`, *optional*, defaults to 14336):
44
+ Dimension of the MLP representations.
45
+ num_hidden_layers (`int`, *optional*, defaults to 32):
46
+ Number of hidden layers in the Transformer encoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 32):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ num_key_value_heads (`int`, *optional*, defaults to 8):
50
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
51
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
52
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
53
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
54
+ by mean-pooling all the original heads within that group. For more details, check out [this
55
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
56
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
57
+ The non-linear activation function (function or string) in the decoder.
58
+ max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
59
+ The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention
60
+ allows sequences of up to 4096*32 tokens.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ pad_token_id (`int`, *optional*):
69
+ The id of the padding token.
70
+ bos_token_id (`int`, *optional*, defaults to 1):
71
+ The id of the "beginning-of-sequence" token.
72
+ eos_token_id (`int`, *optional*, defaults to 2):
73
+ The id of the "end-of-sequence" token.
74
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
75
+ Whether the model's input and output word embeddings should be tied.
76
+ rope_theta (`float`, *optional*, defaults to 1000000.0):
77
+ The base period of the RoPE embeddings.
78
+ sliding_window (`int`, *optional*):
79
+ Sliding window attention window size. If not specified, will default to `4096`.
80
+ attention_dropout (`float`, *optional*, defaults to 0.0):
81
+ The dropout ratio for the attention probabilities.
82
+ num_experts_per_tok (`int`, *optional*, defaults to 2):
83
+ The number of experts to route per token; can also be interpreted as the `top-k` routing
84
+ parameter
85
+ num_local_experts (`int`, *optional*, defaults to 8):
86
+ Number of experts per Sparse MLP layer.
87
+ output_router_logits (`bool`, *optional*, defaults to `False`):
88
+ Whether or not the router logits should be returned by the model. Enabling this will also
89
+ allow the model to output the auxiliary loss. See [here]() for more details
90
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
91
+ The aux loss factor for the total loss.
92
+ router_jitter_noise (`float`, *optional*, defaults to 0.0):
93
+ Amount of noise to add to the router.
94
+
95
+ ```python
96
+ >>> from transformers import MixtralModel, MixtralConfig
97
+
98
+ >>> # Initializing a Mixtral 8x7B style configuration
99
+ >>> configuration = MixtralConfig()
100
+
101
+ >>> # Initializing a model from the Mixtral 8x7B style configuration
102
+ >>> model = MixtralModel(configuration)
103
+
104
+ >>> # Accessing the model configuration
105
+ >>> configuration = model.config
106
+ ```"""
107
+
108
+ model_type = "mixtral"
109
+ keys_to_ignore_at_inference = ["past_key_values"]
110
+
111
+ def __init__(
112
+ self,
113
+ ranks=None,
114
+ vocab_size=32000,
115
+ hidden_size=4096,
116
+ intermediate_size=14336,
117
+ num_hidden_layers=32,
118
+ num_attention_heads=32,
119
+ num_key_value_heads=8,
120
+ hidden_act="silu",
121
+ max_position_embeddings=4096 * 32,
122
+ initializer_range=0.02,
123
+ rms_norm_eps=1e-5,
124
+ use_cache=True,
125
+ pad_token_id=None,
126
+ bos_token_id=1,
127
+ eos_token_id=2,
128
+ tie_word_embeddings=False,
129
+ rope_theta=1e6,
130
+ sliding_window=None,
131
+ attention_dropout=0.0,
132
+ num_experts_per_tok=2,
133
+ num_local_experts=8,
134
+ output_router_logits=False,
135
+ router_aux_loss_coef=0.001,
136
+ router_jitter_noise=0.0,
137
+ **kwargs,
138
+ ):
139
+ self.ranks = ranks if ranks is not None else {}  # guard so `key in config.ranks` checks work when no ranks are provided
140
+ self.vocab_size = vocab_size
141
+ self.max_position_embeddings = max_position_embeddings
142
+ self.hidden_size = hidden_size
143
+ self.intermediate_size = intermediate_size
144
+ self.num_hidden_layers = num_hidden_layers
145
+ self.num_attention_heads = num_attention_heads
146
+ self.sliding_window = sliding_window
147
+
148
+ # for backward compatibility
149
+ if num_key_value_heads is None:
150
+ num_key_value_heads = num_attention_heads
151
+
152
+ self.num_key_value_heads = num_key_value_heads
153
+ self.hidden_act = hidden_act
154
+ self.initializer_range = initializer_range
155
+ self.rms_norm_eps = rms_norm_eps
156
+ self.use_cache = use_cache
157
+ self.rope_theta = rope_theta
158
+ self.attention_dropout = attention_dropout
159
+
160
+ self.num_experts_per_tok = num_experts_per_tok
161
+ self.num_local_experts = num_local_experts
162
+ self.output_router_logits = output_router_logits
163
+ self.router_aux_loss_coef = router_aux_loss_coef
164
+ self.router_jitter_noise = router_jitter_noise
165
+ super().__init__(
166
+ pad_token_id=pad_token_id,
167
+ bos_token_id=bos_token_id,
168
+ eos_token_id=eos_token_id,
169
+ tie_word_embeddings=tie_word_embeddings,
170
+ **kwargs,
171
+ )
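Relative to the stock Transformers `MixtralConfig`, the only change is the extra `ranks` argument stored on the instance. A minimal, hypothetical usage sketch follows; it assumes `configuration_mixtral.py` is importable from the working directory (in a Hub repo the class would normally be resolved via `trust_remote_code` instead), and the small sizes are purely for illustration.

```python
# Hypothetical sketch: construct the custom config directly with a ranks mapping.
from configuration_mixtral import MixtralConfig

config = MixtralConfig(
    ranks={"model.layers.0.block_sparse_moe.experts.0.w1": 64},
    hidden_size=512,
    intermediate_size=1024,
    num_hidden_layers=2,
    num_attention_heads=8,
    num_key_value_heads=4,
)
print(config.model_type)  # "mixtral"
print(config.ranks)
```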
generation_config.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "temperature": null,
6
+ "top_p": null,
7
+ "transformers_version": "4.45.2"
8
+ }
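Note that `temperature` and `top_p` are explicitly `null`, so the checkpoint ships no sampling defaults; sampling parameters have to be passed to `generate()` at call time. A small sketch for inspecting the shipped defaults (the path is a placeholder for a local clone of this repository):

```python
# Sketch: load and inspect the generation defaults shipped with this checkpoint.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/local/checkpoint")  # placeholder
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 2
# temperature/top_p are unset here, so pass do_sample=True, temperature=...,
# top_p=... explicitly to model.generate() if sampling is desired.
```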
model-00001-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab2cf6873c7b52a44545631be3c8d3e3a65276f3e68abe6cd9bd803d119e95d7
3
+ size 4993746392
model-00002-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e856b4f4287796094075f07c2a484a07a7896726c437d0f2cb0b5d2633d22c02
3
+ size 4988519984
model-00003-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeea38dcc6c656f25e6de06a03eec70db83654ef9cd015b2479e881952428774
3
+ size 4901217264
model-00004-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dda445ae3cf753bd09a38d74a5dce5ca90e885627ffb0bf84947b400772ab771
3
+ size 4983004072
model-00005-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:779e1e8ba251b4db226eac9a479c57593c490b5c4ef7403b416b237281f54b16
3
+ size 4899035248
model-00006-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:570401d45132918c7fef74d0d5e46bdc91fdd2d8d6bf16f646c5b4ecacef3daa
3
+ size 4983004072
model-00007-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4d44d66f6adb85847d43b360d8921da1fc3f48bdd86bcdabd8ba3bea9e04a31
3
+ size 4983004072
model-00008-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c4c2aef1f5826dca3038cbcbbb85da3a8c1ff73a226177400008f6afec486f0
3
+ size 4983004072
model-00009-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:806d5d7a8fbff5f01659098ab65416e0341b5de37cdd7e7189aafc78cdfbc73e
3
+ size 4899035248
model-00010-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1aeaba68a7904058c45e8b2ecbc4c2a791ce3042dbcac08eb7da37f801ba16e1
3
+ size 4983004072
model-00011-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7d0eb5e2dee075a1ed0ff88c3427d5faf25359273fdb42fb5137e2113591ee7
3
+ size 4983004072
model-00012-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ab8cf2485a16ff9fb4330bb4ad00adedf8fb1800f080d370e516fef97759312
3
+ size 4899035248
model-00013-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6556dce6c01bb22ab7760f911da67659a9e2d872b494650f9a451326d61a09e5
3
+ size 4983004072
model-00014-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c028ab45ac38440c8c71a6bf49acbcb495051c245cac1f7232beab591a1802dd
3
+ size 4983004072
model-00015-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2bd6e2748b77306bcb009d2ea64b99b5566a91d0b82968324c075edce200fa80
3
+ size 4899035248
model-00016-of-00016.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff50dbf481e2e4a6bffdbcfcae3b7ca667d26e42f474f4f0fc9fb3abf78f0ecd
3
+ size 379609664
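Each of the sixteen `.safetensors` entries above is a Git LFS pointer that records the SHA-256 digest and byte size of the actual shard. A quick integrity check against the first shard's pointer could look like this (it assumes the shard has already been downloaded into the working directory):

```python
# Sketch: verify a downloaded shard against the oid/size from its LFS pointer.
import hashlib

expected_sha256 = "ab2cf6873c7b52a44545631be3c8d3e3a65276f3e68abe6cd9bd803d119e95d7"
expected_size = 4993746392

digest = hashlib.sha256()
size = 0
with open("model-00001-of-00016.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)
        size += len(chunk)

assert size == expected_size, f"size mismatch: {size} != {expected_size}"
assert digest.hexdigest() == expected_sha256, "sha256 mismatch"
print("shard OK")
```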
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
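Although the index is not rendered here, its structure is the standard safetensors sharding index: a `metadata.total_size` field plus a `weight_map` that maps each parameter name to the shard file holding it. A sketch of reading it (the printed keys are illustrative, not verified against this repo):

```python
# Sketch: resolve which shard stores a given parameter via the sharding index.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])                   # total checkpoint size in bytes
print(index["weight_map"]["model.embed_tokens.weight"])  # e.g. "model-00001-of-00016.safetensors"
```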
modeling_mixtral.py ADDED
@@ -0,0 +1,1787 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """PyTorch Mixtral model."""
21
+
22
+ import math
23
+ from typing import List, Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.nn.functional as F
27
+ import torch.utils.checkpoint
28
+ from torch import nn
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from transformers.activations import ACT2FN
32
+ from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
33
+ from transformers.generation import GenerationMixin
34
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_causal_attention_mask
35
+ from transformers.modeling_outputs import (
36
+ MoeCausalLMOutputWithPast,
37
+ MoeModelOutputWithPast,
38
+ QuestionAnsweringModelOutput,
39
+ SequenceClassifierOutputWithPast,
40
+ TokenClassifierOutput,
41
+ )
42
+ from transformers.modeling_utils import PreTrainedModel
43
+ from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_13
44
+ from transformers.utils import (
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ is_flash_attn_2_available,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from transformers.utils.import_utils import is_torch_fx_available
52
+ from .configuration_mixtral import MixtralConfig
53
+
54
+
55
+ if is_flash_attn_2_available():
56
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
57
+
58
+ # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
59
+ # It means that the function will not be traced through and simply appear as a node in the graph.
60
+ if is_torch_fx_available():
61
+ if not is_torch_greater_or_equal_than_1_13:
62
+ import torch.fx
63
+
64
+ _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
65
+
66
+
67
+ logger = logging.get_logger(__name__)
68
+
69
+ _CONFIG_FOR_DOC = "MixtralConfig"
70
+
71
+
72
+ def load_balancing_loss_func(
73
+ gate_logits: Union[torch.Tensor, Tuple[torch.Tensor], None],
74
+ num_experts: Optional[int] = None,
75
+ top_k=2,
76
+ attention_mask: Optional[torch.Tensor] = None,
77
+ ) -> Union[torch.Tensor, int]:
78
+ r"""
79
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
80
+
81
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
82
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
83
+ experts is too unbalanced.
84
+
85
+ Args:
86
+ gate_logits:
87
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
88
+ shape [batch_size X sequence_length, num_experts].
89
+ num_experts:
90
+ Number of experts
91
+ top_k:
92
+ The number of experts to route per token; can also be interpreted as the `top-k` routing
93
+ parameter.
94
+ attention_mask (`torch.Tensor`, *optional*):
95
+ The attention_mask used in forward function
96
+ shape [batch_size X sequence_length] if not None.
97
+
98
+ Returns:
99
+ The auxiliary loss.
100
+ """
101
+ if gate_logits is None or not isinstance(gate_logits, tuple):
102
+ return 0
103
+
104
+ if isinstance(gate_logits, tuple):
105
+ compute_device = gate_logits[0].device
106
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
107
+
108
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
109
+
110
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
111
+
112
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
113
+
114
+ if attention_mask is None:
115
+ # Compute the percentage of tokens routed to each expert
116
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
117
+
118
+ # Compute the average probability of routing to these experts
119
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
120
+ else:
121
+ batch_size, sequence_length = attention_mask.shape
122
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
123
+
124
+ # Compute the mask that zeroes out all padding tokens, with the same shape as expert_mask
125
+ expert_attention_mask = (
126
+ attention_mask[None, :, :, None, None]
127
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
128
+ .reshape(-1, top_k, num_experts)
129
+ .to(compute_device)
130
+ )
131
+
132
+ # Compute the percentage of tokens routed to each expert
133
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
134
+ expert_attention_mask, dim=0
135
+ )
136
+
137
+ # Compute the mask that zeroes out all padding tokens, with the same shape as tokens_per_expert
138
+ router_per_expert_attention_mask = (
139
+ attention_mask[None, :, :, None]
140
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
141
+ .reshape(-1, num_experts)
142
+ .to(compute_device)
143
+ )
144
+
145
+ # Compute the average probability of routing to these experts
146
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
147
+ router_per_expert_attention_mask, dim=0
148
+ )
149
+
150
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
151
+ return overall_loss * num_experts
152
+
153
+
154
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mixtral
155
+ class MixtralRMSNorm(nn.Module):
156
+ def __init__(self, hidden_size, eps=1e-6):
157
+ """
158
+ MixtralRMSNorm is equivalent to T5LayerNorm
159
+ """
160
+ super().__init__()
161
+ self.weight = nn.Parameter(torch.ones(hidden_size))
162
+ self.variance_epsilon = eps
163
+
164
+ def forward(self, hidden_states):
165
+ input_dtype = hidden_states.dtype
166
+ hidden_states = hidden_states.to(torch.float32)
167
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
168
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
169
+ return self.weight * hidden_states.to(input_dtype)
170
+
171
+ def extra_repr(self):
172
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
173
+
174
+
175
+ # copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Mixtral
176
+ # TODO @longjie no longer copied from Mistral after static cache
177
+ class MixtralRotaryEmbedding(nn.Module):
178
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
179
+ super().__init__()
180
+
181
+ self.dim = dim
182
+ self.max_position_embeddings = max_position_embeddings
183
+ self.base = base
184
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
185
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
186
+
187
+ # Build here to make `torch.jit.trace` work.
188
+ self._set_cos_sin_cache(
189
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
190
+ )
191
+
192
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
193
+ self.max_seq_len_cached = seq_len
194
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
195
+
196
+ freqs = torch.outer(t, self.inv_freq)
197
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
198
+ emb = torch.cat((freqs, freqs), dim=-1)
199
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
200
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
201
+
202
+ def forward(self, x, seq_len=None):
203
+ # x: [bs, num_attention_heads, seq_len, head_size]
204
+ if seq_len > self.max_seq_len_cached:
205
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
206
+
207
+ return (
208
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
209
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
210
+ )
211
+
212
+
213
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
214
+ def rotate_half(x):
215
+ """Rotates half the hidden dims of the input."""
216
+ x1 = x[..., : x.shape[-1] // 2]
217
+ x2 = x[..., x.shape[-1] // 2 :]
218
+ return torch.cat((-x2, x1), dim=-1)
219
+
220
+
221
+ # copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
222
+ # TODO @longjie no longer copied from Mistral after static cache
223
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
224
+ """Applies Rotary Position Embedding to the query and key tensors.
225
+
226
+ Args:
227
+ q (`torch.Tensor`): The query tensor.
228
+ k (`torch.Tensor`): The key tensor.
229
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
230
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
231
+ position_ids (`torch.Tensor`):
232
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
233
+ used to pass offsetted position ids when working with a KV-cache.
234
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
235
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
236
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
237
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
238
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
239
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
240
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
241
+ Returns:
242
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
243
+ """
244
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
245
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
246
+ q_embed = (q * cos) + (rotate_half(q) * sin)
247
+ k_embed = (k * cos) + (rotate_half(k) * sin)
248
+ return q_embed, k_embed
249
+
250
+
251
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
252
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
253
+ """
254
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
255
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
256
+ """
257
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
258
+ if n_rep == 1:
259
+ return hidden_states
260
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
261
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
262
+
263
+
264
+ # copied from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->Mixtral
265
+ # TODO @longjie no longer copied from Mistral after static cache
266
+ class MixtralAttention(nn.Module):
267
+ """
268
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
269
+ and "Generating Long Sequences with Sparse Transformers".
270
+ """
271
+
272
+ def __init__(self, config: MixtralConfig, layer_idx: Optional[int] = None):
273
+ super().__init__()
274
+ self.config = config
275
+ self.layer_idx = layer_idx
276
+ if layer_idx is None:
277
+ logger.warning_once(
278
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
279
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
280
+ "when creating this class."
281
+ )
282
+
283
+ self.hidden_size = config.hidden_size
284
+ self.num_heads = config.num_attention_heads
285
+ self.head_dim = self.hidden_size // self.num_heads
286
+ self.num_key_value_heads = config.num_key_value_heads
287
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
288
+ self.max_position_embeddings = config.max_position_embeddings
289
+ self.rope_theta = config.rope_theta
290
+ self.is_causal = True
291
+ self.attention_dropout = config.attention_dropout
292
+
293
+ if (self.head_dim * self.num_heads) != self.hidden_size:
294
+ raise ValueError(
295
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
296
+ f" and `num_heads`: {self.num_heads})."
297
+ )
298
+ if f"model.layers.{layer_idx}.self_attn.q_proj" in self.config.ranks:
299
+ rank = self.config.ranks[f"model.layers.{layer_idx}.self_attn.q_proj"]
300
+ self.q_proj = nn.Sequential(nn.Linear(self.hidden_size, rank, bias=False),
301
+ nn.Linear(rank, self.num_heads * self.head_dim, bias=False))
302
+ else:
303
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
304
+
305
+ if f"model.layers.{layer_idx}.self_attn.k_proj" in self.config.ranks:
306
+ rank = self.config.ranks[f"model.layers.{layer_idx}.self_attn.k_proj"]
307
+ self.k_proj = nn.Sequential(nn.Linear(self.hidden_size, rank, bias=False),
308
+ nn.Linear(rank, self.num_key_value_heads * self.head_dim, bias=False))
309
+ else:
310
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
311
+
312
+ if f"model.layers.{layer_idx}.self_attn.v_proj" in self.config.ranks:
313
+ rank = self.config.ranks[f"model.layers.{layer_idx}.self_attn.v_proj"]
314
+ self.v_proj = nn.Sequential(nn.Linear(self.hidden_size, rank, bias=False),
315
+ nn.Linear(rank, self.num_key_value_heads * self.head_dim, bias=False))
316
+ else:
317
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
318
+
319
+ if f"model.layers.{layer_idx}.self_attn.o_proj" in self.config.ranks:
320
+ rank = self.config.ranks[f"model.layers.{layer_idx}.self_attn.o_proj"]
321
+ self.o_proj = nn.Sequential(nn.Linear(self.num_heads * self.head_dim, rank, bias=False),
322
+ nn.Linear(rank, self.hidden_size, bias=False))
323
+ else:
324
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
325
+
326
+ self.rotary_emb = MixtralRotaryEmbedding(
327
+ self.head_dim,
328
+ max_position_embeddings=self.max_position_embeddings,
329
+ base=self.rope_theta,
330
+ )
331
+
332
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
333
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
334
+
335
+ def forward(
336
+ self,
337
+ hidden_states: torch.Tensor,
338
+ attention_mask: Optional[torch.Tensor] = None,
339
+ position_ids: Optional[torch.LongTensor] = None,
340
+ past_key_value: Optional[Cache] = None,
341
+ output_attentions: bool = False,
342
+ use_cache: bool = False,
343
+ cache_position: Optional[torch.LongTensor] = None,
344
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
345
+ bsz, q_len, _ = hidden_states.size()
346
+
347
+ query_states = self.q_proj(hidden_states)
348
+ key_states = self.k_proj(hidden_states)
349
+ value_states = self.v_proj(hidden_states)
350
+
351
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
352
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
353
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
354
+
355
+ kv_seq_len = key_states.shape[-2]
356
+ if past_key_value is not None:
357
+ if self.layer_idx is None:
358
+ raise ValueError(
359
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
360
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
361
+ "with a layer index."
362
+ )
363
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
364
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
365
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
366
+
367
+ if past_key_value is not None:
368
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
369
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
370
+
371
+ # repeat k/v heads if n_kv_heads < n_heads
372
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
373
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
374
+
375
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
376
+
377
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
378
+ raise ValueError(
379
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
380
+ f" {attn_weights.size()}"
381
+ )
382
+
383
+ if attention_mask is not None: # no matter the length, we just slice it
384
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
385
+ attn_weights = attn_weights + causal_mask
386
+
387
+ # upcast attention to fp32
388
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
389
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
390
+ attn_output = torch.matmul(attn_weights, value_states)
391
+
392
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
393
+ raise ValueError(
394
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
395
+ f" {attn_output.size()}"
396
+ )
397
+
398
+ attn_output = attn_output.transpose(1, 2).contiguous()
399
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
400
+
401
+ attn_output = self.o_proj(attn_output)
402
+
403
+ if not output_attentions:
404
+ attn_weights = None
405
+
406
+ return attn_output, attn_weights, past_key_value
407
+
408
+
409
+ # copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Mixtral
410
+ # TODO @longjie no longer copied from Mistral after static cache
411
+ class MixtralFlashAttention2(MixtralAttention):
412
+ """
413
+ Mixtral flash attention module. This module inherits from `MixtralAttention` as the weights of the module stay
414
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
415
+ flash attention and deal with padding tokens in case the input contains any of them.
416
+ """
417
+
418
+ def forward(
419
+ self,
420
+ hidden_states: torch.Tensor,
421
+ attention_mask: Optional[torch.Tensor] = None,
422
+ position_ids: Optional[torch.LongTensor] = None,
423
+ past_key_value: Optional[Cache] = None,
424
+ output_attentions: bool = False,
425
+ use_cache: bool = False,
426
+ cache_position: Optional[torch.LongTensor] = None,
427
+ ):
428
+ bsz, q_len, _ = hidden_states.size()
429
+
430
+ query_states = self.q_proj(hidden_states)
431
+ key_states = self.k_proj(hidden_states)
432
+ value_states = self.v_proj(hidden_states)
433
+
434
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
435
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
436
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
437
+
438
+ kv_seq_len = key_states.shape[-2]
439
+ if past_key_value is not None:
440
+ if self.layer_idx is None:
441
+ raise ValueError(
442
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
443
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
444
+ "with a layer index."
445
+ )
446
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
447
+
448
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
449
+ rotary_seq_len = (
450
+ max(kv_seq_len, position_ids[:, -1].max().item() + 1) if position_ids is not None else kv_seq_len
451
+ )
452
+
453
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
454
+
455
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
456
+
457
+ if past_key_value is not None:
458
+ # Only activate the slicing cache if the config has a `sliding_window` value set
459
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
460
+ if (
461
+ getattr(self.config, "sliding_window", None) is not None
462
+ and kv_seq_len > self.config.sliding_window
463
+ and cache_has_contents
464
+ ):
465
+ slicing_tokens = 1 - self.config.sliding_window
466
+
467
+ past_key = past_key_value[self.layer_idx][0]
468
+ past_value = past_key_value[self.layer_idx][1]
469
+
470
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
471
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
472
+
473
+ if past_key.shape[-2] != self.config.sliding_window - 1:
474
+ raise ValueError(
475
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
476
+ f" {past_key.shape}"
477
+ )
478
+
479
+ if attention_mask is not None:
480
+ attention_mask = attention_mask[:, slicing_tokens:]
481
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
482
+
483
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
484
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
485
+
486
+ # repeat k/v heads if n_kv_heads < n_heads
487
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
488
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
489
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
490
+
491
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
492
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
493
+ # cast them back to float16 just to be sure everything works as expected.
494
+ input_dtype = query_states.dtype
495
+ if input_dtype == torch.float32:
496
+ if torch.is_autocast_enabled():
497
+ target_dtype = torch.get_autocast_gpu_dtype()
498
+ # Handle the case where the model is quantized
499
+ elif hasattr(self.config, "_pre_quantization_dtype"):
500
+ target_dtype = self.config._pre_quantization_dtype
501
+ else:
502
+ target_dtype = self.q_proj.weight.dtype
503
+
504
+ logger.warning_once(
505
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
506
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
507
+ f" {target_dtype}."
508
+ )
509
+
510
+ query_states = query_states.to(target_dtype)
511
+ key_states = key_states.to(target_dtype)
512
+ value_states = value_states.to(target_dtype)
513
+
514
+ # Reshape to the expected shape for Flash Attention
515
+ query_states = query_states.transpose(1, 2)
516
+ key_states = key_states.transpose(1, 2)
517
+ value_states = value_states.transpose(1, 2)
518
+
519
+ attn_output = _flash_attention_forward(
520
+ query_states,
521
+ key_states,
522
+ value_states,
523
+ attention_mask,
524
+ q_len,
525
+ position_ids=position_ids,
526
+ dropout=dropout_rate,
527
+ sliding_window=getattr(self.config, "sliding_window", None),
528
+ is_causal=self.is_causal,
529
+ )
530
+
531
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
532
+ attn_output = self.o_proj(attn_output)
533
+
534
+ if not output_attentions:
535
+ attn_weights = None
536
+
537
+ return attn_output, attn_weights, past_key_value
538
+
539
+
540
+ # copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Mixtral
541
+ # TODO @longjie no longer copied from Mistral after static cache
542
+ class MixtralSdpaAttention(MixtralAttention):
543
+ """
544
+ Mixtral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
545
+ `MixtralAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
546
+ SDPA API.
547
+ """
548
+
549
+ # Adapted from MixtralAttention.forward
550
+ def forward(
551
+ self,
552
+ hidden_states: torch.Tensor,
553
+ attention_mask: Optional[torch.Tensor] = None,
554
+ position_ids: Optional[torch.LongTensor] = None,
555
+ past_key_value: Optional[Cache] = None,
556
+ output_attentions: bool = False,
557
+ use_cache: bool = False,
558
+ cache_position: Optional[torch.LongTensor] = None,
559
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
560
+ if output_attentions:
561
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
562
+ logger.warning_once(
563
+ "MixtralModel is using MixtralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
564
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
565
+ )
566
+ return super().forward(
567
+ hidden_states=hidden_states,
568
+ attention_mask=attention_mask,
569
+ position_ids=position_ids,
570
+ past_key_value=past_key_value,
571
+ output_attentions=output_attentions,
572
+ use_cache=use_cache,
573
+ )
574
+
575
+ bsz, q_len, _ = hidden_states.size()
576
+
577
+ query_states = self.q_proj(hidden_states)
578
+ key_states = self.k_proj(hidden_states)
579
+ value_states = self.v_proj(hidden_states)
580
+
581
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
582
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
583
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
584
+
585
+ kv_seq_len = key_states.shape[-2]
586
+ if past_key_value is not None:
587
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
588
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
589
+
590
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
591
+
592
+ if past_key_value is not None:
593
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
594
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
595
+
596
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
597
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
598
+
599
+ causal_mask = attention_mask
600
+ if attention_mask is not None: # no matter the length, we just slice it
601
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
602
+
603
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
604
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
605
+ if query_states.device.type == "cuda" and attention_mask is not None:
606
+ query_states = query_states.contiguous()
607
+ key_states = key_states.contiguous()
608
+ value_states = value_states.contiguous()
609
+
610
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
611
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
612
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
613
+ is_causal = True if causal_mask is None and q_len > 1 else False
614
+
615
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
616
+ query_states,
617
+ key_states,
618
+ value_states,
619
+ attn_mask=causal_mask,
620
+ dropout_p=self.attention_dropout if self.training else 0.0,
621
+ is_causal=is_causal,
622
+ )
623
+
624
+ attn_output = attn_output.transpose(1, 2).contiguous()
625
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
626
+
627
+ attn_output = self.o_proj(attn_output)
628
+
629
+ return attn_output, None, past_key_value
630
+
631
+
632
+ MIXTRAL_ATTENTION_CLASSES = {
633
+ "eager": MixtralAttention,
634
+ "flash_attention_2": MixtralFlashAttention2,
635
+ "sdpa": MixtralSdpaAttention,
636
+ }
637
+
638
+
639
+ class MixtralBlockSparseTop2MLP(nn.Module):
640
+ def __init__(self, config: MixtralConfig, layer_idx: int, idx: int):
641
+ super().__init__()
642
+ self.ffn_dim = config.intermediate_size
643
+ self.hidden_dim = config.hidden_size
644
+ if f"model.layers.{layer_idx}.block_sparse_moe.experts.{idx}.w1" in config.ranks:
645
+ rank = config.ranks[f"model.layers.{layer_idx}.block_sparse_moe.experts.{idx}.w1"]
646
+ self.w1 = nn.Sequential(nn.Linear(self.hidden_dim, rank, bias=False),
647
+ nn.Linear(rank, self.ffn_dim, bias=False))
648
+ else:
649
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
650
+
651
+ if f"model.layers.{layer_idx}.block_sparse_moe.experts.{idx}.w2" in config.ranks:
652
+ rank = config.ranks[f"model.layers.{layer_idx}.block_sparse_moe.experts.{idx}.w2"]
653
+ self.w2 = nn.Sequential(nn.Linear(self.ffn_dim, rank, bias=False),
654
+ nn.Linear(rank, self.hidden_dim, bias=False))
655
+ else:
656
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
657
+
658
+ if f"model.layers.{layer_idx}.block_sparse_moe.experts.{idx}.w3" in config.ranks:
659
+ rank = config.ranks[f"model.layers.{layer_idx}.block_sparse_moe.experts.{idx}.w3"]
660
+ self.w3 = nn.Sequential(nn.Linear(self.hidden_dim, rank, bias=False),
661
+ nn.Linear(rank, self.ffn_dim, bias=False))
662
+ else:
663
+ self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
664
+
665
+ self.act_fn = ACT2FN[config.hidden_act]
666
+
667
+ def forward(self, hidden_states):
668
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
669
+ current_hidden_states = self.w2(current_hidden_states)
670
+ return current_hidden_states
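The `config.ranks` lookup above is what distinguishes this checkpoint from stock Mixtral: whenever a rank is listed for an expert projection, the dense weight is replaced by a low-rank factorisation (two stacked `nn.Linear` layers). A minimal sketch of the parameter saving, using the `hidden_size = 4096`, `intermediate_size = 14336`, and rank `1024` values from the `config.json` in this commit (the variable names are illustrative):

```python
import torch
from torch import nn

# Illustrative sketch of the low-rank expert factorisation selected via `config.ranks`.
hidden_dim, ffn_dim, rank = 4096, 14336, 1024  # values from the config.json above

full_w1 = nn.Linear(hidden_dim, ffn_dim, bias=False)
low_rank_w1 = nn.Sequential(
    nn.Linear(hidden_dim, rank, bias=False),   # 4096 -> 1024
    nn.Linear(rank, ffn_dim, bias=False),      # 1024 -> 14336
)

def n_params(module: nn.Module) -> int:
    return sum(p.numel() for p in module.parameters())

print(n_params(full_w1))      # 58,720,256 parameters for the dense projection
print(n_params(low_rank_w1))  # 18,874,368 parameters for the factorised one

x = torch.randn(2, hidden_dim)
assert full_w1(x).shape == low_rank_w1(x).shape  # both map 4096 -> 14336
```

With rank 1024, each factorised projection is about 3.1x smaller than its dense counterpart; experts whose names are absent from `config.ranks` keep the ordinary dense `nn.Linear`.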
671
+
672
+
673
+ class MixtralSparseMoeBlock(nn.Module):
674
+ """
675
+ This implementation is
676
+ strictly equivalent to standard MoE with full capacity (no
677
+ dropped tokens). It's faster since it formulates MoE operations
678
+ in terms of block-sparse operations to accommodate imbalanced
679
+ assignments of tokens to experts, whereas standard MoE either
680
+ (1) drops tokens at the cost of reduced performance or (2) sets the
681
+ capacity factor to the number of experts and thus wastes computation
682
+ and memory on padding.
683
+ """
684
+
685
+ def __init__(self, config, layer_idx):
686
+ super().__init__()
687
+ self.hidden_dim = config.hidden_size
688
+ self.ffn_dim = config.intermediate_size
689
+ self.num_experts = config.num_local_experts
690
+ self.top_k = config.num_experts_per_tok
691
+ self.layer_idx = layer_idx
692
+
693
+ # gating
694
+ self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
695
+
696
+ self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config, layer_idx, idx) for idx in range(self.num_experts)])
697
+
698
+ # Jitter parameters
699
+ self.jitter_noise = config.router_jitter_noise
700
+
701
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
702
+ """ """
703
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
704
+ if self.training and self.jitter_noise > 0:
705
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
706
+ hidden_states = hidden_states.view(-1, hidden_dim)
707
+ # router_logits: (batch * sequence_length, n_experts)
708
+ router_logits = self.gate(hidden_states)
709
+
710
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
711
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
712
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
713
+ # we cast back to the input dtype
714
+ routing_weights = routing_weights.to(hidden_states.dtype)
715
+
716
+ final_hidden_states = torch.zeros(
717
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
718
+ )
719
+
720
+ # One hot encode the selected experts to create an expert mask
721
+ # this will be used to easily index which expert is going to be solicited
722
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
723
+
724
+ # Loop over all available experts in the model and perform the computation on each expert
725
+ for expert_idx in range(self.num_experts):
726
+ expert_layer = self.experts[expert_idx]
727
+ idx, top_x = torch.where(expert_mask[expert_idx])
728
+
729
+ # Index the correct hidden states and compute the expert hidden state for
730
+ # the current expert. We need to make sure to multiply the output hidden
731
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
732
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
733
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
734
+
735
+ # However `index_add_` only support torch tensors for indexing so we'll use
736
+ # the `top_x` tensor here.
737
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
738
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
739
+ return final_hidden_states, router_logits
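For reference, a minimal sketch of the top-2 routing performed by the gate in this forward pass, on a toy batch of three tokens and four experts (the logits are made up for illustration):

```python
import torch
import torch.nn.functional as F

# Toy routing example: 3 tokens, 4 experts, top_k = 2 (logits are made up).
router_logits = torch.tensor([[ 2.0, 0.5, -1.0, 0.1],
                              [-0.3, 1.2,  0.8, 0.0],
                              [ 0.0, 0.0,  3.0, 2.9]])

routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, k=2, dim=-1)
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)  # the two weights per token sum to 1

print(selected_experts)  # which two experts each token is dispatched to
print(routing_weights)   # their renormalised mixing weights
```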
740
+
741
+
742
+ class MixtralDecoderLayer(nn.Module):
743
+ def __init__(self, config: MixtralConfig, layer_idx: int):
744
+ super().__init__()
745
+ self.hidden_size = config.hidden_size
746
+
747
+ self.self_attn = MIXTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
748
+
749
+ self.block_sparse_moe = MixtralSparseMoeBlock(config, layer_idx=layer_idx)
750
+ self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
751
+ self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
752
+
753
+ def forward(
754
+ self,
755
+ hidden_states: torch.Tensor,
756
+ attention_mask: Optional[torch.Tensor] = None,
757
+ position_ids: Optional[torch.LongTensor] = None,
758
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
759
+ output_attentions: Optional[bool] = False,
760
+ output_router_logits: Optional[bool] = False,
761
+ use_cache: Optional[bool] = False,
762
+ cache_position: Optional[torch.LongTensor] = None,
763
+ **kwargs,
764
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
765
+ """
766
+ Args:
767
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
768
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
769
+ `(batch, sequence_length)` where padding elements are indicated by 0.
770
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
771
+ output_attentions (`bool`, *optional*):
772
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
773
+ returned tensors for more detail.
774
+ output_router_logits (`bool`, *optional*):
775
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
776
+ should not be returned during inference.
777
+ use_cache (`bool`, *optional*):
778
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
779
+ (see `past_key_values`).
780
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
781
+ Indices depicting the position of the input sequence tokens in the sequence.
782
+ kwargs (`dict`, *optional*):
783
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
784
+ into the model
785
+ """
786
+
787
+ residual = hidden_states
788
+
789
+ hidden_states = self.input_layernorm(hidden_states)
790
+
791
+ # Self Attention
792
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
793
+ hidden_states=hidden_states,
794
+ attention_mask=attention_mask,
795
+ position_ids=position_ids,
796
+ past_key_value=past_key_value,
797
+ output_attentions=output_attentions,
798
+ use_cache=use_cache,
799
+ cache_position=cache_position,
800
+ )
801
+ hidden_states = residual + hidden_states
802
+
803
+ # Fully Connected
804
+ residual = hidden_states
805
+ hidden_states = self.post_attention_layernorm(hidden_states)
806
+ hidden_states, router_logits = self.block_sparse_moe(hidden_states)
807
+ hidden_states = residual + hidden_states
808
+
809
+ outputs = (hidden_states,)
810
+
811
+ if output_attentions:
812
+ outputs += (self_attn_weights,)
813
+
814
+ if use_cache:
815
+ outputs += (present_key_value,)
816
+
817
+ if output_router_logits:
818
+ outputs += (router_logits,)
819
+
820
+ return outputs
821
+
822
+
823
+ MIXTRAL_START_DOCSTRING = r"""
824
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
825
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
826
+ etc.)
827
+
828
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
829
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
830
+ and behavior.
831
+
832
+ Parameters:
833
+ config ([`MixtralConfig`]):
834
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
835
+ load the weights associated with the model, only the configuration. Check out the
836
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
837
+ """
838
+
839
+
840
+ @add_start_docstrings(
841
+ "The bare Mixtral Model outputting raw hidden-states without any specific head on top.",
842
+ MIXTRAL_START_DOCSTRING,
843
+ )
844
+ # copied from transformers.models.qwen2.modeling_qwen2.Qwen2PreTrainedModel with Qwen2->Mixtral
845
+ # TODO (Raushan): bring back copied after compile compatibility
846
+ class MixtralPreTrainedModel(PreTrainedModel):
847
+ config_class = MixtralConfig
848
+ base_model_prefix = "model"
849
+ supports_gradient_checkpointing = True
850
+ _no_split_modules = ["MixtralDecoderLayer"]
851
+ _skip_keys_device_placement = "past_key_values"
852
+ _supports_flash_attn_2 = True
853
+ _supports_sdpa = True
854
+ _supports_cache_class = True
855
+
856
+ def _init_weights(self, module):
857
+ std = self.config.initializer_range
858
+ if isinstance(module, nn.Linear):
859
+ module.weight.data.normal_(mean=0.0, std=std)
860
+ if module.bias is not None:
861
+ module.bias.data.zero_()
862
+ elif isinstance(module, nn.Embedding):
863
+ module.weight.data.normal_(mean=0.0, std=std)
864
+ if module.padding_idx is not None:
865
+ module.weight.data[module.padding_idx].zero_()
866
+
867
+
868
+ MIXTRAL_INPUTS_DOCSTRING = r"""
869
+ Args:
870
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
871
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
872
+ it.
873
+
874
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
875
+ [`PreTrainedTokenizer.__call__`] for details.
876
+
877
+ [What are input IDs?](../glossary#input-ids)
878
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
879
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
880
+
881
+ - 1 for tokens that are **not masked**,
882
+ - 0 for tokens that are **masked**.
883
+
884
+ [What are attention masks?](../glossary#attention-mask)
885
+
886
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
887
+ [`PreTrainedTokenizer.__call__`] for details.
888
+
889
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
890
+ `past_key_values`).
891
+
892
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
893
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
894
+ information on the default strategy.
895
+
896
+ - 1 indicates the head is **not masked**,
897
+ - 0 indicates the head is **masked**.
898
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
899
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
900
+ config.n_positions - 1]`.
901
+
902
+ [What are position IDs?](../glossary#position-ids)
903
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
904
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
905
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
906
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
907
+
908
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
909
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
910
+
911
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
912
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
913
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
914
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
915
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
916
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
917
+ model's internal embedding lookup matrix.
918
+ use_cache (`bool`, *optional*):
919
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
920
+ `past_key_values`).
921
+ output_attentions (`bool`, *optional*):
922
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
923
+ tensors for more detail.
924
+ output_hidden_states (`bool`, *optional*):
925
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
926
+ more detail.
927
+ output_router_logits (`bool`, *optional*):
928
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
929
+ should not be returned during inference.
930
+ return_dict (`bool`, *optional*):
931
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
932
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
933
+ Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
934
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
935
+ the complete sequence length.
936
+ """
937
+
938
+
939
+ @add_start_docstrings(
940
+ "The bare Mixtral Model outputting raw hidden-states without any specific head on top.",
941
+ MIXTRAL_START_DOCSTRING,
942
+ )
943
+ # copied from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->MIXTRAL,Mistral->Mixtral
944
+ # TODO @longjie no longer copied from Mistral after static cache
945
+ class MixtralModel(MixtralPreTrainedModel):
946
+ """
947
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MixtralDecoderLayer`]
948
+
949
+ Args:
950
+ config: MixtralConfig
951
+ """
952
+
953
+ def __init__(self, config: MixtralConfig):
954
+ super().__init__(config)
955
+ self.padding_idx = config.pad_token_id
956
+ self.vocab_size = config.vocab_size
957
+
958
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
959
+ self.layers = nn.ModuleList(
960
+ [MixtralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
961
+ )
962
+ self._attn_implementation = config._attn_implementation
963
+ self.norm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
964
+
965
+ self.gradient_checkpointing = False
966
+ # Initialize weights and apply final processing
967
+ self.post_init()
968
+
969
+ def get_input_embeddings(self):
970
+ return self.embed_tokens
971
+
972
+ def set_input_embeddings(self, value):
973
+ self.embed_tokens = value
974
+
975
+ # Ignore copy
976
+ @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING)
977
+ def forward(
978
+ self,
979
+ input_ids: torch.LongTensor = None,
980
+ attention_mask: Optional[torch.Tensor] = None,
981
+ position_ids: Optional[torch.LongTensor] = None,
982
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
983
+ inputs_embeds: Optional[torch.FloatTensor] = None,
984
+ use_cache: Optional[bool] = None,
985
+ output_attentions: Optional[bool] = None,
986
+ output_hidden_states: Optional[bool] = None,
987
+ output_router_logits: Optional[bool] = None,
988
+ return_dict: Optional[bool] = None,
989
+ cache_position: Optional[torch.LongTensor] = None,
990
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
991
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
992
+ output_router_logits = (
993
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
994
+ )
995
+ output_hidden_states = (
996
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
997
+ )
998
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
999
+
1000
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1001
+
1002
+ if (input_ids is None) ^ (inputs_embeds is not None):
1003
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1004
+
1005
+ if self.gradient_checkpointing and self.training:
1006
+ if use_cache:
1007
+ logger.warning_once(
1008
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1009
+ )
1010
+ use_cache = False
1011
+
1012
+ # kept for BC (non `Cache` `past_key_values` inputs)
1013
+ return_legacy_cache = False
1014
+ if use_cache and not isinstance(past_key_values, Cache):
1015
+ return_legacy_cache = True
1016
+ if past_key_values is None:
1017
+ past_key_values = DynamicCache()
1018
+ else:
1019
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1020
+ logger.warning_once(
1021
+ "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
1022
+ "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
1023
+ "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
1024
+ )
1025
+
1026
+ if inputs_embeds is None:
1027
+ inputs_embeds = self.embed_tokens(input_ids)
1028
+
1029
+ if cache_position is None:
1030
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1031
+ cache_position = torch.arange(
1032
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
1033
+ )
1034
+ if position_ids is None:
1035
+ position_ids = cache_position.unsqueeze(0)
1036
+
1037
+ causal_mask = self._update_causal_mask(
1038
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
1039
+ )
1040
+
1041
+ hidden_states = inputs_embeds
1042
+
1043
+ # decoder layers
1044
+ all_hidden_states = () if output_hidden_states else None
1045
+ all_self_attns = () if output_attentions else None
1046
+ all_router_logits = () if output_router_logits else None
1047
+ next_decoder_cache = None
1048
+
1049
+ for decoder_layer in self.layers:
1050
+ if output_hidden_states:
1051
+ all_hidden_states += (hidden_states,)
1052
+
1053
+ if self.gradient_checkpointing and self.training:
1054
+ layer_outputs = self._gradient_checkpointing_func(
1055
+ decoder_layer.__call__,
1056
+ hidden_states,
1057
+ causal_mask,
1058
+ position_ids,
1059
+ past_key_values,
1060
+ output_attentions,
1061
+ output_router_logits,
1062
+ use_cache,
1063
+ cache_position,
1064
+ )
1065
+ else:
1066
+ layer_outputs = decoder_layer(
1067
+ hidden_states,
1068
+ attention_mask=causal_mask,
1069
+ position_ids=position_ids,
1070
+ past_key_value=past_key_values,
1071
+ output_attentions=output_attentions,
1072
+ output_router_logits=output_router_logits,
1073
+ use_cache=use_cache,
1074
+ cache_position=cache_position,
1075
+ )
1076
+
1077
+ hidden_states = layer_outputs[0]
1078
+
1079
+ if use_cache:
1080
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1081
+
1082
+ if output_attentions:
1083
+ all_self_attns += (layer_outputs[1],)
1084
+
1085
+ if output_router_logits:
1086
+ all_router_logits += (layer_outputs[-1],)
1087
+
1088
+ hidden_states = self.norm(hidden_states)
1089
+
1090
+ # add hidden states from the last decoder layer
1091
+ if output_hidden_states:
1092
+ all_hidden_states += (hidden_states,)
1093
+
1094
+ next_cache = next_decoder_cache if use_cache else None
1095
+ if return_legacy_cache:
1096
+ next_cache = next_cache.to_legacy_cache()
1097
+
1098
+ if not return_dict:
1099
+ return tuple(
1100
+ v
1101
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
1102
+ if v is not None
1103
+ )
1104
+ return MoeModelOutputWithPast(
1105
+ last_hidden_state=hidden_states,
1106
+ past_key_values=next_cache,
1107
+ hidden_states=all_hidden_states,
1108
+ attentions=all_self_attns,
1109
+ router_logits=all_router_logits,
1110
+ )
1111
+
1112
+ # Copied from transformers.models.phi3.modeling_phi3.Phi3Model._update_causal_mask
1113
+ def _update_causal_mask(
1114
+ self,
1115
+ attention_mask: torch.Tensor,
1116
+ input_tensor: torch.Tensor,
1117
+ cache_position: torch.Tensor,
1118
+ past_key_values: Cache,
1119
+ output_attentions: bool,
1120
+ ):
1121
+ if self.config._attn_implementation == "flash_attention_2":
1122
+ if attention_mask is not None and 0.0 in attention_mask:
1123
+ return attention_mask
1124
+ return None
1125
+
1126
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1127
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1128
+ # to infer the attention mask.
1129
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1130
+ using_static_cache = isinstance(past_key_values, StaticCache)
1131
+ using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
1132
+
1133
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1134
+ if (
1135
+ self.config._attn_implementation == "sdpa"
1136
+ and not (using_static_cache or using_sliding_window_cache)
1137
+ and not output_attentions
1138
+ ):
1139
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1140
+ attention_mask,
1141
+ inputs_embeds=input_tensor,
1142
+ past_key_values_length=past_seen_tokens,
1143
+ sliding_window=self.config.sliding_window,
1144
+ is_training=self.training,
1145
+ ):
1146
+ return None
1147
+
1148
+ dtype, device = input_tensor.dtype, input_tensor.device
1149
+ min_dtype = torch.finfo(dtype).min
1150
+ sequence_length = input_tensor.shape[1]
1151
+ # SlidingWindowCache or StaticCache
1152
+ if using_sliding_window_cache or using_static_cache:
1153
+ target_length = past_key_values.get_max_cache_shape()
1154
+ # DynamicCache or no cache
1155
+ else:
1156
+ target_length = (
1157
+ attention_mask.shape[-1]
1158
+ if isinstance(attention_mask, torch.Tensor)
1159
+ else past_seen_tokens + sequence_length + 1
1160
+ )
1161
+
1162
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
1163
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
1164
+ attention_mask,
1165
+ sequence_length=sequence_length,
1166
+ target_length=target_length,
1167
+ dtype=dtype,
1168
+ device=device,
1169
+ cache_position=cache_position,
1170
+ batch_size=input_tensor.shape[0],
1171
+ config=self.config,
1172
+ past_key_values=past_key_values,
1173
+ )
1174
+
1175
+ if (
1176
+ self.config._attn_implementation == "sdpa"
1177
+ and attention_mask is not None
1178
+ and attention_mask.device.type == "cuda"
1179
+ and not output_attentions
1180
+ ):
1181
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1182
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1183
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1184
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1185
+
1186
+ return causal_mask
1187
+
1188
+ @staticmethod
1189
+ # Copied from transformers.models.mistral.modeling_mistral.MistralModel._prepare_4d_causal_attention_mask_with_cache_position with Mistral->Mixtral
1190
+ def _prepare_4d_causal_attention_mask_with_cache_position(
1191
+ attention_mask: torch.Tensor,
1192
+ sequence_length: int,
1193
+ target_length: int,
1194
+ dtype: torch.dtype,
1195
+ device: torch.device,
1196
+ cache_position: torch.Tensor,
1197
+ batch_size: int,
1198
+ config: MixtralConfig,
1199
+ past_key_values: Cache,
1200
+ ):
1201
+ """
1202
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
1203
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
1204
+
1205
+ Args:
1206
+ attention_mask (`torch.Tensor`):
1207
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
1208
+ sequence_length (`int`):
1209
+ The sequence length being processed.
1210
+ target_length (`int`):
1211
+ The target length: when generating with a static cache, the mask should be as long as the static cache, to account for the zero padding (the part of the cache that is not filled yet).
1212
+ dtype (`torch.dtype`):
1213
+ The dtype to use for the 4D attention mask.
1214
+ device (`torch.device`):
1215
+ The device to place the 4D attention mask on.
1216
+ cache_position (`torch.Tensor`):
1217
+ Indices depicting the position of the input sequence tokens in the sequence.
1218
+ batch_size (`torch.Tensor`):
1219
+ Batch size.
1220
+ config (`MixtralConfig`):
1221
+ The model's configuration class
1222
+ past_key_values (`Cache`):
1223
+ The cache class that is being used currently to generate
1224
+ """
1225
+ if attention_mask is not None and attention_mask.dim() == 4:
1226
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
1227
+ causal_mask = attention_mask
1228
+ else:
1229
+ min_dtype = torch.finfo(dtype).min
1230
+ causal_mask = torch.full(
1231
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
1232
+ )
1233
+ diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1234
+ if config.sliding_window is not None:
1235
+ # if we have a sliding window, we should not attend to tokens beyond the sliding window length, so we mask them out as well
1236
+ # the check is needed to verify whether the current checkpoint was trained with a sliding window or not
1237
+ if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
1238
+ sliding_attend_mask = torch.arange(target_length, device=device) <= (
1239
+ cache_position.reshape(-1, 1) - config.sliding_window
1240
+ )
1241
+ diagonal_attend_mask |= sliding_attend_mask
1242
+ causal_mask *= diagonal_attend_mask
1243
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
1244
+ if attention_mask is not None:
1245
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1246
+ if attention_mask.shape[-1] > target_length:
1247
+ attention_mask = attention_mask[:, :target_length]
1248
+ mask_length = attention_mask.shape[-1]
1249
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
1250
+ padding_mask = padding_mask == 0
1251
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
1252
+ padding_mask, min_dtype
1253
+ )
1254
+ return causal_mask
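A minimal sketch of the attendable positions produced by the sliding-window branch above, for toy sizes (`sliding_window = 3`, six query/key positions; names are illustrative):

```python
import torch

# Toy sizes: which key positions each query may attend to under the sliding-window causal mask.
sliding_window = 3
target_length = 6
cache_position = torch.arange(target_length)  # queries at positions 0..5

key_pos = torch.arange(target_length)
future = key_pos > cache_position.reshape(-1, 1)                     # future keys are masked
too_old = key_pos <= cache_position.reshape(-1, 1) - sliding_window  # keys outside the window are masked
attendable = ~(future | too_old)

print(attendable.int())
# Row i is True exactly for the last `sliding_window` positions up to and including i,
# mirroring `diagonal_attend_mask |= sliding_attend_mask` above.
```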
1255
+
1256
+
1257
+ class MixtralForCausalLM(MixtralPreTrainedModel, GenerationMixin):
1258
+ _tied_weights_keys = ["lm_head.weight"]
1259
+
1260
+ def __init__(self, config):
1261
+ super().__init__(config)
1262
+ self.model = MixtralModel(config)
1263
+ self.vocab_size = config.vocab_size
1264
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1265
+ self.router_aux_loss_coef = config.router_aux_loss_coef
1266
+ self.num_experts = config.num_local_experts
1267
+ self.num_experts_per_tok = config.num_experts_per_tok
1268
+ # Initialize weights and apply final processing
1269
+ self.post_init()
1270
+
1271
+ def get_input_embeddings(self):
1272
+ return self.model.embed_tokens
1273
+
1274
+ def set_input_embeddings(self, value):
1275
+ self.model.embed_tokens = value
1276
+
1277
+ def get_output_embeddings(self):
1278
+ return self.lm_head
1279
+
1280
+ def set_output_embeddings(self, new_embeddings):
1281
+ self.lm_head = new_embeddings
1282
+
1283
+ def set_decoder(self, decoder):
1284
+ self.model = decoder
1285
+
1286
+ def get_decoder(self):
1287
+ return self.model
1288
+
1289
+ @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING)
1290
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1291
+ # Ignore copy
1292
+ def forward(
1293
+ self,
1294
+ input_ids: torch.LongTensor = None,
1295
+ attention_mask: Optional[torch.Tensor] = None,
1296
+ position_ids: Optional[torch.LongTensor] = None,
1297
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1298
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1299
+ labels: Optional[torch.LongTensor] = None,
1300
+ use_cache: Optional[bool] = None,
1301
+ output_attentions: Optional[bool] = None,
1302
+ output_hidden_states: Optional[bool] = None,
1303
+ output_router_logits: Optional[bool] = None,
1304
+ return_dict: Optional[bool] = None,
1305
+ cache_position: Optional[torch.LongTensor] = None,
1306
+ num_logits_to_keep: int = 0,
1307
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
1308
+ r"""
1309
+ Args:
1310
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1311
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1312
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1313
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1314
+
1315
+ num_logits_to_keep (`int`, *optional*):
1316
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
1317
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
1318
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
1319
+
1320
+ Returns:
1321
+
1322
+ Example:
1323
+
1324
+ ```python
1325
+ >>> from transformers import AutoTokenizer, MixtralForCausalLM
1326
+
1327
+ >>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
1328
+ >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
1329
+
1330
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1331
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1332
+
1333
+ >>> # Generate
1334
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1335
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1336
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1337
+ ```"""
1338
+
1339
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1340
+ output_router_logits = (
1341
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1342
+ )
1343
+
1344
+ output_hidden_states = (
1345
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1346
+ )
1347
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1348
+
1349
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1350
+ outputs = self.model(
1351
+ input_ids=input_ids,
1352
+ attention_mask=attention_mask,
1353
+ position_ids=position_ids,
1354
+ past_key_values=past_key_values,
1355
+ inputs_embeds=inputs_embeds,
1356
+ use_cache=use_cache,
1357
+ output_attentions=output_attentions,
1358
+ output_hidden_states=output_hidden_states,
1359
+ output_router_logits=output_router_logits,
1360
+ return_dict=return_dict,
1361
+ cache_position=cache_position,
1362
+ )
1363
+
1364
+ hidden_states = outputs[0]
1365
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1366
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
1367
+
1368
+ loss = None
1369
+ if labels is not None:
1370
+ # Upcast to float if we need to compute the loss to avoid potential precision issues
1371
+ logits = logits.float()
1372
+ # Shift so that tokens < n predict n
1373
+ shift_logits = logits[..., :-1, :].contiguous()
1374
+ shift_labels = labels[..., 1:].contiguous()
1375
+ # Flatten the tokens
1376
+ loss_fct = CrossEntropyLoss()
1377
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1378
+ shift_labels = shift_labels.view(-1)
1379
+ # Enable model parallelism
1380
+ shift_labels = shift_labels.to(shift_logits.device)
1381
+ loss = loss_fct(shift_logits, shift_labels)
1382
+
1383
+ aux_loss = None
1384
+ if output_router_logits:
1385
+ aux_loss = load_balancing_loss_func(
1386
+ outputs.router_logits if return_dict else outputs[-1],
1387
+ self.num_experts,
1388
+ self.num_experts_per_tok,
1389
+ attention_mask,
1390
+ )
1391
+ if labels is not None:
1392
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure the aux loss resides on the same device as the loss
1393
+
1394
+ if not return_dict:
1395
+ output = (logits,) + outputs[1:]
1396
+ if output_router_logits:
1397
+ output = (aux_loss,) + output
1398
+ return (loss,) + output if loss is not None else output
1399
+
1400
+ return MoeCausalLMOutputWithPast(
1401
+ loss=loss,
1402
+ aux_loss=aux_loss,
1403
+ logits=logits,
1404
+ past_key_values=outputs.past_key_values,
1405
+ hidden_states=outputs.hidden_states,
1406
+ attentions=outputs.attentions,
1407
+ router_logits=outputs.router_logits,
1408
+ )
1409
+
1410
+ def prepare_inputs_for_generation(
1411
+ self,
1412
+ input_ids,
1413
+ past_key_values=None,
1414
+ attention_mask=None,
1415
+ inputs_embeds=None,
1416
+ cache_position=None,
1417
+ output_router_logits=False,
1418
+ position_ids=None,
1419
+ use_cache=True,
1420
+ num_logits_to_keep=None,
1421
+ **kwargs,
1422
+ ):
1423
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1424
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1425
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1426
+ if past_key_values is not None:
1427
+ if inputs_embeds is not None: # Exception 1
1428
+ input_ids = input_ids[:, -cache_position.shape[0] :]
1429
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1430
+ input_ids = input_ids[:, cache_position]
1431
+
1432
+ if attention_mask is not None and position_ids is None:
1433
+ # create position_ids on the fly for batch generation
1434
+ position_ids = attention_mask.long().cumsum(-1) - 1
1435
+ position_ids.masked_fill_(attention_mask == 0, 1)
1436
+ if past_key_values:
1437
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1438
+
1439
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1440
+ if inputs_embeds is not None and cache_position[0] == 0:
1441
+ model_inputs = {"inputs_embeds": inputs_embeds}
1442
+ else:
1443
+ model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
1444
+
1445
+ if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
1446
+ if model_inputs["inputs_embeds"] is not None:
1447
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
1448
+ device = model_inputs["inputs_embeds"].device
1449
+ else:
1450
+ batch_size, sequence_length = model_inputs["input_ids"].shape
1451
+ device = model_inputs["input_ids"].device
1452
+
1453
+ attention_mask = self.model._prepare_4d_causal_attention_mask_with_cache_position(
1454
+ attention_mask,
1455
+ sequence_length=sequence_length,
1456
+ target_length=past_key_values.get_max_cache_shape(),
1457
+ dtype=self.lm_head.weight.dtype,
1458
+ device=device,
1459
+ cache_position=cache_position,
1460
+ batch_size=batch_size,
1461
+ config=self.config,
1462
+ past_key_values=past_key_values,
1463
+ )
1464
+
1465
+ if num_logits_to_keep is not None:
1466
+ model_inputs["num_logits_to_keep"] = num_logits_to_keep
1467
+
1468
+ model_inputs.update(
1469
+ {
1470
+ "position_ids": position_ids,
1471
+ "cache_position": cache_position,
1472
+ "past_key_values": past_key_values,
1473
+ "use_cache": use_cache,
1474
+ "attention_mask": attention_mask,
1475
+ "output_router_logits": output_router_logits,
1476
+ }
1477
+ )
1478
+ return model_inputs
1479
+
1480
+
1481
+ @add_start_docstrings(
1482
+ """
1483
+ The Mixtral Model transformer with a sequence classification head on top (linear layer).
1484
+
1485
+ [`MixtralForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1486
+ (e.g. GPT-2) do.
1487
+
1488
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1489
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1490
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1491
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1492
+ each row of the batch).
1493
+ """,
1494
+ MIXTRAL_START_DOCSTRING,
1495
+ )
1496
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Mixtral, LLAMA->MIXTRAL
1497
+ class MixtralForSequenceClassification(MixtralPreTrainedModel):
1498
+ def __init__(self, config):
1499
+ super().__init__(config)
1500
+ self.num_labels = config.num_labels
1501
+ self.model = MixtralModel(config)
1502
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1503
+
1504
+ # Initialize weights and apply final processing
1505
+ self.post_init()
1506
+
1507
+ def get_input_embeddings(self):
1508
+ return self.model.embed_tokens
1509
+
1510
+ def set_input_embeddings(self, value):
1511
+ self.model.embed_tokens = value
1512
+
1513
+ @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING)
1514
+ def forward(
1515
+ self,
1516
+ input_ids: Optional[torch.LongTensor] = None,
1517
+ attention_mask: Optional[torch.Tensor] = None,
1518
+ position_ids: Optional[torch.LongTensor] = None,
1519
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1520
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1521
+ labels: Optional[torch.LongTensor] = None,
1522
+ use_cache: Optional[bool] = None,
1523
+ output_attentions: Optional[bool] = None,
1524
+ output_hidden_states: Optional[bool] = None,
1525
+ return_dict: Optional[bool] = None,
1526
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1527
+ r"""
1528
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1529
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1530
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1531
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1532
+ """
1533
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1534
+
1535
+ transformer_outputs = self.model(
1536
+ input_ids,
1537
+ attention_mask=attention_mask,
1538
+ position_ids=position_ids,
1539
+ past_key_values=past_key_values,
1540
+ inputs_embeds=inputs_embeds,
1541
+ use_cache=use_cache,
1542
+ output_attentions=output_attentions,
1543
+ output_hidden_states=output_hidden_states,
1544
+ return_dict=return_dict,
1545
+ )
1546
+ hidden_states = transformer_outputs[0]
1547
+ logits = self.score(hidden_states)
1548
+
1549
+ if input_ids is not None:
1550
+ batch_size = input_ids.shape[0]
1551
+ else:
1552
+ batch_size = inputs_embeds.shape[0]
1553
+
1554
+ if self.config.pad_token_id is None and batch_size != 1:
1555
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1556
+ if self.config.pad_token_id is None:
1557
+ sequence_lengths = -1
1558
+ else:
1559
+ if input_ids is not None:
1560
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1561
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1562
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1563
+ sequence_lengths = sequence_lengths.to(logits.device)
1564
+ else:
1565
+ sequence_lengths = -1
1566
+
1567
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1568
+
1569
+ loss = None
1570
+ if labels is not None:
1571
+ labels = labels.to(logits.device)
1572
+ if self.config.problem_type is None:
1573
+ if self.num_labels == 1:
1574
+ self.config.problem_type = "regression"
1575
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1576
+ self.config.problem_type = "single_label_classification"
1577
+ else:
1578
+ self.config.problem_type = "multi_label_classification"
1579
+
1580
+ if self.config.problem_type == "regression":
1581
+ loss_fct = MSELoss()
1582
+ if self.num_labels == 1:
1583
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1584
+ else:
1585
+ loss = loss_fct(pooled_logits, labels)
1586
+ elif self.config.problem_type == "single_label_classification":
1587
+ loss_fct = CrossEntropyLoss()
1588
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1589
+ elif self.config.problem_type == "multi_label_classification":
1590
+ loss_fct = BCEWithLogitsLoss()
1591
+ loss = loss_fct(pooled_logits, labels)
1592
+ if not return_dict:
1593
+ output = (pooled_logits,) + transformer_outputs[1:]
1594
+ return ((loss,) + output) if loss is not None else output
1595
+
1596
+ return SequenceClassifierOutputWithPast(
1597
+ loss=loss,
1598
+ logits=pooled_logits,
1599
+ past_key_values=transformer_outputs.past_key_values,
1600
+ hidden_states=transformer_outputs.hidden_states,
1601
+ attentions=transformer_outputs.attentions,
1602
+ )
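A minimal sketch of the `argmax`/modulo trick used in the classification forward above to locate the last non-padding token of each row, with a made-up `pad_token_id` of 0 (the real pad token comes from the model config):

```python
import torch

# Illustrative only: pad_token_id is assumed to be 0 here.
pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],    # right-padded row
                          [1, 2, 3, 4, 5]])   # row with no padding at all

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]  # maps -1 (no pad found) to the last index

print(sequence_lengths)  # tensor([2, 4]): index of the last real token in each row
```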
1603
+
1604
+
1605
+ @add_start_docstrings(
1606
+ """
1607
+ The Mixtral Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1608
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1609
+ """,
1610
+ MIXTRAL_START_DOCSTRING,
1611
+ )
1612
+ # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Mixtral, LLAMA->MIXTRAL
1613
+ class MixtralForTokenClassification(MixtralPreTrainedModel):
1614
+ def __init__(self, config):
1615
+ super().__init__(config)
1616
+ self.num_labels = config.num_labels
1617
+ self.model = MixtralModel(config)
1618
+ if getattr(config, "classifier_dropout", None) is not None:
1619
+ classifier_dropout = config.classifier_dropout
1620
+ elif getattr(config, "hidden_dropout", None) is not None:
1621
+ classifier_dropout = config.hidden_dropout
1622
+ else:
1623
+ classifier_dropout = 0.1
1624
+ self.dropout = nn.Dropout(classifier_dropout)
1625
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1626
+
1627
+ # Initialize weights and apply final processing
1628
+ self.post_init()
1629
+
1630
+ def get_input_embeddings(self):
1631
+ return self.model.embed_tokens
1632
+
1633
+ def set_input_embeddings(self, value):
1634
+ self.model.embed_tokens = value
1635
+
1636
+ @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING)
1637
+ def forward(
1638
+ self,
1639
+ input_ids: Optional[torch.LongTensor] = None,
1640
+ attention_mask: Optional[torch.Tensor] = None,
1641
+ position_ids: Optional[torch.LongTensor] = None,
1642
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1643
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1644
+ labels: Optional[torch.LongTensor] = None,
1645
+ use_cache: Optional[bool] = None,
1646
+ output_attentions: Optional[bool] = None,
1647
+ output_hidden_states: Optional[bool] = None,
1648
+ return_dict: Optional[bool] = None,
1649
+ ) -> Union[Tuple, TokenClassifierOutput]:
1650
+ r"""
1651
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1652
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1653
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1654
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1655
+ """
1656
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1657
+
1658
+ outputs = self.model(
1659
+ input_ids,
1660
+ attention_mask=attention_mask,
1661
+ position_ids=position_ids,
1662
+ past_key_values=past_key_values,
1663
+ inputs_embeds=inputs_embeds,
1664
+ use_cache=use_cache,
1665
+ output_attentions=output_attentions,
1666
+ output_hidden_states=output_hidden_states,
1667
+ return_dict=return_dict,
1668
+ )
1669
+ sequence_output = outputs[0]
1670
+ sequence_output = self.dropout(sequence_output)
1671
+ logits = self.score(sequence_output)
1672
+
1673
+ loss = None
1674
+ if labels is not None:
1675
+ loss_fct = CrossEntropyLoss()
1676
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1677
+
1678
+ if not return_dict:
1679
+ output = (logits,) + outputs[2:]
1680
+ return ((loss,) + output) if loss is not None else output
1681
+
1682
+ return TokenClassifierOutput(
1683
+ loss=loss,
1684
+ logits=logits,
1685
+ hidden_states=outputs.hidden_states,
1686
+ attentions=outputs.attentions,
1687
+ )
1688
+
1689
+
1690
+ @add_start_docstrings(
1691
+ """
1692
+ The Mixtral Model transformer with a span classification head on top for extractive question-answering tasks like
1693
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1694
+ """,
1695
+ MIXTRAL_START_DOCSTRING,
1696
+ )
1697
+ # Copied from transformers.models.mistral.modeling_mistral.MistralForQuestionAnswering with Mistral->Mixtral, MISTRAL->MIXTRAL
1698
+ class MixtralForQuestionAnswering(MixtralPreTrainedModel):
1699
+ base_model_prefix = "model"
1700
+
1701
+ # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Mixtral
1702
+ def __init__(self, config):
1703
+ super().__init__(config)
1704
+ self.model = MixtralModel(config)
1705
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1706
+
1707
+ # Initialize weights and apply final processing
1708
+ self.post_init()
1709
+
1710
+ def get_input_embeddings(self):
1711
+ return self.model.embed_tokens
1712
+
1713
+ def set_input_embeddings(self, value):
1714
+ self.model.embed_tokens = value
1715
+
1716
+ @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING)
1717
+ def forward(
1718
+ self,
1719
+ input_ids: Optional[torch.LongTensor] = None,
1720
+ attention_mask: Optional[torch.FloatTensor] = None,
1721
+ position_ids: Optional[torch.LongTensor] = None,
1722
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1723
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1724
+ start_positions: Optional[torch.LongTensor] = None,
1725
+ end_positions: Optional[torch.LongTensor] = None,
1726
+ output_attentions: Optional[bool] = None,
1727
+ output_hidden_states: Optional[bool] = None,
1728
+ return_dict: Optional[bool] = None,
1729
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1730
+ r"""
1731
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1732
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1733
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1734
+ are not taken into account for computing the loss.
1735
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1736
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1737
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1738
+ are not taken into account for computing the loss.
1739
+ """
1740
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1741
+
1742
+ outputs = self.model(
1743
+ input_ids,
1744
+ attention_mask=attention_mask,
1745
+ position_ids=position_ids,
1746
+ past_key_values=past_key_values,
1747
+ inputs_embeds=inputs_embeds,
1748
+ output_attentions=output_attentions,
1749
+ output_hidden_states=output_hidden_states,
1750
+ return_dict=return_dict,
1751
+ )
1752
+
1753
+ sequence_output = outputs[0]
1754
+
1755
+ logits = self.qa_outputs(sequence_output)
1756
+ start_logits, end_logits = logits.split(1, dim=-1)
1757
+ start_logits = start_logits.squeeze(-1).contiguous()
1758
+ end_logits = end_logits.squeeze(-1).contiguous()
1759
+
1760
+ total_loss = None
1761
+ if start_positions is not None and end_positions is not None:
1762
+ # If we are on multi-GPU, splitting may add a dimension; squeeze it away
1763
+ if len(start_positions.size()) > 1:
1764
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1765
+ if len(end_positions.size()) > 1:
1766
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1767
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1768
+ ignored_index = start_logits.size(1)
1769
+ start_positions = start_positions.clamp(0, ignored_index)
1770
+ end_positions = end_positions.clamp(0, ignored_index)
1771
+
1772
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1773
+ start_loss = loss_fct(start_logits, start_positions)
1774
+ end_loss = loss_fct(end_logits, end_positions)
1775
+ total_loss = (start_loss + end_loss) / 2
1776
+
1777
+ if not return_dict:
1778
+ output = (start_logits, end_logits) + outputs[2:]
1779
+ return ((total_loss,) + output) if total_loss is not None else output
1780
+
1781
+ return QuestionAnsweringModelOutput(
1782
+ loss=total_loss,
1783
+ start_logits=start_logits,
1784
+ end_logits=end_logits,
1785
+ hidden_states=outputs.hidden_states,
1786
+ attentions=outputs.attentions,
1787
+ )
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "unk_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": null,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ }
30
+ },
31
+ "additional_special_tokens": [],
32
+ "bos_token": "<s>",
33
+ "clean_up_tokenization_spaces": false,
34
+ "eos_token": "</s>",
35
+ "legacy": true,
36
+ "model_max_length": 1000000000000000019884624838656,
37
+ "pad_token": null,
38
+ "sp_model_kwargs": {},
39
+ "spaces_between_special_tokens": false,
40
+ "tokenizer_class": "LlamaTokenizer",
41
+ "unk_token": "<unk>",
42
+ "use_default_system_prompt": false
43
+ }