Commit 86487c2 (unverified) by casperhansen
Parent: 35f9b0f

Mixtral: More correct MoE, lower loss (#932)

src/axolotl/models/mixtral/modeling_moe_mistral.py CHANGED
@@ -215,23 +215,22 @@ class MoE(nn.Module):
     ):
         super().__init__()
         self.config = config
-        num_experts = config.num_experts
-        self.experts = nn.ModuleList([FeedForward(config) for i in range(num_experts)])
-        self.gate = nn.Linear(config.hidden_size, num_experts, bias=False)
-        self.num_experts_per_token = config.num_experts_per_token
+        self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
+        self.experts = nn.ModuleList(
+            [FeedForward(config) for i in range(config.num_experts)]
+        )
 
     def forward(self, x):
         orig_shape = x.shape
         x = x.view(-1, x.shape[-1])
 
-        scores = self.gate(x)
+        scores = self.gate(x).softmax(dim=-1)
         expert_weights, expert_indices = torch.topk(
-            scores, self.num_experts_per_token, dim=-1
+            scores, self.config.num_experts_per_token, dim=-1
        )
-        expert_weights = expert_weights.softmax(dim=-1)
         flat_expert_indices = expert_indices.view(-1)
 
-        x = x.repeat_interleave(self.num_experts_per_token, dim=0)
+        x = x.repeat_interleave(self.config.num_experts_per_token, dim=0)
         y = torch.empty_like(x)
         for i, expert in enumerate(self.experts):
             y[flat_expert_indices == i] = expert(x[flat_expert_indices == i])
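
The substantive change is in the gating: the old code ran top-k on the raw gate logits and then softmaxed only the k kept scores, while the new code softmaxes over all experts first and takes the top-k of the resulting probabilities. A minimal standalone sketch contrasting the two variants (not part of the repository; the 8-expert, 2-per-token sizes and variable names are assumptions for illustration, matching Mixtral's usual configuration):

```python
# Standalone sketch of the two gating variants (assumes 8 experts, 2 per token).
import torch

torch.manual_seed(0)
num_experts, num_experts_per_token = 8, 2
scores = torch.randn(4, num_experts)  # gate logits for a batch of 4 tokens

# Old routing: top-k on raw logits, then softmax over only the k kept scores,
# so every token's routing weights sum to 1 regardless of the gate's confidence.
old_weights, old_idx = torch.topk(scores, num_experts_per_token, dim=-1)
old_weights = old_weights.softmax(dim=-1)

# New routing (this commit): softmax over all experts first, then top-k,
# so each weight is that expert's probability under the full gate distribution.
probs = scores.softmax(dim=-1)
new_weights, new_idx = torch.topk(probs, num_experts_per_token, dim=-1)

assert torch.equal(old_idx, new_idx)  # softmax is monotonic: same experts chosen
print(old_weights.sum(dim=-1))        # all 1.0 per token
print(new_weights.sum(dim=-1))        # < 1.0: some mass stays with unchosen experts
```

Both variants select the same experts; only the combination weights differ. Presumably this is what the "More correct MoE, lower loss" commit title refers to: the new weights preserve how much probability mass the gate actually assigned to the chosen experts instead of renormalizing it away.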