Mixtral: More correct MoE, lower loss (#932)
* More correct MoE

* Fix formatting
casper-hansen committed Dec 10, 2023
1 parent 35f9b0f commit 86487c2
Showing 1 changed file with 7 additions and 8 deletions.
15 changes: 7 additions & 8 deletions src/axolotl/models/mixtral/modeling_moe_mistral.py
@@ -215,23 +215,22 @@ def __init__(
     ):
         super().__init__()
         self.config = config
-        num_experts = config.num_experts
-        self.experts = nn.ModuleList([FeedForward(config) for i in range(num_experts)])
-        self.gate = nn.Linear(config.hidden_size, num_experts, bias=False)
-        self.num_experts_per_token = config.num_experts_per_token
+        self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
+        self.experts = nn.ModuleList(
+            [FeedForward(config) for i in range(config.num_experts)]
+        )
 
     def forward(self, x):
         orig_shape = x.shape
         x = x.view(-1, x.shape[-1])
 
-        scores = self.gate(x)
+        scores = self.gate(x).softmax(dim=-1)
         expert_weights, expert_indices = torch.topk(
-            scores, self.num_experts_per_token, dim=-1
+            scores, self.config.num_experts_per_token, dim=-1
         )
-        expert_weights = expert_weights.softmax(dim=-1)
         flat_expert_indices = expert_indices.view(-1)
 
-        x = x.repeat_interleave(self.num_experts_per_token, dim=0)
+        x = x.repeat_interleave(self.config.num_experts_per_token, dim=0)
         y = torch.empty_like(x)
         for i, expert in enumerate(self.experts):
             y[flat_expert_indices == i] = expert(x[flat_expert_indices == i])
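For context, the functional change in this commit is in the routing: the gate's scores are now softmaxed over all experts before the top-k selection, and the old post-top-k softmax over only the selected experts is removed (the per-token expert count is also read directly from config rather than stored on the module). Below is a minimal sketch of the difference between the two routings, using made-up shapes and names (num_tokens, num_experts, top_k, logits are illustrative assumptions, not identifiers from the repository):

    import torch

    # Illustrative shapes only; not taken from the repository.
    num_tokens, num_experts, top_k = 4, 8, 2
    logits = torch.randn(num_tokens, num_experts)  # stand-in for self.gate(x)

    # Old routing: top-k over the raw gate logits, then softmax over only the k selected scores.
    old_weights, old_idx = torch.topk(logits, top_k, dim=-1)
    old_weights = old_weights.softmax(dim=-1)  # always sums to 1 over the chosen experts

    # New routing (this commit): softmax over all experts first, no renormalization after top-k.
    scores = logits.softmax(dim=-1)
    new_weights, new_idx = torch.topk(scores, top_k, dim=-1)  # generally sums to < 1

    # Softmax preserves ordering, so the same experts are selected either way;
    # only the mixing weights handed to the experts differ.
    print(old_weights.sum(dim=-1))  # ones
    print(new_weights.sum(dim=-1))  # values below 1

In other words, the old routing always renormalized the two selected weights to sum to 1, while the new routing keeps the mass each selected expert received under the full softmax, so a token whose top experts are less certain contributes correspondingly weaker expert outputs.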
