# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Mistral model compatible with HuggingFace weights.""" | ||
from typing import List, Optional, Tuple | ||

import torch
import torch.nn.functional as F
from torch import nn
from transformers import MistralConfig

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import PagedAttention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (LinearMethodBase,
                                               ColumnParallelLinear,
                                               MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_world_size)
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.model_executor.weight_utils import (default_weight_loader,
                                              hf_model_weights_iterator)
from vllm.sequence import SamplerOutput

KVCache = Tuple[torch.Tensor, torch.Tensor]


class FeedForward(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
    ):
        """
        Initialize the FeedForward module.

        Args:
            hidden_size (int): Input (and output) dimension.
            intermediate_size (int): Hidden dimension of the feedforward
                layer.

        Attributes:
            w1 (ColumnParallelLinear): Gate projection (first linear layer).
            w2 (RowParallelLinear): Down projection (second linear layer).
            w3 (ColumnParallelLinear): Up projection (third linear layer).
        """
        super().__init__()

        self.w1 = ColumnParallelLinear(hidden_size, intermediate_size,
                                       bias=False)
        self.w2 = RowParallelLinear(intermediate_size, hidden_size,
                                    bias=False)
        self.w3 = ColumnParallelLinear(hidden_size, intermediate_size,
                                       bias=False)

    def forward(self, x):
        # SwiGLU: w2(silu(w1(x)) * w3(x)). The parallel linear layers return
        # an (output, output_bias) tuple, so the second element is dropped.
        w1_out, _ = self.w1(x)
        w3_out, _ = self.w3(x)
        out, _ = self.w2(F.silu(w1_out) * w3_out)
        return out
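
# For reference, the computation above is equivalent to the following
# single-GPU sketch with plain nn.Linear (illustrative only, not part of
# vLLM's API; the tensor-parallel layers above shard the same matmuls):
#
#     w1 = nn.Linear(hidden_size, intermediate_size, bias=False)
#     w2 = nn.Linear(intermediate_size, hidden_size, bias=False)
#     w3 = nn.Linear(hidden_size, intermediate_size, bias=False)
#     out = w2(F.silu(w1(x)) * w3(x))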


class MoE(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        num_experts: int = 8,
        num_experts_per_token: int = 2,
    ):
        super().__init__()
        self.experts = nn.ModuleList([
            FeedForward(hidden_size, intermediate_size)
            for _ in range(num_experts)
        ])
        self.gate = nn.Linear(hidden_size, num_experts, bias=False)
        self.num_experts_per_token = num_experts_per_token

    def forward(self, x):
        orig_shape = x.shape
        x = x.view(-1, x.shape[-1])

        # Route each token to its top-k experts and softmax-normalize the
        # selected gate logits so the expert outputs can be combined as a
        # weighted sum.
        scores = self.gate(x)
        expert_weights, expert_indices = torch.topk(
            scores, self.num_experts_per_token, dim=-1)
        expert_weights = expert_weights.softmax(dim=-1)
        flat_expert_indices = expert_indices.view(-1)

        # Duplicate every token once per selected expert, run each expert on
        # the rows routed to it, then collapse the per-expert outputs back to
        # one row per token.
        x = x.repeat_interleave(self.num_experts_per_token, dim=0)
        y = torch.empty_like(x)
        for i, expert in enumerate(self.experts):
            y[flat_expert_indices == i] = expert(x[flat_expert_indices == i])
        y = (y.view(*expert_weights.shape, -1) *
             expert_weights.unsqueeze(-1)).sum(dim=1)
        return y.view(*orig_shape)
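
# Shape walk-through of the routing above, with assumed example values (not
# taken from any real config): 5 tokens, hidden_size = 8, num_experts = 8,
# top-2 routing:
#   scores:         (5, 8)   one gate logit per (token, expert) pair
#   expert_weights: (5, 2)   top-2 logits, softmax-normalized per token
#   expert_indices: (5, 2)   which expert serves each of the 2 slots
#   x (repeated):   (10, 8)  each token duplicated once per selected expert
#   y:              (10, 8)  expert outputs, regrouped to (5, 2, 8) and
#                            reduced to (5, 8) by the weighted sum over slots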


class MistralAttention(nn.Module):

    def __init__(self,
                 hidden_size: int,
                 num_heads: int,
                 num_kv_heads: int,
                 max_position: int = 4096 * 32,
                 rope_theta: float = 10000,
                 linear_method: Optional[LinearMethodBase] = None,
                 sliding_window: Optional[int] = None) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is at least the TP size, so we partition
            # the KV heads across the tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.sliding_window = sliding_window

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            linear_method=linear_method,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            linear_method=linear_method,
        )

        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position,
            base=self.rope_theta,
        )
        self.attn = PagedAttention(self.num_heads,
                                   self.head_dim,
                                   self.scaling,
                                   num_kv_heads=self.num_kv_heads,
                                   sliding_window=self.sliding_window)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        k_cache, v_cache = kv_cache
        attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata,
                                cache_event)
        output, _ = self.o_proj(attn_output)
        return output
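
# Illustrative GQA sizing for the projections above, using assumed
# Mixtral-8x7B-like values (hidden_size = 4096, num_heads = 32,
# num_kv_heads = 8, so head_dim = 128): with tp_size = 2, each rank holds
# 16 query heads and 4 KV heads, and the fused qkv output splits into
# q_size = 16 * 128 = 2048 and kv_size = 4 * 128 = 512 columns per rank.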


class MixtralDecoderLayer(nn.Module):

    def __init__(
        self,
        config: MistralConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        # Requires transformers > 4.32.0
        rope_theta = getattr(config, "rope_theta", 10000)
        self.self_attn = MistralAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            max_position=config.max_position_embeddings,
            num_kv_heads=config.num_key_value_heads,
            rope_theta=rope_theta,
            linear_method=linear_method,
            sliding_window=config.sliding_window)
        self.mlp = MoE(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            num_experts=config.num_experts,
            num_experts_per_token=config.num_experts_per_token)
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Self Attention
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual
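
# Note on the residual threading above: when vLLM's RMSNorm is called with a
# residual tensor it fuses the residual add into the normalization and
# returns (normalized_states, updated_residual), which is why no explicit
# `hidden_states + residual` appears inside the layer.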


class MixtralModel(nn.Module):

    def __init__(
        self,
        config: MistralConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            MixtralDecoderLayer(config, linear_method)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
        for i in range(len(self.layers)):
            cache_event = None if cache_events is None else cache_events[i]
            layer = self.layers[i]
            hidden_states, residual = layer(
                positions,
                hidden_states,
                kv_caches[i],
                input_metadata,
                cache_event,
                residual,
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states


class MixtralForCausalLM(nn.Module):

    def __init__(
        self,
        config: MistralConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.linear_method = linear_method
        self.model = MixtralModel(config, linear_method)
        self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   input_metadata, cache_events)
        return hidden_states

    def sample(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> SamplerOutput:
        next_tokens = self.sampler(self.lm_head.weight, hidden_states,
                                   sampling_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
            if "rotary_emb.inv_freq" in name:
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                param = params_dict[name.replace(weight_name, param_name)]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
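
# How the mapping above plays out, with an assumed (typical) checkpoint name:
# a tensor called "model.layers.0.self_attn.q_proj.weight" matches the
# ("qkv_proj", "q_proj", "q") entry, so it is written into the "q" shard of
# the fused "...self_attn.qkv_proj.weight" parameter via that parameter's
# weight_loader. Names with no match (e.g. the expert weights w1/w2/w3 here)
# take the for-else branch, which uses the parameter's own weight_loader if
# present and falls back to default_weight_loader otherwise.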

I would recommend switching some of the operations for a more correct model. We got lower loss when training Mixtral with the alternative implementation in axolotl-ai-cloud/axolotl#932, which also follows the original MegaBlocks code.