from typing import Callable, Optional

import torch
from torch import nn

from transformers.utils.generic import check_model_inputs

from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
    GenericForQuestionAnswering,
)
from ...modeling_outputs import BaseModelOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ..llama.modeling_llama import (
    LlamaAttention,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaForSequenceClassification,
    LlamaForTokenClassification,
    LlamaMLP,
    LlamaModel,
    LlamaPreTrainedModel,
    apply_rotary_pos_emb,
    eager_attention_forward,
)
from .configuration_mistral import MistralConfig


logger = logging.get_logger(__name__)


class MistralMLP(LlamaMLP):
    def __init__(self, config):
        super().__init__(config)
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)

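# MistralMLP inherits LlamaMLP's SwiGLU forward pass, conceptually:
#
#     def forward(self, x):
#         return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
#
# Only the projection shapes are re-declared above, with bias hard-coded to False.
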
class MistralAttention(LlamaAttention):
    def __init__(self, config: MistralConfig, layer_idx: int):
        # LlamaAttention.__init__ sets self.config, self.layer_idx, self.scaling,
        # and self.attention_dropout, all of which forward() relies on below.
        super().__init__(config, layer_idx)
        self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),  # main diff with Llama
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights

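# Sliding-window sketch (illustrative only; the real masks come from
# masking_utils): with `sliding_window = w`, query position i attends to key
# positions j with i - w < j <= i rather than the full causal range j <= i.
# A toy boolean mask showing the difference:
#
#     q_len, w = 6, 3
#     causal = torch.tril(torch.ones(q_len, q_len, dtype=torch.bool))
#     sliding = causal & ~torch.tril(torch.ones(q_len, q_len, dtype=torch.bool), diagonal=-w)
#
# Each row of `sliding` keeps at most the last `w` keys.
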
class MistralDecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: MistralConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.self_attn = MistralAttention(config=config, layer_idx=layer_idx)
        self.mlp = MistralMLP(config)

class MistralPreTrainedModel(LlamaPreTrainedModel):
    _can_record_outputs = {
        "hidden_states": MistralDecoderLayer,
        "attentions": MistralAttention,
    }

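# As I understand it, `_can_record_outputs` tells the `check_model_inputs`
# decorator which module classes to hook when callers request
# `output_hidden_states` / `output_attentions`, so those flags need not be
# threaded through every layer's forward signature.
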
class MistralModel(LlamaModel):
    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )

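# Cache-position sketch: for a 5-token prompt, `cache_position` is
# tensor([0, 1, 2, 3, 4]); on the next generation step, with the DynamicCache
# already holding those 5 tokens, it becomes tensor([5]), one slot per new token.
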
class MistralForCausalLM(LlamaForCausalLM):
    pass


class MistralForTokenClassification(LlamaForTokenClassification):
    pass


class MistralForSequenceClassification(LlamaForSequenceClassification):
    pass


class MistralForQuestionAnswering(GenericForQuestionAnswering, MistralPreTrainedModel): ...

__all__ = [
    "MistralForCausalLM",
    "MistralForQuestionAnswering",
    "MistralModel",
    "MistralPreTrainedModel",
    "MistralForSequenceClassification",
    "MistralForTokenClassification",
]
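# Usage sketch (illustrative only; any Mistral checkpoint works, the hub id
# below is just one example):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
#     model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
#     inputs = tokenizer("My favourite condiment is", return_tensors="pt")
#     output_ids = model.generate(**inputs, max_new_tokens=20)
#     print(tokenizer.decode(output_ids[0], skip_special_tokens=True))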