from collections.abc import Callable
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn

from transformers import initialization as init
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.generation import GenerationMixin
from transformers.integrations import use_experts_implementation, use_kernel_forward_from_hub, use_kernelized_func
from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.utils import auto_docstring, can_return_tuple
from transformers.utils.generic import TransformersKwargs, maybe_autocast
from transformers.utils.output_capturing import OutputRecorder, capture_outputs

from .configuration_laguna import LagunaConfig


@use_kernel_forward_from_hub("RMSNorm")
class LagunaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        LagunaRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
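
# A minimal shape check for LagunaRMSNorm (illustrative sketch, not part of the
# model definition): RMSNorm normalizes over the last dimension in float32 and
# preserves the input shape and dtype.
# >>> norm = LagunaRMSNorm(hidden_size=8)
# >>> x = torch.randn(2, 4, 8)
# >>> norm(x).shape
# torch.Size([2, 4, 8])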


class LagunaRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: LagunaConfig, device=None, layer_type=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config

        self.layer_types = list(set(config.layer_types))
        self.rope_type = {}
        for layer_type in self.layer_types:
            rope_params = self.config.rope_parameters[layer_type]
            if rope_params is None:
                continue

            self.rope_type[layer_type] = rope_params["rope_type"]
            rope_init_fn: Callable = self.compute_default_rope_parameters
            if self.rope_type[layer_type] != "default":
                rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type[layer_type]]
            curr_inv_freq, curr_attention_scaling = rope_init_fn(self.config, device, layer_type=layer_type)
            self.register_buffer(f"{layer_type}_inv_freq", curr_inv_freq, persistent=False)
            self.register_buffer(f"{layer_type}_original_inv_freq", curr_inv_freq.clone(), persistent=False)
            setattr(self, f"{layer_type}_attention_scaling", curr_attention_scaling)

    @staticmethod
    def compute_default_rope_parameters(
        config: LagunaConfig | None = None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
        layer_type: str | None = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
            layer_type (`str`, *optional*):
                The current layer type if the model has different RoPE parameters per type.
                Should not be used unless `config.layer_types is not None`
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters[layer_type]["rope_theta"]
        partial_rotary_factor = config.rope_parameters[layer_type].get("partial_rotary_factor", 1.0)
        head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        dim = int(head_dim * partial_rotary_factor)

        attention_factor = 1.0

        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor
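
    # Illustrative check of the inverse-frequency formula above (a sketch with
    # hypothetical values base=10000.0 and dim=4, independent of any real config):
    # >>> base, dim = 10000.0, 4
    # >>> 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
    # tensor([1.0000, 0.0100])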

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids, layer_type=None):
        inv_freq = getattr(self, f"{layer_type}_inv_freq")
        attention_scaling = getattr(self, f"{layer_type}_attention_scaling")

        inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * attention_scaling
            sin = emb.sin() * attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class LagunaMLP(nn.Module):
    def __init__(self, config, intermediate_size=None):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class LagunaTopKRouter(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_experts
        self.hidden_dim = config.hidden_size
        self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))
        self.e_score_correction_bias = nn.Parameter(torch.zeros(config.num_experts), requires_grad=False)
        self.router_logit_softcapping = config.moe_router_logit_softcapping

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        hidden_states = hidden_states.reshape(-1, self.hidden_dim)
        router_logits = F.linear(hidden_states, self.weight).float()
        if self.router_logit_softcapping > 0.0:
            router_logits = torch.tanh(router_logits / self.router_logit_softcapping) * self.router_logit_softcapping
        routing_scores = torch.sigmoid(router_logits)

        scores_for_selection = routing_scores + self.e_score_correction_bias.to(routing_scores.dtype)
        _, selected_experts = torch.topk(scores_for_selection, self.top_k, dim=-1)
        routing_weights = routing_scores.gather(-1, selected_experts)
        routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
        routing_weights = routing_weights.to(hidden_states.dtype)

        return router_logits, routing_weights, selected_experts
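
# Routing sketch with hypothetical numbers (not part of the model definition):
# the correction bias shifts which experts are *selected*, but the returned
# weights are gathered from the uncorrected sigmoid scores and renormalized
# over the chosen top-k.
# >>> scores = torch.tensor([[0.9, 0.5, 0.4]])         # sigmoid(router_logits)
# >>> bias = torch.tensor([0.0, 0.0, 0.2])             # e_score_correction_bias
# >>> _, idx = torch.topk(scores + bias, k=2, dim=-1)  # selects experts 0 and 2
# >>> w = scores.gather(-1, idx)
# >>> w / w.sum(dim=-1, keepdim=True)
# tensor([[0.6923, 0.3077]])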


@use_experts_implementation
class LagunaExperts(nn.Module):
    """Collection of expert weights stored as 3D tensors."""

    def __init__(self, config):
        super().__init__()
        self.num_experts = config.num_experts
        self.hidden_dim = config.hidden_size
        self.intermediate_dim = config.moe_intermediate_size
        self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
        self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(
        self,
        hidden_states: torch.Tensor,
        top_k_index: torch.Tensor,
        top_k_weights: torch.Tensor,
    ) -> torch.Tensor:
        final_hidden_states = torch.zeros_like(hidden_states)
        with torch.no_grad():
            expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
            expert_mask = expert_mask.permute(2, 1, 0)
            expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()

        for expert_idx in expert_hit:
            expert_idx = expert_idx[0]
            if expert_idx == self.num_experts:
                continue
            top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
            current_state = hidden_states[token_idx]
            gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
            current_hidden_states = self.act_fn(gate) * up
            current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
            current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
            final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))

        return final_hidden_states
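
# Dispatch note (descriptive, not executed): `expert_mask` one-hots the
# (token, top-k slot) -> expert assignment, and `permute(2, 1, 0)` reindexes it
# to (expert, slot, token) so that, for each expert that received at least one
# token, `torch.where` recovers which tokens routed to it and through which
# top-k slot; `index_add_` then scatters the weighted expert outputs back.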


class LagunaSparseMoeBlock(nn.Module):
    def __init__(self, config: LagunaConfig):
        super().__init__()
        self.experts = LagunaExperts(config)
        self.gate = LagunaTopKRouter(config)
        self.shared_experts = LagunaMLP(config, intermediate_size=config.shared_expert_intermediate_size)
        self.routed_scaling_factor = config.moe_routed_scaling_factor

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        shared_output = self.shared_experts(hidden_states)

        _, routing_weights, selected_experts = self.gate(hidden_states)
        hidden_states = self.experts(hidden_states, selected_experts, routing_weights)
        hidden_states = hidden_states * self.routed_scaling_factor
        hidden_states = hidden_states + shared_output

        hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return hidden_states
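
# Per token, the block above computes (a sketch of the math matching the code):
#     y = shared_experts(x) + routed_scaling_factor * sum_i w_i * expert_i(x)
# where the sum runs over the top-k experts selected by the gate and the w_i
# are the renormalized routing weights.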


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
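
# Illustrative example (sketch): rotate_half maps [x1, x2] -> [-x2, x1] along
# the last dimension.
# >>> rotate_half(torch.tensor([1.0, 2.0, 3.0, 4.0]))
# tensor([-3., -4.,  1.,  2.])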


def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Removes the interleaving of cos and sin from GLM

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed
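
# Shape sketch for the partial-rotary path above (hypothetical sizes): with
# head_dim=8 and rotary_dim=4, only the first four dims of q/k are rotated and
# the remaining four pass through unchanged.
# >>> q = k = torch.randn(1, 2, 3, 8)     # (batch, heads, seq, head_dim)
# >>> cos = sin = torch.ones(1, 3, 4)     # rotary_dim = 4
# >>> q_emb, k_emb = apply_rotary_pos_emb(q, k, cos, sin)
# >>> q_emb.shape
# torch.Size([1, 2, 3, 8])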


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
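
# GQA sketch (hypothetical sizes): each of 2 KV heads is repeated 3 times to
# serve 6 query heads.
# >>> kv = torch.randn(1, 2, 5, 16)       # (batch, kv_heads, seq, head_dim)
# >>> repeat_kv(kv, n_rep=3).shape
# torch.Size([1, 6, 5, 16])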


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
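
# Shape sketch for the eager path (hypothetical sizes; 2 KV heads serving
# 4 query heads, so num_key_value_groups=2). Note the output comes back as
# (batch, seq, heads, head_dim) after the final transpose.
# >>> import types
# >>> m = types.SimpleNamespace(num_key_value_groups=2, training=False)
# >>> q = torch.randn(1, 4, 5, 16)
# >>> k = v = torch.randn(1, 2, 5, 16)
# >>> out, w = eager_attention_forward(m, q, k, v, None, scaling=16 ** -0.5)
# >>> out.shape, w.shape
# (torch.Size([1, 5, 4, 16]), torch.Size([1, 4, 5, 5]))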


@use_kernelized_func(apply_rotary_pos_emb)
class LagunaAttention(nn.Module):
    """Afmoe-style SWA/GQA attention with Laguna-specific gating and per-layer head count."""

    def __init__(self, config: LagunaConfig, layer_idx: int, num_heads: int):
        super().__init__()
        self.num_heads = num_heads
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = self.num_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)

        self.is_local_attention = config.layer_types[layer_idx] == "sliding_attention"
        self.sliding_window = config.sliding_window if self.is_local_attention else None

        self.q_norm = LagunaRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = LagunaRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.g_proj = nn.Linear(config.hidden_size, self.num_heads, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)

        query_states = self.q_norm(query_states).transpose(1, 2)
        key_states = self.k_norm(key_states).transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()

        gate = F.softplus(self.g_proj(hidden_states).float()).to(attn_output.dtype)
        attn_output = (attn_output.view(*input_shape, -1, self.head_dim) * gate.unsqueeze(-1)).view(*input_shape, -1)

        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class LagunaDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: LagunaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = LagunaAttention(config, layer_idx, config.num_attention_heads_per_layer[layer_idx])
        if config.mlp_layer_types[layer_idx] == "sparse":
            self.mlp = LagunaSparseMoeBlock(config)
        else:
            self.mlp = LagunaMLP(config, intermediate_size=config.intermediate_size)
        self.input_layernorm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
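
# Pre-norm residual flow of the decoder layer above (a sketch of the math):
#     h = x + Attn(RMSNorm(x))
#     y = h + MLP(RMSNorm(h))
# where Attn also applies the per-head softplus output gate from g_proj.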


@auto_docstring
class LagunaPreTrainedModel(PreTrainedModel):
    config: LagunaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LagunaDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True

    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "router_logits": OutputRecorder(LagunaTopKRouter, index=0),
        "hidden_states": LagunaDecoderLayer,
        "attentions": LagunaAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        super()._init_weights(module)
        std = self.config.initializer_range
        if isinstance(module, LagunaExperts):
            init.normal_(module.gate_up_proj, mean=0.0, std=std)
            init.normal_(module.down_proj, mean=0.0, std=std)
        elif isinstance(module, LagunaTopKRouter):
            init.normal_(module.weight, mean=0.0, std=std)
            torch.nn.init.zeros_(module.e_score_correction_bias)
        elif isinstance(module, LagunaRotaryEmbedding):
            for layer_type in module.layer_types:
                rope_init_fn = module.compute_default_rope_parameters
                if module.rope_type[layer_type] != "default":
                    rope_init_fn = ROPE_INIT_FUNCTIONS[module.rope_type[layer_type]]
                curr_inv_freq, _ = rope_init_fn(module.config, layer_type=layer_type)
                init.copy_(getattr(module, f"{layer_type}_inv_freq"), curr_inv_freq)
                init.copy_(getattr(module, f"{layer_type}_original_inv_freq"), curr_inv_freq)


@auto_docstring
class LagunaModel(LagunaPreTrainedModel):
    def __init__(self, config: LagunaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [LagunaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = LagunaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        self.post_init()

    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if position_ids is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            position_ids = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
            position_ids = position_ids.unsqueeze(0)

        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            mask_creation_functions = {
                "full_attention": lambda: create_causal_mask(**mask_kwargs),
                "sliding_attention": lambda: create_sliding_window_causal_mask(**mask_kwargs),
            }
            causal_mask_mapping = {}
            for layer_type in set(self.config.layer_types):
                causal_mask_mapping[layer_type] = mask_creation_functions[layer_type]()

        hidden_states = inputs_embeds
        position_embeddings = {}
        for layer_type in set(self.config.layer_types):
            position_embeddings[layer_type] = self.rotary_emb(hidden_states, position_ids, layer_type)

        for i, decoder_layer in enumerate(self.layers[: self.config.num_hidden_layers]):
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[self.config.layer_types[i]],
                position_embeddings=position_embeddings[self.config.layer_types[i]],
                position_ids=position_ids,
                past_key_values=past_key_values,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


def load_balancing_loss_func(
    gate_logits: torch.Tensor | tuple[torch.Tensor] | None,
    num_experts: int | None = None,
    top_k=2,
    attention_mask: torch.Tensor | None = None,
) -> torch.Tensor | int:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        num_experts:
            Number of experts
        top_k:
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    compute_device = gate_logits[0].device
    concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Expand the attention mask to match the shape of expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each expert, ignoring padded tokens
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Expand the attention mask to match the shape of routing_weights
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts, ignoring padded tokens
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts
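
# Sanity sketch (hypothetical values, not part of the module): with perfectly
# uniform router logits the loss reduces to `top_k`, its value under a fully
# balanced softmax router.
# >>> logits = (torch.zeros(4, 8),)   # one layer, 4 tokens, 8 experts
# >>> load_balancing_loss_func(logits, num_experts=8, top_k=2)
# tensor(2.)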


@auto_docstring
class LagunaForCausalLM(LagunaPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = LagunaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_experts
        self.num_experts_per_tok = config.num_experts_per_tok

        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_router_logits: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )

        outputs: MoeModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_router_logits=output_router_logits,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )


__all__ = ["LagunaForCausalLM", "LagunaModel", "LagunaPreTrainedModel"]