Tags: Text Generation · MLX · Safetensors · minimax_m2 · jang · jang-quantized · JANG_2M · mixed-precision · apple-silicon · conversational · custom_code · fp8
Instructions for using bearzi/MiniMax-M2.7-JANG_2M with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- MLX
How to use bearzi/MiniMax-M2.7-JANG_2M with MLX (a streaming variant is sketched just after this list):
```python
# Make sure mlx-lm is installed
# pip install --upgrade mlx-lm

# Generate text with mlx-lm
from mlx_lm import load, generate

model, tokenizer = load("bearzi/MiniMax-M2.7-JANG_2M")

prompt = "Write a story about Einstein"
messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)
text = generate(model, tokenizer, prompt=prompt, verbose=True)
```
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- LM Studio
- Pi
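For streaming output with mlx-lm, here is a minimal sketch: recent mlx-lm releases expose `stream_generate`, whose yielded chunks carry the newly decoded text in a `.text` field. Treat the exact interface as an assumption and check your installed version.

```python
from mlx_lm import load, stream_generate

model, tokenizer = load("bearzi/MiniMax-M2.7-JANG_2M")
messages = [{"role": "user", "content": "Write a story about Einstein"}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

# Print tokens as they are decoded instead of waiting for the full completion
for chunk in stream_generate(model, tokenizer, prompt, max_tokens=512):
    print(chunk.text, end="", flush=True)
print()
```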
How to use bearzi/MiniMax-M2.7-JANG_2M with Pi:
Start the MLX server
```shell
# Install MLX LM:
uv tool install mlx-lm

# Start a local OpenAI-compatible server:
mlx_lm.server --model "bearzi/MiniMax-M2.7-JANG_2M"
```
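By default `mlx_lm.server` listens on 127.0.0.1:8080. If that port is taken, you can move it with the server's standard `--port` flag (and adjust the `baseUrl` in the next step to match); a sketch:

```shell
# Optional: serve on a different port
mlx_lm.server --model "bearzi/MiniMax-M2.7-JANG_2M" --port 8081
```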
Configure the model in Pi
```shell
# Install Pi:
npm install -g @mariozechner/pi-coding-agent
```
Add to ~/.pi/agent/models.json:
```json
{
  "providers": {
    "mlx-lm": {
      "baseUrl": "http://localhost:8080/v1",
      "api": "openai-completions",
      "apiKey": "none",
      "models": [
        { "id": "bearzi/MiniMax-M2.7-JANG_2M" }
      ]
    }
  }
}
```
Run Pi
```shell
# Start Pi in your project directory:
pi
```
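If Pi cannot reach the model, you can sanity-check the server directly; this assumes the default port 8080 from the steps above:

```shell
# Expect a JSON chat completion back from the local server
curl -X POST "http://localhost:8080/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{"model": "bearzi/MiniMax-M2.7-JANG_2M", "messages": [{"role": "user", "content": "ping"}]}'
```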
- Hermes Agent
How to use bearzi/MiniMax-M2.7-JANG_2M with Hermes Agent:
Start the MLX server
```shell
# Install MLX LM:
uv tool install mlx-lm

# Start a local OpenAI-compatible server:
mlx_lm.server --model "bearzi/MiniMax-M2.7-JANG_2M"
```
Configure Hermes
```shell
# Install Hermes:
curl -fsSL https://hermes-agent.nousresearch.com/install.sh | bash
hermes setup

# Point Hermes at the local server:
hermes config set model.provider custom
hermes config set model.base_url http://127.0.0.1:8080/v1
hermes config set model.default bearzi/MiniMax-M2.7-JANG_2M
```
Run Hermes
```shell
hermes
```
- MLX LM
How to use bearzi/MiniMax-M2.7-JANG_2M with MLX LM:
Generate or start a chat session
```shell
# Install MLX LM
uv tool install mlx-lm

# Interactive chat REPL
mlx_lm.chat --model "bearzi/MiniMax-M2.7-JANG_2M"
```
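For a one-shot completion instead of an interactive session, the `mlx_lm.generate` command covers the same flow (flag names per current mlx-lm releases):

```shell
# Single prompt, no REPL
mlx_lm.generate --model "bearzi/MiniMax-M2.7-JANG_2M" \
  --prompt "Write a story about Einstein" --max-tokens 256
```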
Run an OpenAI-compatible server
```shell
# Install MLX LM
uv tool install mlx-lm

# Start the server
mlx_lm.server --model "bearzi/MiniMax-M2.7-JANG_2M"

# Call the OpenAI-compatible server with curl
# (mlx_lm.server listens on port 8080 by default)
curl -X POST "http://localhost:8080/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "bearzi/MiniMax-M2.7-JANG_2M",
    "messages": [
      {"role": "user", "content": "Hello"}
    ]
  }'
```
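Any OpenAI-compatible client can talk to this server as well; below is a minimal sketch using the official `openai` Python package (the `api_key` value is arbitrary because the local server does not check it, and the port assumes the 8080 default):

```python
from openai import OpenAI

# Point the client at the local mlx_lm.server endpoint
client = OpenAI(base_url="http://localhost:8080/v1", api_key="none")

response = client.chat.completions.create(
    model="bearzi/MiniMax-M2.7-JANG_2M",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```

The repository is tagged `custom_code` and ships the model's own modeling source; the auto-generated `modeling_minimax_m2.py` is reproduced below.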
````python
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_minimax_m2.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional, Union, Unpack

import torch
from torch import nn

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.generation import GenerationMixin
from transformers.integrations import use_kernel_forward_from_hub
from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import (
    GenericForQuestionAnswering,
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
from transformers.utils.deprecation import deprecate_kwarg
from transformers.utils.generic import OutputRecorder, check_model_inputs

from .configuration_minimax_m2 import MiniMaxM2Config


class MiniMaxM2MLP(nn.Module):
    def __init__(self, config: MiniMaxM2Config):
        super().__init__()
        self.ffn_dim = config.intermediate_size
        self.hidden_dim = config.hidden_size
        self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
        self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
        self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
        current_hidden_states = self.w2(current_hidden_states)
        return current_hidden_states


class MiniMaxM2Experts(nn.ModuleList):
    """
    ModuleList of experts.
    """

    def __init__(self, config: MiniMaxM2Config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_local_experts
        for _ in range(self.num_experts):
            self.append(MiniMaxM2MLP(config))

    def forward(
        self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
    ) -> torch.Tensor:
        """
        Args:
            hidden_states: (batch_size * sequence_length, hidden_dim)
            selected_experts: (batch_size * sequence_length, top_k)
            routing_weights: (batch_size * sequence_length, top_k)
        Returns:
            (batch_size * sequence_length, hidden_dim)
        """
        final_hidden_states = torch.zeros_like(hidden_states)
        expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)
        expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
        for expert_idx in expert_hit:
            idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
            current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
            current_hidden_states = self[expert_idx](current_state) * top_k_weights[top_x, idx, None]
            final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
        return final_hidden_states


class MiniMaxM2SparseMoeBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.jitter_noise = config.router_jitter_noise
        self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
        self.experts = MiniMaxM2Experts(config)
        self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))

    def route_tokens_to_experts(self, router_logits):
        routing_weights = torch.nn.functional.sigmoid(router_logits.float())
        scores_for_choice = routing_weights + self.e_score_correction_bias
        _, top_k_index = torch.topk(scores_for_choice, self.top_k, dim=-1, sorted=False)
        top_k_weights = routing_weights.gather(1, top_k_index)
        top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
        return top_k_index, top_k_weights.to(router_logits.dtype)

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        if self.training and self.jitter_noise > 0:
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        router_logits = self.gate(hidden_states)
        top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
        hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
        hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return hidden_states, router_logits
@use_kernel_forward_from_hub("RMSNorm")
class MiniMaxM2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        MiniMaxM2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Keep half or full tensor for later concatenation
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Apply rotary embeddings on the first half or full tensor
    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate back to full shape
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed


class MiniMaxM2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: MiniMaxM2Config, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
            self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        if self.use_qk_norm:  # main diff from Llama
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        key_states = key_states.view(hidden_shape)
        query_states = query_states.view(hidden_shape)
        value_states = value_states.view(hidden_shape)

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MiniMaxM2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = MiniMaxM2Attention(config, layer_idx)
        self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
        self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states, _ = self.block_sparse_moe(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
class MiniMaxM2RotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: MiniMaxM2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
class MiniMaxM2PreTrainedModel(PreTrainedModel):
    config: MiniMaxM2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MiniMaxM2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = False  # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
    _supports_attention_backend = True
    _can_record_outputs = {
        "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
        "hidden_states": MiniMaxM2DecoderLayer,
        "attentions": MiniMaxM2Attention,
    }
@auto_docstring
class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
    def __init__(self, config: MiniMaxM2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
    num_experts: Optional[int] = None,
    top_k=2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        num_experts:
            Number of experts
        top_k:
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each experts
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each experts
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts
class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = MiniMaxM2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM

        >>> model = MiniMaxM2ForCausalLM.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")
        >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs: MoeModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_router_logits=output_router_logits,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )
class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
    pass


class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
    pass


class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
    pass


__all__ = [
    "MiniMaxM2ForCausalLM",
    "MiniMaxM2ForQuestionAnswering",
    "MiniMaxM2Model",
    "MiniMaxM2PreTrainedModel",
    "MiniMaxM2ForSequenceClassification",
    "MiniMaxM2ForTokenClassification",
]
````
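To make the routing in `MiniMaxM2SparseMoeBlock` above concrete: experts are *selected* by sigmoid scores plus the `e_score_correction_bias` buffer, while the mixing *weights* come from the unbiased sigmoid scores, renormalized per token. Below is a self-contained toy run of that step (shapes and values are illustrative only):

```python
import torch

num_experts, top_k = 8, 2
router_logits = torch.randn(5, num_experts)          # one row per token
e_score_correction_bias = torch.zeros(num_experts)   # learned/loaded in the real model

# Selection uses the bias-corrected scores...
routing_weights = torch.sigmoid(router_logits.float())
scores_for_choice = routing_weights + e_score_correction_bias
_, top_k_index = torch.topk(scores_for_choice, top_k, dim=-1, sorted=False)

# ...but the mixing weights are the raw sigmoid scores, renormalized to sum to 1
top_k_weights = routing_weights.gather(1, top_k_index)
top_k_weights = top_k_weights / top_k_weights.sum(dim=-1, keepdim=True)

print(top_k_index)    # which experts each token visits
print(top_k_weights)  # per-token convex combination over the chosen experts
```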