import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutput

from .configuration_minimamba import MiniMambaConfig
from .model import Mamba2, Mamba2Config


class MiniMamba(PreTrainedModel):
    """
    A Hugging Face–style wrapper around a Mamba2 model, providing:
      • forward(...) returning a CausalLMOutput
      • support for HF training loops
      • a naive generate(...) method with top-k/top-p sampling
    """
    config_class = MiniMambaConfig

    def __init__(self, config: MiniMambaConfig) -> None:
        """
        Initialize the MiniMamba model, bridging Mamba2 with HF's PreTrainedModel.
        """
        super().__init__(config)

        # Translate the HF config into Mamba2's native configuration object.
        mamba2_args = Mamba2Config(
            dim=config.dim,
            num_layers=config.num_layers,
            num_heads=config.num_heads,
            state_dim=config.state_dim,
            num_groups=config.num_groups,
            conv_size=config.conv_size,
            use_mem_eff_path=config.use_mem_eff_path,
            dt_bias=config.dt_bias,
            D_has_head_dim=config.D_has_head_dim,
            learnable_init_states=config.learnable_init_states,
            ssm_chunk_size=config.ssm_chunk_size,
            vocab_size=config.vocab_size,
            ffn_dim_multiplier=config.ffn_dim_multiplier,
            multiple_of=config.multiple_of,
            norm_eps=config.norm_eps,
            init_use_depth=config.init_use_depth,
            init_base_std=config.init_base_std,
            init_std_factor=config.init_std_factor,
            bias=config.bias,
            seed=config.seed,
            weight_tying=getattr(config, "weight_tying", False),
            torch_dtype=getattr(torch, config.torch_dtype) if isinstance(config.torch_dtype, str) else config.torch_dtype,
        )

        self.mamba = Mamba2(config=mamba2_args)

        # Resolve the target device and dtype once so they can be reused later.
        self.device_ = 'cuda' if torch.cuda.is_available() else 'cpu'
        if isinstance(config.torch_dtype, str):
            self.dtype_ = getattr(torch, config.torch_dtype)
        else:
            self.dtype_ = config.torch_dtype

        self.apply(self._init_weights)

        print("MiniMamba Model Parameter Count: %.2fM\n" % (self._get_num_params() / 1e6,))

    def forward(
        self,
        input_ids: torch.LongTensor,
        labels: torch.LongTensor = None,
        **kwargs
    ) -> CausalLMOutput:
        """
        Forward pass for causal language modeling.
        Returns a CausalLMOutput that includes the loss (if labels are provided) and the logits.
        """
        logits = self.mamba(input_ids)

        loss = None
        if labels is not None:
            # Shift so that the logits at position i are scored against the token at position i + 1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
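            # Illustration (made-up 4-token sequence): for labels [t0, t1, t2, t3],
            # shift_logits keeps positions 0..2 and shift_labels keeps [t1, t2, t3],
            # so each position is trained to predict the token that follows it.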
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1)
            )

        return CausalLMOutput(
            loss=loss,
            logits=logits,
        )

    @torch.no_grad()
    def generate(
        self,
        input_ids: torch.LongTensor,
        max_new_tokens: int = 50,
        temperature: float = 0.5,
        top_k: int = 50,
        top_p: float = 0.95,
        eos_token_id: int = None,
        pad_token_id: int = 0,
        **kwargs
    ):
        """
        A naive token-by-token generation loop (temperature scaling + top-k/top-p sampling).
        """
        generated_ids = input_ids.clone()

        for _ in range(max_new_tokens):
            # Re-run the full sequence (no cached state) and keep the logits of the last position.
            outputs = self.forward(generated_ids)
            logits = outputs.logits[:, -1, :]

            # Temperature scaling.
            if temperature != 1.0:
                logits = logits / temperature

            # Top-k / top-p (nucleus) filtering.
            logits = self.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)

            # Sample the next token from the filtered distribution.
            probs = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            generated_ids = torch.cat([generated_ids, next_token], dim=1)

            # Stop early once every sequence in the batch has emitted EOS.
            if eos_token_id is not None and (next_token == eos_token_id).all():
                break

        return generated_ids

    @staticmethod
    def top_k_top_p_filtering(
        logits: torch.Tensor,
        top_k: int = 50,
        top_p: float = 0.95,
        filter_value: float = float("-inf"),
    ):
        """
        Filters logits using top-k and/or nucleus (top-p) filtering.
        """
        # Top-k: mask every token scoring below the k-th highest logit.
        if top_k > 0:
            top_k = min(top_k, logits.size(-1))
            indices_to_remove = logits < torch.topk(logits, top_k, dim=-1).values[:, -1, None]
            logits[indices_to_remove] = filter_value

        # Top-p (nucleus): keep the smallest set of tokens whose cumulative probability exceeds top_p.
        if 0 < top_p < 1.0:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            sorted_indices_to_remove = cumulative_probs > top_p

            # Shift the mask right so the first token that crosses the threshold is kept.
            sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
            sorted_indices_to_remove[:, 0] = False

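            # Illustration (made-up numbers): with sorted probabilities [0.5, 0.3, 0.15, 0.05]
            # and top_p=0.9, the cumulative sums are [0.5, 0.8, 0.95, 1.0]; after the shift,
            # only the last token is masked, so the smallest set whose cumulative mass
            # exceeds 0.9 ([0.5, 0.3, 0.15]) is kept.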
            # Scatter the mask back from sorted order to the original vocabulary order.
            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = filter_value

        return logits

    def _init_weights(self, module):
        """
        HF calls _init_weights to initialize parameters.
        If you prefer Mamba’s own init approach, you can call model.mamba.init_weights().
        """
        if isinstance(module, Mamba2):
            module.init_weights()
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def _get_num_params(self):
        # Count only trainable parameters.
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
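

# ---------------------------------------------------------------------------
# Illustrative usage sketch (an assumption-laden example, not part of the
# model definition): it assumes MiniMambaConfig can be instantiated with its
# default field values. Because this module uses relative imports, run it as
# part of its package rather than as a standalone script.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = MiniMambaConfig()  # assumed: the config provides usable defaults
    model = MiniMamba(config)

    # A dummy prompt of 8 token ids drawn uniformly from the vocabulary.
    prompt = torch.randint(0, config.vocab_size, (1, 8), dtype=torch.long)

    # Training-style call: passing labels makes forward() return a loss.
    out = model(input_ids=prompt, labels=prompt)
    print("loss:", None if out.loss is None else out.loss.item())

    # Inference-style call: naive sampling with temperature and top-k/top-p.
    generated = model.generate(prompt, max_new_tokens=10, temperature=0.8, top_k=50, top_p=0.9)
    print("generated shape:", tuple(generated.shape))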