import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutput

from .modules import STU, Attention, MLP
from .utils import nearest_power_of_two
from .layers import STULayer, AttentionLayer
from .configuration_ministu import MiniSTUConfig
from .filters import get_spectral_filters

try:
    from liger_kernel.transformers.rms_norm import LigerRMSNorm as TritonNorm
    triton_norm = True
except ImportError as e:
    print(
        f"Unable to import Triton-based RMSNorm: {e}. Falling back to PyTorch implementation."
    )
    from torch.nn import RMSNorm
    triton_norm = False
|
|
class MiniSTU(PreTrainedModel):
    config_class = MiniSTUConfig

    def __init__(self, config) -> None:
        super().__init__(config)
        self.n_layers = config.n_layers
        # FFT length for the STU convolutions: a linear convolution of two
        # length-L sequences has length 2L - 1, rounded up to a power of two.
        self.n = nearest_power_of_two(config.seq_len * 2 - 1, round_up=True)

        # config.torch_dtype may arrive as a torch.dtype or as its string name.
        if isinstance(config.torch_dtype, torch.dtype):
            torch_dtype = config.torch_dtype
        else:
            torch_dtype = getattr(torch, config.torch_dtype)

        device = torch.device(config.device)

        # Precompute the spectral filter bank shared by all STU layers.
        self.phi = get_spectral_filters(
            config.seq_len,
            config.num_eigh,
            config.use_hankel_L,
            device=device,
            dtype=torch_dtype,
        )
        self.use_approx = config.use_approx
        self.use_hankel_L = config.use_hankel_L
|
|
        self.tok_emb = nn.Embedding(
            config.vocab_size, config.n_embd, dtype=torch_dtype, device=device
        )
        self.dropout = nn.Dropout(config.dropout)
|
|
        # Alternate layer types: even-indexed layers are always STU; odd-indexed
        # layers are attention when config.use_attn is set, otherwise STU as well.
        self.layers = nn.ModuleList()
        for layer_idx in range(self.n_layers):
            if layer_idx % 2 == 0:
                self.layers.append(STULayer(config, self.phi, self.n))
            else:
                self.layers.append(
                    AttentionLayer(config)
                    if config.use_attn
                    else STULayer(config, self.phi, self.n)
                )
|
|
        self.norm = TritonNorm(config.n_embd) if triton_norm else RMSNorm(config.n_embd)
|
|
        self.lm_head = nn.Linear(
            config.n_embd, config.vocab_size, bias=config.bias, dtype=torch_dtype, device=device
        )
        # Weight tying: the token embedding shares its weight matrix with the LM head.
        self.tok_emb.weight = self.lm_head.weight

        self.std = config.n_embd ** -0.5
        self.apply(self._init_weights)
        print("Model Parameter Count: %.2fM\n" % (self._get_num_params() / 1e6,))
|
|
    def forward(
        self,
        input_ids: torch.Tensor,
        labels: torch.Tensor = None,
        **kwargs,
    ) -> CausalLMOutput:
        tok_emb = self.tok_emb(input_ids)
        x = self.dropout(tok_emb)
|
|
        for layer in self.layers:
            x = layer(x)
|
|
        x = self.norm(x)
        logits = self.lm_head(x)
|
|
        loss = None
        if labels is not None:
            # Shift so that the logits at position t predict the token at t + 1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            )
|
|
        return CausalLMOutput(
            loss=loss,
            logits=logits,
        )
|
|
    def _get_num_params(self):
        # Subtract positional-embedding parameters (if present) and, when the
        # token embedding is not tied to the LM head, the embedding weights.
        n_params = sum(p.numel() for p in self.parameters())
        if hasattr(self, "pos_emb") and self.pos_emb is not None:
            n_params -= self.pos_emb.weight.numel()
        if self.tok_emb.weight is not self.lm_head.weight:
            n_params -= self.tok_emb.weight.numel()
        return n_params
|
|
    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            std = self.std
            if hasattr(module, "SCALE_INIT"):
                # Scale residual projections down by sqrt(2 * n_layers). A local
                # variable is used so the scaling does not compound across calls
                # (mutating self.std here would shrink it for every later module).
                std *= (2 * self.n_layers) ** -0.5
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=self.std)
        elif isinstance(module, STU):
            if self.use_approx:
                torch.nn.init.xavier_normal_(module.M_inputs)
                torch.nn.init.xavier_normal_(module.M_filters)
            else:
                torch.nn.init.xavier_normal_(module.M_phi_plus)
                if not self.use_hankel_L:
                    torch.nn.init.xavier_normal_(module.M_phi_minus)
        elif isinstance(module, Attention):
            torch.nn.init.xavier_normal_(module.c_attn.weight)
            torch.nn.init.xavier_normal_(module.c_proj.weight)
            if module.c_attn.bias is not None:
                torch.nn.init.zeros_(module.c_attn.bias)
            if module.c_proj.bias is not None:
                torch.nn.init.zeros_(module.c_proj.bias)

    @staticmethod
    def top_k_top_p_filtering(
        logits: torch.Tensor,
        top_k: int = 50,
        top_p: float = 0.95,
        filter_value: float = float("-inf"),
    ):
        """
        Filters a distribution of logits using top-k and/or nucleus (top-p)
        filtering. Modifies logits in place and also returns them.
        """
        if top_k > 0:
            top_k = min(top_k, logits.size(-1))
            # Remove every token whose logit is below the k-th largest logit.
            indices_to_remove = logits < torch.topk(logits, top_k, dim=-1).values[:, -1, None]
            logits[indices_to_remove] = filter_value

        if 0 < top_p < 1.0:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            # Mask tokens once the cumulative probability exceeds top_p, shifting
            # the mask right so the first token past the threshold is kept.
            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
            sorted_indices_to_remove[:, 0] = False

            # Map the mask from sorted order back to the original vocabulary order.
            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = filter_value

        return logits
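
    # Worked example (illustrative, not executed): for a single row of logits
    # [2.0, 1.0, 0.5, -1.0], top_k=2 keeps only [2.0, 1.0] and sets the rest to
    # -inf. With top_p=0.7, the softmax over the survivors is (0.73, 0.27), so
    # the cumulative probabilities are (0.73, 1.0); both exceed 0.7, and after
    # the right-shift only the highest-probability token survives.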
|
|
    def generate(
        self,
        input_ids: torch.LongTensor,
        max_new_tokens: int = 50,
        temperature: float = 0.5,
        top_k: int = 50,
        top_p: float = 0.95,
        eos_token_id: int = None,
        pad_token_id: int = 0,
        **kwargs,
    ):
        """
        Naive token-by-token generation loop with top-k/top-p filtering and
        optional temperature scaling.

        Args:
            input_ids (torch.LongTensor): shape (batch_size, sequence_length).
            max_new_tokens (int): maximum number of tokens to generate beyond the
                length of input_ids.
            temperature (float): sampling temperature (> 0).
            top_k (int): top-k sampling cutoff.
            top_p (float): nucleus (top-p) sampling cutoff.
            eos_token_id (int): if set, stop generation once this token is produced.
            pad_token_id (int): accepted for API compatibility; unused here.
            kwargs: unused arguments (e.g., num_beams) kept for compatibility.

        Returns:
            torch.LongTensor: shape (batch_size, sequence_length + generated_tokens).
        """
        generated_ids = input_ids.clone()
|
|
        for _ in range(max_new_tokens):
            # No KV cache: recompute the full forward pass each step and read
            # the logits at the final position.
            outputs = self.forward(generated_ids)
            logits = outputs.logits[:, -1, :]

            if temperature != 1.0:
                logits = logits / temperature

            logits = self.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)

            # Sample the next token from the filtered distribution.
            probabilities = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probabilities, num_samples=1)
            generated_ids = torch.cat([generated_ids, next_token], dim=1)

            # With batch_size > 1 this stops only once every sequence emits EOS
            # in the same step; a per-sequence finished mask would be needed for
            # finer-grained early stopping.
            if eos_token_id is not None:
                if (next_token == eos_token_id).all():
                    break

        return generated_ids
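

# Usage sketch (illustrative, not part of the module API): the config field
# names are taken from the attribute accesses in __init__ above; the values
# are made up, and MiniSTUConfig's actual signature may differ.
if __name__ == "__main__":
    config = MiniSTUConfig(
        vocab_size=32_000,
        n_embd=256,
        n_layers=4,
        seq_len=512,
        num_eigh=16,
        use_hankel_L=False,
        use_approx=False,
        use_attn=True,
        bias=False,
        dropout=0.0,
        torch_dtype="float32",
        device="cpu",
    )
    model = MiniSTU(config)
    model.eval()

    # Sample a short continuation from a random dummy prompt.
    prompt = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        out = model.generate(prompt, max_new_tokens=16, temperature=0.8)
    print(out.shape)  # expected: torch.Size([1, 24])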