rtferraz committed
Commit 0dec8e4 · verified · 1 Parent(s): 15fbfea

Add DomainTransformerForCausalLM — GPT-style NoPE model with SDPA attention, weight tying, HF Trainer compatible

src/domain_tokenizer/models/modeling.py ADDED
@@ -0,0 +1,209 @@
+ """
+ DomainTransformer Model — GPT-style causal decoder for domain token sequences.
+
+ Architecture follows:
+ - NoPE (no positional encoding) — Kazemnejad et al. 2023 (arXiv:2305.19466)
+ - Pre-norm (LayerNorm before attention and FFN) — GPT-2 style
+ - F.scaled_dot_product_attention with is_causal=True — auto FlashAttention
+ - Weight tying between token embedding and LM head
+ - Scaled residual initialization: 1/sqrt(2*N_layers)
+
+ Reference sizes (Nubank nuFormer, arXiv:2507.23267):
+ - 24M: 6 layers, d=512, 8 heads
+ - 330M: 24 layers, d=1024, 16 heads
+ """
+
+ import math
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PreTrainedModel
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+
+ from .configuration import DomainTransformerConfig
+
+
+ class DomainTransformerAttention(nn.Module):
+     """Multi-head self-attention with NoPE.
+
+     Uses F.scaled_dot_product_attention for automatic FlashAttention/SDPA dispatch.
+     No positional encoding — causal masking via is_causal=True.
+     """
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+         self.scaling = self.head_dim ** -0.5
+
+         self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.k_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.v_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.attn_dropout = config.attention_probs_dropout_prob
+
+     def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         B, T, C = hidden_states.shape
+         q = self.q_proj(hidden_states).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+         k = self.k_proj(hidden_states).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+         v = self.v_proj(hidden_states).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+
+         # Combine the causal mask with the HF-style padding mask (1=attend, 0=ignore); assumes right padding
+         sdpa_mask = None
+         use_causal = True
+         if attention_mask is not None:
+             causal = torch.tril(torch.ones(T, T, dtype=torch.bool, device=q.device))
+             sdpa_mask = causal[None, None, :, :] & attention_mask[:, None, None, :].bool()
+             use_causal = False
+
+         attn_out = F.scaled_dot_product_attention(
+             q, k, v, attn_mask=sdpa_mask,
+             dropout_p=self.attn_dropout if self.training else 0.0,
+             is_causal=use_causal, scale=self.scaling,
+         )
+         attn_out = attn_out.transpose(1, 2).contiguous().reshape(B, T, C)
+         return self.out_proj(attn_out)
+
+
+ class DomainTransformerMLP(nn.Module):
+     """Two-layer FFN with GELU activation (GPT-2 style)."""
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__()
+         self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=True)
+         self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=True)
+         self.act = nn.GELU(approximate="tanh")
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         return self.dropout(self.down_proj(self.act(self.up_proj(hidden_states))))
+
+
+ class DomainTransformerBlock(nn.Module):
+     """Single transformer block with pre-norm architecture."""
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__()
+         self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.attn = DomainTransformerAttention(config)
+         self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.mlp = DomainTransformerMLP(config)
+
+     def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         residual = hidden_states
+         hidden_states = self.attn(self.ln_1(hidden_states), attention_mask)
+         hidden_states = residual + hidden_states
+         residual = hidden_states
+         hidden_states = self.mlp(self.ln_2(hidden_states))
+         hidden_states = residual + hidden_states
+         return hidden_states
+
+
+ class DomainTransformerPreTrainedModel(PreTrainedModel):
+     """Base class with weight initialization."""
+     config_class = DomainTransformerConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+
+     def _init_weights(self, module: nn.Module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             nn.init.normal_(module.weight, mean=0.0, std=std)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             nn.init.normal_(module.weight, mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 nn.init.zeros_(module.weight[module.padding_idx])
+         elif isinstance(module, nn.LayerNorm):
+             nn.init.zeros_(module.bias)
+             nn.init.ones_(module.weight)
+         # Scaled residual init (see module docstring): residual-path projections
+         # are re-drawn with std / sqrt(2 * N_layers), GPT-2 style.
+         for name, param in module.named_parameters():
+             if name in ("out_proj.weight", "down_proj.weight"):
+                 nn.init.normal_(param, mean=0.0, std=std / math.sqrt(2 * self.config.num_hidden_layers))
+
+
+ class DomainTransformerModel(DomainTransformerPreTrainedModel):
+     """The bare DomainTransformer: embeddings + blocks + final layernorm."""
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__(config)
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
+         self.embed_dropout = nn.Dropout(config.hidden_dropout_prob)
+         self.blocks = nn.ModuleList([DomainTransformerBlock(config) for _ in range(config.num_hidden_layers)])
+         self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.gradient_checkpointing = False
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.embed_tokens = value
+
+     def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, **kwargs):
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+         hidden_states = self.embed_dropout(inputs_embeds)
+         for block in self.blocks:
+             if self.gradient_checkpointing and self.training:
+                 hidden_states = torch.utils.checkpoint.checkpoint(block, hidden_states, attention_mask, use_reentrant=False)
+             else:
+                 hidden_states = block(hidden_states, attention_mask)
+         hidden_states = self.ln_f(hidden_states)
+         return BaseModelOutputWithPast(last_hidden_state=hidden_states)
+
+
+ class DomainTransformerForCausalLM(DomainTransformerPreTrainedModel):
+     """DomainTransformer with a causal language modeling head.
+
+     The LM head is weight-tied with the token embedding layer.
+     Loss is computed via standard shifted cross-entropy.
+     """
+     _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__(config)
+         self.model = DomainTransformerModel(config)
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def forward(self, input_ids=None, attention_mask=None, labels=None, inputs_embeds=None, **kwargs):
+         outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds)
+         hidden_states = outputs.last_hidden_state
+         logits = self.lm_head(hidden_states)
+
+         loss = None
+         if labels is not None:
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             loss = F.cross_entropy(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1), ignore_index=-100)
+
+         return CausalLMOutputWithPast(loss=loss, logits=logits)
+
+     def get_user_embedding(self, input_ids, attention_mask=None):
+         """Extract user-level embedding from the last non-padding token."""
+         outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)
+         hidden_states = outputs.last_hidden_state
+         if attention_mask is not None:
+             seq_lengths = attention_mask.sum(dim=1) - 1
+             batch_idx = torch.arange(hidden_states.size(0), device=hidden_states.device)
+             return hidden_states[batch_idx, seq_lengths]
+         else:
+             return hidden_states[:, -1, :]
+
+
+ DomainTransformerConfig.register_for_auto_class()
+ DomainTransformerForCausalLM.register_for_auto_class("AutoModelForCausalLM")
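
For reference, a minimal usage sketch (not part of this commit). It assumes the src/ layout is installed so domain_tokenizer.models is importable, and that DomainTransformerConfig (defined in the sibling configuration.py, which is not shown here) exposes, with sensible defaults, the attribute names used throughout modeling.py (vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, the dropout probabilities, layer_norm_eps, initializer_range) as constructor keyword arguments, with tie_word_embeddings left at its default of True. The hyperparameters below are illustrative only:

import torch
from domain_tokenizer.models.configuration import DomainTransformerConfig
from domain_tokenizer.models.modeling import DomainTransformerForCausalLM

# Roughly the 24M reference size from the module docstring (6 layers, d=512, 8 heads).
# These kwargs are assumed to match DomainTransformerConfig's constructor.
config = DomainTransformerConfig(
    vocab_size=1024,
    hidden_size=512,
    num_hidden_layers=6,
    num_attention_heads=8,
    intermediate_size=2048,
)
model = DomainTransformerForCausalLM(config)

# Causal-LM batch: labels mirror input_ids; padded positions would be set to -100.
input_ids = torch.randint(0, config.vocab_size, (2, 16))
attention_mask = torch.ones_like(input_ids)
out = model(input_ids=input_ids, attention_mask=attention_mask, labels=input_ids.clone())
print(out.loss)          # scalar shifted cross-entropy loss, as consumed by the HF Trainer
print(out.logits.shape)  # torch.Size([2, 16, 1024])

# User-level embedding taken from the last non-padding position.
emb = model.get_user_embedding(input_ids, attention_mask=attention_mask)
print(emb.shape)         # torch.Size([2, 512])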