rtferraz committed on
Commit b8af57a · verified · 1 parent: 5d065be

End of training

Files changed (3)
  1. README.md +54 -0
  2. configuration.py +101 -0
  3. modeling.py +209 -0
README.md ADDED
@@ -0,0 +1,54 @@
+ ---
+ library_name: transformers
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: ecommerce-domain-24m
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # ecommerce-domain-24m
+
+ This model is a fine-tuned version of an unspecified base model on an unspecified dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 128 (train_batch_size × gradient_accumulation_steps)
+ - optimizer: adamw_torch_fused with betas=(0.9, 0.999) and epsilon=1e-08 (no additional optimizer arguments)
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 200
+ - num_epochs: 3
+
+ ### Training results
+
+ More information needed
+
+ ### Framework versions
+
+ - Transformers 5.5.0
+ - PyTorch 2.10.0+cu128
+ - Datasets 4.3.0
+ - Tokenizers 0.22.2
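
The card above stops short of a usage example. A minimal loading sketch follows, assuming the repository id is rtferraz/ecommerce-domain-24m (inferred from the committer and model name, so it may differ) and that the auto-class registration at the bottom of modeling.py is wired into the repo's config; trust_remote_code=True is needed because the architecture ships as custom code in this commit:

import torch
from transformers import AutoModelForCausalLM

repo_id = "rtferraz/ecommerce-domain-24m"  # assumed repo id; adjust if it differs
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

# Toy domain-token ids; real inputs would come from the project's own tokenizer.
input_ids = torch.tensor([[3, 17, 42, 256, 9]])
with torch.no_grad():
    out = model(input_ids=input_ids)
print(out.logits.shape)  # (1, 5, vocab_size)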
configuration.py ADDED
@@ -0,0 +1,101 @@
+ """
+ DomainTransformer Configuration.
+
+ HF-compatible config following Nubank nuFormer architecture choices:
+ - GPT-style causal decoder
+ - NoPE (no positional encoding) by default — Kazemnejad et al. 2023
+ - Pre-norm (LayerNorm before attention and FFN)
+ - Weight-tied embedding ↔ LM head
+ - Two reference sizes: 24M (d=512, L=6) and 330M (d=1024, L=24)
+ """
+
+ from transformers import PretrainedConfig
+
+
+ class DomainTransformerConfig(PretrainedConfig):
+     """Configuration for DomainTransformer causal language model.
+
+     This config produces a GPT-style decoder-only Transformer with:
+     - No positional encoding (NoPE) by default
+     - Pre-norm architecture (LayerNorm before attn/FFN)
+     - GELU activation in FFN
+     - Weight tying between token embeddings and LM head
+
+     Predefined sizes following Nubank nuFormer (arXiv:2507.23267):
+     - "24m": 6 layers, d_model=512, 8 heads, FFN=2048 (~24M params)
+     - "85m": 12 layers, d_model=768, 12 heads, FFN=3072 (~85M params)
+     - "330m": 24 layers, d_model=1024, 16 heads, FFN=4096 (~330M params)
+
+     Args:
+         vocab_size: Size of the token vocabulary.
+         hidden_size: Dimension of hidden representations (d_model).
+         num_hidden_layers: Number of transformer blocks.
+         num_attention_heads: Number of attention heads.
+         intermediate_size: FFN intermediate dimension (default: 4 * hidden_size).
+         hidden_act: Activation function in FFN.
+         hidden_dropout_prob: Dropout rate for embeddings and residual connections.
+         attention_probs_dropout_prob: Dropout rate for attention weights.
+         max_position_embeddings: Maximum sequence length (for buffer sizing, not PE).
+         initializer_range: Std for weight initialization (normal distribution).
+         layer_norm_eps: Epsilon for LayerNorm.
+         use_cache: Whether to return past key values for generation.
+         tie_word_embeddings: Whether to tie input/output embeddings.
+     """
+
+     model_type = "domain_transformer"
+
+     def __init__(
+         self,
+         vocab_size: int = 32000,
+         hidden_size: int = 512,
+         num_hidden_layers: int = 6,
+         num_attention_heads: int = 8,
+         intermediate_size: int = None,
+         hidden_act: str = "gelu",
+         hidden_dropout_prob: float = 0.0,
+         attention_probs_dropout_prob: float = 0.0,
+         max_position_embeddings: int = 2048,
+         initializer_range: float = 0.02,
+         layer_norm_eps: float = 1e-5,
+         use_cache: bool = True,
+         tie_word_embeddings: bool = True,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
+         self.hidden_act = hidden_act
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.max_position_embeddings = max_position_embeddings
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.use_cache = use_cache
+
+         assert hidden_size % num_attention_heads == 0, (
+             f"hidden_size ({hidden_size}) must be divisible by "
+             f"num_attention_heads ({num_attention_heads})"
+         )
+
+         super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
+
+     @classmethod
+     def from_preset(cls, name: str, vocab_size: int = 32000, **overrides) -> "DomainTransformerConfig":
+         """Create config from a named preset.
+
+         Presets:
+             "24m": ~24M params (6 layers, d=512, 8 heads)
+             "85m": ~85M params (12 layers, d=768, 12 heads)
+             "330m": ~330M params (24 layers, d=1024, 16 heads)
+         """
+         presets = {
+             "24m": dict(hidden_size=512, num_hidden_layers=6, num_attention_heads=8, intermediate_size=2048),
+             "85m": dict(hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072),
+             "330m": dict(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096),
+         }
+         if name not in presets:
+             raise ValueError(f"Unknown preset '{name}'. Available: {list(presets.keys())}")
+         params = {**presets[name], "vocab_size": vocab_size, **overrides}
+         return cls(**params)
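
The from_preset helper above is the natural entry point for building configs. A short sketch, assuming configuration.py is importable from the working directory (the preset names come from the file; the vocab_size and override values here are illustrative):

from configuration import DomainTransformerConfig

# ~24M-parameter preset with an illustrative vocabulary size.
cfg = DomainTransformerConfig.from_preset("24m", vocab_size=16384)
print(cfg.hidden_size, cfg.num_hidden_layers, cfg.intermediate_size)  # 512 6 2048

# Keyword overrides are forwarded to __init__, e.g. a longer sequence buffer.
cfg_long = DomainTransformerConfig.from_preset("330m", max_position_embeddings=4096)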
modeling.py ADDED
@@ -0,0 +1,209 @@
+ """
+ DomainTransformer Model — GPT-style causal decoder for domain token sequences.
+
+ Architecture follows:
+ - NoPE (no positional encoding) — Kazemnejad et al. 2023 (arXiv:2305.19466)
+ - Pre-norm (LayerNorm before attention and FFN) — GPT-2 style
+ - F.scaled_dot_product_attention with is_causal=True — auto FlashAttention
+ - Weight tying between token embedding and LM head
+ - Scaled residual initialization: 1/sqrt(2*N_layers)
+
+ Reference sizes (Nubank nuFormer, arXiv:2507.23267):
+ - 24M: 6 layers, d=512, 8 heads
+ - 330M: 24 layers, d=1024, 16 heads
+ """
+
+ import math
+ from typing import Optional
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from transformers import PreTrainedModel
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+
+ from .configuration import DomainTransformerConfig
+
+
+ class DomainTransformerAttention(nn.Module):
+     """Multi-head self-attention with NoPE.
+
+     Uses F.scaled_dot_product_attention for automatic FlashAttention/SDPA dispatch.
+     No positional encoding — causal masking via is_causal=True, or via an explicit
+     causal-plus-padding mask when an attention_mask is supplied.
+     """
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+         self.scaling = self.head_dim ** -0.5
+
+         self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.k_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.v_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.attn_dropout = config.attention_probs_dropout_prob
+
+     def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         B, T, C = hidden_states.shape
+         q = self.q_proj(hidden_states).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+         k = self.k_proj(hidden_states).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+         v = self.v_proj(hidden_states).view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+
+         # Convert an HF-style attention_mask (1 = attend, 0 = ignore) to an additive SDPA mask.
+         # SDPA rejects is_causal together with an explicit attn_mask, so when a padding mask is
+         # present the causal structure must be folded into the mask rather than dropped.
+         sdpa_mask = None
+         use_causal = True
+         if attention_mask is not None:
+             min_value = torch.finfo(q.dtype).min
+             causal = torch.full((T, T), min_value, dtype=q.dtype, device=q.device).triu(diagonal=1)
+             key_padding = attention_mask[:, None, None, :] == 0
+             sdpa_mask = causal.expand(B, 1, T, T).masked_fill(key_padding, min_value)
+             use_causal = False
+
+         attn_out = F.scaled_dot_product_attention(
+             q, k, v, attn_mask=sdpa_mask,
+             dropout_p=self.attn_dropout if self.training else 0.0,
+             is_causal=use_causal, scale=self.scaling,
+         )
+         attn_out = attn_out.transpose(1, 2).contiguous().reshape(B, T, C)
+         return self.out_proj(attn_out)
+
+
+ class DomainTransformerMLP(nn.Module):
+     """Two-layer FFN with GELU activation (GPT-2 style)."""
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__()
+         self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=True)
+         self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=True)
+         self.act = nn.GELU(approximate="tanh")
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         return self.dropout(self.down_proj(self.act(self.up_proj(hidden_states))))
+
+
+ class DomainTransformerBlock(nn.Module):
+     """Single transformer block with pre-norm architecture."""
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__()
+         self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.attn = DomainTransformerAttention(config)
+         self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.mlp = DomainTransformerMLP(config)
+
+     def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         residual = hidden_states
+         hidden_states = self.attn(self.ln_1(hidden_states), attention_mask)
+         hidden_states = residual + hidden_states
+         residual = hidden_states
+         hidden_states = self.mlp(self.ln_2(hidden_states))
+         hidden_states = residual + hidden_states
+         return hidden_states
+
+
+ class DomainTransformerPreTrainedModel(PreTrainedModel):
+     """Base class with weight initialization."""
+     config_class = DomainTransformerConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+
+     def _init_weights(self, module: nn.Module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             nn.init.normal_(module.weight, mean=0.0, std=std)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             nn.init.normal_(module.weight, mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 nn.init.zeros_(module.weight[module.padding_idx])
+         elif isinstance(module, nn.LayerNorm):
+             nn.init.zeros_(module.bias)
+             nn.init.ones_(module.weight)
+         # Scaled residual initialization (see module docstring): projections that write
+         # into the residual stream are re-initialized with std / sqrt(2 * num_hidden_layers).
+         if isinstance(module, DomainTransformerAttention):
+             resid_std = std / math.sqrt(2 * self.config.num_hidden_layers)
+             nn.init.normal_(module.out_proj.weight, mean=0.0, std=resid_std)
+         elif isinstance(module, DomainTransformerMLP):
+             resid_std = std / math.sqrt(2 * self.config.num_hidden_layers)
+             nn.init.normal_(module.down_proj.weight, mean=0.0, std=resid_std)
+
+
+ class DomainTransformerModel(DomainTransformerPreTrainedModel):
+     """The bare DomainTransformer: embeddings + blocks + final layernorm."""
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__(config)
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
+         self.embed_dropout = nn.Dropout(config.hidden_dropout_prob)
+         self.blocks = nn.ModuleList([DomainTransformerBlock(config) for _ in range(config.num_hidden_layers)])
+         self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.gradient_checkpointing = False
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.embed_tokens = value
+
+     def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, **kwargs):
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+         hidden_states = self.embed_dropout(inputs_embeds)
+         for block in self.blocks:
+             if self.gradient_checkpointing and self.training:
+                 hidden_states = torch.utils.checkpoint.checkpoint(block, hidden_states, attention_mask, use_reentrant=False)
+             else:
+                 hidden_states = block(hidden_states, attention_mask)
+         hidden_states = self.ln_f(hidden_states)
+         return BaseModelOutputWithPast(last_hidden_state=hidden_states)
+
+
+ class DomainTransformerForCausalLM(DomainTransformerPreTrainedModel):
+     """DomainTransformer with a causal language modeling head.
+
+     The LM head is weight-tied with the token embedding layer.
+     Loss is computed via standard shifted cross-entropy.
+     """
+     _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+
+     def __init__(self, config: DomainTransformerConfig):
+         super().__init__(config)
+         self.model = DomainTransformerModel(config)
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def forward(self, input_ids=None, attention_mask=None, labels=None, inputs_embeds=None, **kwargs):
+         outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds)
+         hidden_states = outputs.last_hidden_state
+         logits = self.lm_head(hidden_states)
+
+         loss = None
+         if labels is not None:
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             loss = F.cross_entropy(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1), ignore_index=-100)
+
+         return CausalLMOutputWithPast(loss=loss, logits=logits)
+
+     def get_user_embedding(self, input_ids, attention_mask=None):
+         """Extract user-level embedding from the last non-padding token."""
+         outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)
+         hidden_states = outputs.last_hidden_state
+         if attention_mask is not None:
+             seq_lengths = attention_mask.sum(dim=1) - 1
+             batch_idx = torch.arange(hidden_states.size(0), device=hidden_states.device)
+             return hidden_states[batch_idx, seq_lengths]
+         else:
+             return hidden_states[:, -1, :]
+
+
+ DomainTransformerConfig.register_for_auto_class()
+ DomainTransformerForCausalLM.register_for_auto_class("AutoModelForCausalLM")
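
As a quick local smoke test of the classes above, the following sketch instantiates the 24M preset, checks the embedding/LM-head weight tying, and pulls a user-level embedding. The domain_transformer package path is hypothetical (chosen only so the relative import inside modeling.py resolves), and the token ids are toy values:

import torch
from domain_transformer.configuration import DomainTransformerConfig  # hypothetical package layout
from domain_transformer.modeling import DomainTransformerForCausalLM

config = DomainTransformerConfig.from_preset("24m", vocab_size=1000)
model = DomainTransformerForCausalLM(config).eval()

# Weight tying: the LM head should share storage with the token embedding.
assert model.lm_head.weight.data_ptr() == model.model.embed_tokens.weight.data_ptr()

# User-level embedding taken from the last non-padding position of a right-padded batch.
input_ids = torch.tensor([[5, 8, 13, 2], [7, 2, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 1], [1, 1, 0, 0]])
with torch.no_grad():
    emb = model.get_user_embedding(input_ids, attention_mask)
print(emb.shape)  # torch.Size([2, 512])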