rtferraz committed
Commit 15fbfea · verified · 1 Parent(s): 2f5969e

Add DomainTransformerConfig with presets (24M/85M/330M)

src/domain_tokenizer/models/configuration.py ADDED
@@ -0,0 +1,101 @@
+"""
+DomainTransformer Configuration.
+
+HF-compatible config following Nubank nuFormer architecture choices:
+- GPT-style causal decoder
+- NoPE (no positional encoding) by default — Kazemnejad et al. 2023
+- Pre-norm (LayerNorm before attention and FFN)
+- Weight-tied embedding ↔ LM head
+- Preset sizes: 24M (d=512, L=6), 85M (d=768, L=12), 330M (d=1024, L=24)
+"""
+
+from transformers import PretrainedConfig
+
+
+class DomainTransformerConfig(PretrainedConfig):
+    """Configuration for DomainTransformer causal language model.
+
+    This config produces a GPT-style decoder-only Transformer with:
+    - No positional encoding (NoPE) by default
+    - Pre-norm architecture (LayerNorm before attn/FFN)
+    - GELU activation in FFN
+    - Weight tying between token embeddings and LM head
+
+    Predefined sizes following Nubank nuFormer (arXiv:2507.23267):
+    - "24m": 6 layers, d_model=512, 8 heads, FFN=2048 (~24M params)
+    - "85m": 12 layers, d_model=768, 12 heads, FFN=3072 (~85M params)
+    - "330m": 24 layers, d_model=1024, 16 heads, FFN=4096 (~330M params)
+
+    Args:
+        vocab_size: Size of the token vocabulary.
+        hidden_size: Dimension of hidden representations (d_model).
+        num_hidden_layers: Number of transformer blocks.
+        num_attention_heads: Number of attention heads.
+        intermediate_size: FFN intermediate dimension (default: 4 * hidden_size).
+        hidden_act: Activation function in FFN.
+        hidden_dropout_prob: Dropout rate for embeddings and residual connections.
+        attention_probs_dropout_prob: Dropout rate for attention weights.
+        max_position_embeddings: Maximum sequence length (for buffer sizing, not PE).
+        initializer_range: Std for weight initialization (normal distribution).
+        layer_norm_eps: Epsilon for LayerNorm.
+        use_cache: Whether to return past key values for generation.
+        tie_word_embeddings: Whether to tie input/output embeddings.
+    """
+
+    model_type = "domain_transformer"
+
+    def __init__(
+        self,
+        vocab_size: int = 32000,
+        hidden_size: int = 512,
+        num_hidden_layers: int = 6,
+        num_attention_heads: int = 8,
+        intermediate_size: int = None,
+        hidden_act: str = "gelu",
+        hidden_dropout_prob: float = 0.0,
+        attention_probs_dropout_prob: float = 0.0,
+        max_position_embeddings: int = 2048,
+        initializer_range: float = 0.02,
+        layer_norm_eps: float = 1e-5,
+        use_cache: bool = True,
+        tie_word_embeddings: bool = True,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
+        self.hidden_act = hidden_act
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.use_cache = use_cache
+
+        assert hidden_size % num_attention_heads == 0, (
+            f"hidden_size ({hidden_size}) must be divisible by "
+            f"num_attention_heads ({num_attention_heads})"
+        )
+
+        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
+
+    @classmethod
+    def from_preset(cls, name: str, vocab_size: int = 32000, **overrides) -> "DomainTransformerConfig":
+        """Create config from a named preset.
+
+        Presets:
+            "24m": ~24M params (6 layers, d=512, 8 heads)
+            "85m": ~85M params (12 layers, d=768, 12 heads)
+            "330m": ~330M params (24 layers, d=1024, 16 heads)
+        """
+        presets = {
+            "24m": dict(hidden_size=512, num_hidden_layers=6, num_attention_heads=8, intermediate_size=2048),
+            "85m": dict(hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072),
+            "330m": dict(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096),
+        }
+        if name not in presets:
+            raise ValueError(f"Unknown preset '{name}'. Available: {list(presets.keys())}")
+        params = {**presets[name], "vocab_size": vocab_size, **overrides}
+        return cls(**params)
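
A minimal usage sketch of the new config (not part of this commit). It assumes the `src/` layout is installed so the module is importable as `domain_tokenizer.models.configuration`, and the checkpoint directory name is illustrative:

```python
from transformers import AutoConfig

from domain_tokenizer.models.configuration import DomainTransformerConfig

# Build the ~85M preset, overriding vocabulary size and context length.
config = DomainTransformerConfig.from_preset(
    "85m",
    vocab_size=50000,
    max_position_embeddings=4096,
)
print(config.hidden_size, config.num_hidden_layers, config.intermediate_size)  # 768 12 3072

# Persist and reload like any Hugging Face config.
config.save_pretrained("checkpoints/domain-transformer-85m")
reloaded = DomainTransformerConfig.from_pretrained("checkpoints/domain-transformer-85m")
assert reloaded.num_attention_heads == 12

# Optional: register with AutoConfig so model_type="domain_transformer" resolves
# automatically (a matching model class would be registered separately).
AutoConfig.register("domain_transformer", DomainTransformerConfig)
```

Because `from_preset` forwards `**overrides` to `__init__`, any constructor argument (dropout rates, initializer range, etc.) can be adjusted per preset without defining a new size.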