"""
Universal DFlash decoder for any MLX-converted model.

Provides a high-level interface that works with any mlx_lm model,
including those without pre-built DFlash drafters.

Now uses the architecture-agnostic adapter system for proper target model
interaction across all supported families (Qwen3, Qwen3.5, LLaMA, Mistral, Gemma).
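
Example (the model ID and drafter path below are illustrative):

    from mlx_lm import load

    model, tokenizer = load("mlx-community/Qwen3-4B-4bit")
    decoder = UniversalDFlashDecoder(
        target_model=model,
        tokenizer=tokenizer,
        draft_model_path="./qwen3_dflash_drafter",
    )
    print(decoder.generate("Explain speculative decoding.", max_tokens=256))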
"""

from typing import Optional, List, Dict, Any
import mlx.core as mx
from .model import DFlashDraftModel
from .speculative_decode import DFlashSpeculativeDecoder
from .adapters import load_target_model, LoadedTargetModel, detect_model_architecture
from .convert import load_mlx_dflash


def _build_target_layer_ids(num_target_layers: int, num_draft_layers: int) -> List[int]:
    """Select target model layer indices for feature extraction.
    
    Uniformly samples from shallow to deep layers for cross-layer
    feature fusion, matching the DFlash paper.
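
    For example, with 32 target layers and 5 draft layers:

        >>> _build_target_layer_ids(32, 5)
        [1, 8, 15, 22, 29]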
    """
    if num_draft_layers == 1:
        return [num_target_layers // 2]
    start = 1
    end = num_target_layers - 3
    span = end - start
    return [
        int(round(start + (i * span) / (num_draft_layers - 1)))
        for i in range(num_draft_layers)
    ]


class UniversalDFlashDecoder:
    """Universal DFlash decoder that works with any MLX-converted model.
    
    This class handles:
    1. Loading pre-converted DFlash drafters with architecture detection
    2. Creating generic drafters for unsupported models
    3. Training custom drafters on-the-fly
    
    Key improvement: Automatically detects target model architecture and
    selects the correct adapter for hidden state extraction and KV cache management.
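
    Example of the generic-drafter path, with no pre-converted drafter
    (model and tokenizer come from mlx_lm.load; the dataset path is
    illustrative):

        decoder = UniversalDFlashDecoder(model, tokenizer, draft_layers=5, block_size=16)
        decoder.train_drafter("data/train.jsonl", output_path="./my_drafter")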
    """

    def __init__(
        self,
        target_model: Any,
        tokenizer,
        draft_model_path: Optional[str] = None,
        draft_layers: int = 5,
        draft_hidden_size: int = 1024,
        block_size: int = 16,
        device: str = "metal",
    ):
        """Initialize the universal decoder.
        
        Args:
            target_model: Any mlx_lm loaded model, or path/ID to load
            tokenizer: Tokenizer for the model
            draft_model_path: Optional path to pre-converted DFlash drafter
            draft_layers: Number of draft layers (if creating generic drafter)
            draft_hidden_size: Hidden size for generic drafter
            block_size: Number of tokens per draft block
            device: MLX device
        """
        self.tokenizer = tokenizer
        self.block_size = block_size
        self.device = device

        # Resolve target model
        if isinstance(target_model, str):
            print(f"[UniversalDFlash] Loading target model: {target_model}...")
            self.loaded_target = load_target_model(target_model)
            self.target_model = self.loaded_target.model
        elif hasattr(target_model, 'adapter'):
            # Already a LoadedTargetModel
            self.loaded_target = target_model
            self.target_model = target_model.model
        else:
            # Raw mlx_lm model — detect architecture
            print("[UniversalDFlash] Detecting model architecture...")
            self.target_model = target_model
            # Try to build adapter from model attributes
            arch = detect_model_architecture(target_model)
            print(f"[UniversalDFlash] Detected architecture: {arch}")
            # Create minimal LoadedTargetModel wrapper
            from .adapters import MLXTargetAdapter, adapter_for_model_type
            adapter_cls = adapter_for_model_type(arch)
            if adapter_cls is None:
                adapter_cls = MLXTargetAdapter
            adapter = adapter_cls(model=target_model, config={"model_type": arch})
            self.loaded_target = LoadedTargetModel(
                requested_model="unknown",
                resolved_model_path=None,
                model=target_model,
                tokenizer=tokenizer,
                adapter=adapter,
            )

        # Determine model type and vocab size
        self.vocab_size = getattr(tokenizer, "vocab_size", 151936)
        self.target_config = self._extract_target_config(self.target_model)

        # Load or create draft model
        if draft_model_path:
            print(f"[UniversalDFlash] Loading pre-built drafter from {draft_model_path}...")
            self.draft_model, self.draft_config = load_mlx_dflash(draft_model_path)
        else:
            print("[UniversalDFlash] Creating generic drafter for your model...")
            self.draft_model = self._create_generic_drafter(
                draft_layers=draft_layers,
                draft_hidden_size=draft_hidden_size,
            )
            self.draft_config = None

        # Create the speculative decoder with architecture-aware adapter
        self.decoder = DFlashSpeculativeDecoder(
            target_model=self.loaded_target,
            draft_model=self.draft_model,
            tokenizer=tokenizer,
            block_size=block_size,
            device=device,
        )

    def _extract_target_config(self, target_model) -> Dict[str, Any]:
        """Extract configuration from target model."""
        config = {}
        
        # Try to extract from model attributes
        if hasattr(target_model, 'config'):
            model_config = target_model.config
            config['hidden_size'] = getattr(model_config, 'hidden_size', 4096)
            config['num_layers'] = getattr(model_config, 'num_hidden_layers', 32)
            config['vocab_size'] = getattr(model_config, 'vocab_size', 151936)
            config['intermediate_size'] = getattr(model_config, 'intermediate_size', 14336)
            config['num_attention_heads'] = getattr(model_config, 'num_attention_heads', 32)
            config['num_key_value_heads'] = getattr(model_config, 'num_key_value_heads', 8)
            config['model_type'] = getattr(model_config, 'model_type', 'unknown')
        else:
            # Default Qwen3-4B-like config
            config = {
                'hidden_size': 4096,
                'num_layers': 32,
                'vocab_size': 151936,
                'intermediate_size': 14336,
                'num_attention_heads': 32,
                'num_key_value_heads': 8,
                'model_type': 'unknown',
            }

        return config

    def _create_generic_drafter(
        self,
        draft_layers: int,
        draft_hidden_size: int,
    ) -> DFlashDraftModel:
        """Create a generic DFlash drafter compatible with the target model.
        
        This creates an untrained drafter that can be trained or used
        with pre-trained weights from a similar architecture.
        
        The draft model is sized proportionally to the target model's
        hidden dimension for feature compatibility.
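
        For example, with draft_hidden_size=1024 the drafter gets 16 attention
        heads, 4 KV heads, and an intermediate size of 2816; for a 32-layer
        target with 5 draft layers, features are read from target layers
        [1, 8, 15, 22, 29].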
        """
        # Determine architecture compatibility
        hidden_size = self.target_config.get('hidden_size', 4096)
        vocab_size = self.target_config.get('vocab_size', 151936)
        num_layers = self.target_config.get('num_layers', 32)
        
        # Size the drafter relative to the target model; it stays a small
        # fraction of the target's parameter count so drafting remains cheap.
        num_heads = draft_hidden_size // 64  # ~64 dims per head
        num_kv_heads = max(1, num_heads // 4)
        intermediate_size = int(draft_hidden_size * 2.75)  # Standard SwiGLU ratio

        # Target layer ids for feature extraction
        target_layer_ids = _build_target_layer_ids(num_layers, draft_layers)

        drafter = DFlashDraftModel(
            vocab_size=vocab_size,
            hidden_size=draft_hidden_size,
            num_layers=draft_layers,
            num_heads=num_heads,
            num_kv_heads=num_kv_heads,
            intermediate_size=intermediate_size,
            max_seq_len=8192,
            block_size=self.block_size,
            mask_token_id=0,  # Will be overridden by tokenizer
            num_target_layers=num_layers,
            target_layer_ids=target_layer_ids,
        )

        return drafter

    def train_drafter(
        self,
        dataset: str,
        max_seq_length: int = 3072,
        epochs: int = 6,
        batch_size: int = 32,
        lr: float = 6e-4,
        warmup_ratio: float = 0.04,
        grad_clip: float = 1.0,
        output_path: Optional[str] = None,
    ) -> str:
        """Train a custom DFlash drafter for your target model.
        
        Uses the training recipe from the DFlash paper:
        - KV injection with target model features
        - Random anchor sampling for block construction
        - Sparse attention masking within blocks
        - Position-dependent loss decay
        
        Args:
            dataset: Path to training dataset or HF dataset name
            max_seq_length: Maximum sequence length for training
            epochs: Number of training epochs (paper: 6)
            batch_size: Training batch size
            lr: Learning rate (paper: 6e-4)
            warmup_ratio: Warmup ratio for cosine schedule (paper: 0.04)
            grad_clip: Gradient clipping threshold (paper: 1.0)
            output_path: Where to save the trained drafter
        
        Returns:
            Path to saved drafter
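
        Example (the dataset path and output path are illustrative):

            decoder.train_drafter(
                dataset="data/train.jsonl",
                epochs=6,
                batch_size=32,
                lr=6e-4,
                output_path="./my_dflash_drafter",
            )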
        """
        from .trainer import DFlashTrainer

        print(f"[UniversalDFlash] Training custom drafter...")
        print(f"  Dataset: {dataset}")
        print(f"  Epochs: {epochs}, Batch size: {batch_size}, LR: {lr}")
        
        trainer = DFlashTrainer(
            target_model=self.target_model,
            drafter=self.draft_model,
            tokenizer=self.tokenizer,
        )

        trained_model = trainer.train(
            dataset=dataset,
            max_seq_length=max_seq_length,
            epochs=epochs,
            batch_size=batch_size,
            lr=lr,
            warmup_ratio=warmup_ratio,
            grad_clip=grad_clip,
        )

        # Update the draft model
        self.draft_model = trained_model
        self.decoder.draft_model = trained_model

        save_path = output_path or "./trained_dflash_drafter"
        self.save_drafter(save_path)

        return save_path

    def save_drafter(self, path: str):
        """Save the current drafter model."""
        import json
        from pathlib import Path
        import numpy as np

        path = Path(path)
        path.mkdir(parents=True, exist_ok=True)

        # Save weights (flatten the nested parameter tree into dotted keys)
        from mlx.utils import tree_flatten
        weights = dict(tree_flatten(self.draft_model.parameters()))
        
        # Try multiple formats
        try:
            np_weights = {k: np.array(v) for k, v in weights.items()}
            np.savez(str(path / "weights.npz"), **np_weights)
        except Exception:
            try:
                mx.savez(str(path / "weights.npz"), **weights)
            except Exception as e:
                print(f"[Save] Error saving weights: {e}")
                raise

        # Save config
        config = {
            "vocab_size": self.draft_model.vocab_size,
            "hidden_size": self.draft_model.hidden_size,
            "num_hidden_layers": self.draft_model.num_layers,
            "num_attention_heads": self.draft_model.num_heads,
            "num_key_value_heads": self.draft_model.num_heads // 4,
            "intermediate_size": self.draft_model.layers[0].mlp.gate_proj.weight.shape[1] 
                if hasattr(self.draft_model.layers[0].mlp.gate_proj, 'weight') else 2816,
            "max_position_embeddings": self.draft_model.max_seq_len,
            "block_size": self.draft_model.block_size,
            "target_layer_ids": self.draft_model.target_layer_ids,
        }

        with open(path / "config.json", "w") as f:
            json.dump(config, f, indent=2)

        print(f"[UniversalDFlash] Drafter saved to {path}")

    def generate(
        self,
        prompt: str,
        max_tokens: int = 2048,
        temperature: float = 0.0,
        stop_strings: Optional[List[str]] = None,
        stream: bool = False,
    ) -> str | Any:
        """Generate text using DFlash speculative decoding.
        
        Args:
            prompt: Text prompt
            max_tokens: Maximum tokens to generate
            temperature: Sampling temperature
            stop_strings: Optional stop strings
            stream: If True, returns a generator yielding text deltas
        
        Returns:
            Generated text string, or generator if stream=True
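
        Example:

            text = decoder.generate("Explain KV caching.", max_tokens=128)

            for delta in decoder.generate("Tell me a story.", stream=True):
                print(delta, end="", flush=True)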
        """
        return self.decoder.generate(
            prompt=prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            stop_strings=stop_strings,
            stream=stream,
        )

    def benchmark(
        self,
        prompt: str = "Write a quicksort in Python.",
        max_tokens: int = 512,
        num_runs: int = 5,
    ) -> Dict[str, float]:
        """Benchmark DFlash speculative decoding.
        
        Args:
            prompt: Test prompt
            max_tokens: Tokens per run
            num_runs: Number of benchmark runs
        
        Returns:
            Dict with speedup metrics
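
        Example:

            stats = decoder.benchmark(max_tokens=256, num_runs=3)
            print(stats)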
        """
        return self.decoder.benchmark(
            prompt=prompt,
            max_tokens=max_tokens,
            num_runs=num_runs,
        )