"""
Convert PyTorch DFlash drafter models to MLX format.

Handles weight conversion from PyTorch safetensors to MLX arrays,
compatible with any z-lab DFlash drafter.

Updated to work with the universal adapter system for any target model family.
"""

import json
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional, Tuple

import mlx.core as mx
from transformers import AutoConfig
from huggingface_hub import snapshot_download


def _convert_key(key: str) -> str:
    """Convert PyTorch parameter names to MLX format.

    The drafter checkpoints use the same submodule names in both
    frameworks, so the only change needed is dropping the leading
    "model." prefix that PyTorch checkpoints carry.
    """
    # Strip only the leading prefix; a blanket str.replace() could corrupt
    # any parameter name that contains "model." further in.
    if key.startswith("model."):
        key = key[len("model."):]
    return key
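
# Example (illustrative):
#   _convert_key("model.layers.0.self_attn.q_proj.weight")
#   -> "layers.0.self_attn.q_proj.weight"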


def _transpose_if_needed(key: str, tensor) -> mx.array:
    """Transpose linear layer weights from PyTorch to MLX format.

    Linear layers in PyTorch are stored [out, in]; the MLX drafter expects
    [in, out]. Embedding tables are [vocab, hidden] in both frameworks and
    must be left untouched.
    """
    import torch

    # bfloat16 has no numpy equivalent, so upcast before conversion
    if tensor.dtype == torch.bfloat16:
        tensor = tensor.to(torch.float32)
    array = tensor.detach().cpu().numpy()
    if "embed" not in key and ("proj" in key or "fc" in key or "lm_head" in key):
        if array.ndim == 2:
            return mx.array(array.T)
    return mx.array(array)
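
# Shape sketch (illustrative sizes): a PyTorch q_proj weight of shape
# [2048, 1024] ([out, in]) comes back as an MLX array of shape [1024, 2048],
# while an embed_tokens table of shape [151936, 1024] passes through unchanged.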


def convert_dflash_to_mlx(
    pytorch_model_id: str,
    output_path: str,
    trust_remote_code: bool = True,
    token: Optional[str] = None,
) -> str:
    """Convert a PyTorch DFlash drafter to MLX format.
    
    Args:
        pytorch_model_id: Hugging Face model ID (e.g., "z-lab/Qwen3-4B-DFlash-b16")
        output_path: Local directory to save converted model
        trust_remote_code: Whether to trust custom modeling code
        token: HF API token for gated/private models
    
    Returns:
        Path to the converted model directory
    """
    output_path = Path(output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    print(f"[Convert] Downloading {pytorch_model_id}...")
    
    # Download model files
    repo_path = snapshot_download(
        repo_id=pytorch_model_id,
        token=token,
        ignore_patterns=["*.md", "*.png", "*.jpg", "*.gif", "*.jpeg"],
    )
    repo_path = Path(repo_path)

    # Load PyTorch model config
    print("[Convert] Loading PyTorch config...")
    config = AutoConfig.from_pretrained(
        repo_path,
        trust_remote_code=trust_remote_code,
    )

    # Extract DFlash-specific config
    dflash_config = {
        "vocab_size": getattr(config, "vocab_size", 151936),
        "hidden_size": getattr(config, "hidden_size", 1024),
        "num_hidden_layers": getattr(config, "num_hidden_layers", 5),
        "num_attention_heads": getattr(config, "num_attention_heads", 16),
        "num_key_value_heads": getattr(config, "num_key_value_heads", 4),
        "intermediate_size": getattr(config, "intermediate_size", 2816),
        "max_position_embeddings": getattr(config, "max_position_embeddings", 32768),
        "rms_norm_eps": getattr(config, "rms_norm_eps", 1e-6),
        "block_size": getattr(config, "block_size", 16),
        "rope_base": getattr(config, "rope_theta", 10000.0),
    }
    
    # Extract target layer IDs if present in config
    if hasattr(config, "target_layer_ids"):
        dflash_config["target_layer_ids"] = config.target_layer_ids
    elif hasattr(config, "dflash_config") and hasattr(config.dflash_config, "target_layer_ids"):
        dflash_config["target_layer_ids"] = config.dflash_config.target_layer_ids

    # Load weights from safetensors
    print("[Convert] Loading weights from safetensors...")
    try:
        from safetensors.torch import load_file
        
        # Find all safetensors files
        safetensors_files = sorted(repo_path.glob("*.safetensors"))
        
        if safetensors_files:
            pt_weights = {}
            for st_file in safetensors_files:
                print(f"  Loading {st_file.name}...")
                partial = load_file(str(st_file))
                pt_weights.update(partial)
        else:
            # Try pytorch_model.bin
            bin_file = repo_path / "pytorch_model.bin"
            if bin_file.exists():
                import torch
                pt_weights = torch.load(str(bin_file), map_location="cpu")
            else:
                raise FileNotFoundError("No safetensors or pytorch_model.bin found")
    except ImportError:
        # Fallback to torch load
        import torch
        weights_file = repo_path / "pytorch_model.bin"
        if weights_file.exists():
            pt_weights = torch.load(str(weights_file), map_location="cpu")
        else:
            raise FileNotFoundError("No weight files found and safetensors not installed")

    # Convert weights
    print(f"[Convert] Converting {len(pt_weights)} parameters...")
    mlx_weights = {}
    for key, tensor in pt_weights.items():
        mlx_key = _convert_key(key)
        mlx_weights[mlx_key] = _transpose_if_needed(key, tensor)

    # Save MLX weights (npz via numpy, falling back to mx.savez)
    weights_path = output_path / "weights.npz"
    try:
        import numpy as np

        np_weights = {k: np.array(v) for k, v in mlx_weights.items()}
        np.savez(str(weights_path), **np_weights)
        print(f"[Convert] Saved weights to {weights_path}")
    except Exception as e:
        print(f"[Convert] Warning: Could not save weights: {e}")
        # Try direct mlx save
        try:
            mx.savez(str(weights_path), **mlx_weights)
        except Exception as e2:
            print(f"[Convert] Error saving weights: {e2}")
            raise

    # Save config
    config_path = output_path / "config.json"
    with open(config_path, "w") as f:
        json.dump(dflash_config, f, indent=2)

    # Save target model mapping
    target_info = {
        "source_model": pytorch_model_id,
        "target_model": infer_target_model(pytorch_model_id),
        "conversion_date": str(Path(__file__).stat().st_mtime),
    }
    info_path = output_path / "model_info.json"
    with open(info_path, "w") as f:
        json.dump(target_info, f, indent=2)

    print(f"[Convert] Done! Model saved to {output_path}")
    print(f"  Config: {dflash_config}")
    print(f"  Target: {target_info['target_model']}")
    return str(output_path)


def infer_target_model(dflash_model_id: str) -> str:
    """Infer the target model from DFlash drafter ID.
    
    Maps known drafter checkpoints to their corresponding target models.
    Supports all official z-lab DFlash models plus community variants.
    """
    # Map drafter IDs to target models
    mapping = {
        # Qwen3 series
        "Qwen3-4B-DFlash": "Qwen/Qwen3-4B",
        "Qwen3-8B-DFlash": "Qwen/Qwen3-8B",
        "Qwen3-32B-DFlash": "Qwen/Qwen3-32B",
        # Qwen3.5 series
        "Qwen3.5-4B-DFlash": "Qwen/Qwen3.5-4B",
        "Qwen3.5-9B-DFlash": "Qwen/Qwen3.5-9B",
        "Qwen3.5-27B-DFlash": "Qwen/Qwen3.5-27B",
        "Qwen3.5-35B-A3B-DFlash": "Qwen/Qwen3.5-35B-A3B",
        "Qwen3.5-122B-A10B-DFlash": "Qwen/Qwen3.5-122B-A10B",
        # Qwen3.6 series
        "Qwen3.6-27B-DFlash": "Qwen/Qwen3.6-27B",
        "Qwen3.6-35B-A3B-DFlash": "Qwen/Qwen3.6-35B-A3B",
        # Qwen Coder
        "Qwen3-Coder-Next-DFlash": "Qwen/Qwen3-Coder-Next",
        "Qwen3-Coder-30B-A3B-DFlash": "Qwen/Qwen3-Coder-30B-A3B",
        # LLaMA
        "LLaMA3.1-8B-Instruct-DFlash": "meta-llama/Llama-3.1-8B-Instruct",
        "LLaMA3.1-70B-Instruct-DFlash": "meta-llama/Llama-3.1-70B-Instruct",
        # Gemma
        "gemma-4-31B-it-DFlash": "google/gemma-4-31b-it",
        "gemma-4-26B-A4B-it-DFlash": "google/gemma-4-26b-a4b-it",
        # GPT-OSS
        "gpt-oss-20b-DFlash": "openai/gpt-oss-20b",
        "gpt-oss-120b-DFlash": "openai/gpt-oss-120b",
        # Kimi
        "Kimi-K2.5-DFlash": "moonshotai/Kimi-K2.5",
        # MiniMax
        "MiniMax-M2.5-DFlash": "MiniMax/MiniMax-M2.5",
    }
    
    # Direct mapping lookup
    for key, target in mapping.items():
        if key in dflash_model_id:
            return target
    
    # Generic inference by model family
    if "Qwen3.6" in dflash_model_id:
        return "Qwen/Qwen3.6-27B"
    elif "Qwen3.5" in dflash_model_id:
        return "Qwen/Qwen3.5-9B"
    elif "Qwen3-Coder" in dflash_model_id:
        return "Qwen/Qwen3-Coder-Next"
    elif "Qwen3" in dflash_model_id:
        return "Qwen/Qwen3-4B"
    elif "LLaMA" in dflash_model_id or "Llama" in dflash_model_id or "llama" in dflash_model_id:
        return "meta-llama/Llama-3.1-8B-Instruct"
    elif "gemma" in dflash_model_id.lower():
        return "google/gemma-4-31b-it"
    elif "gpt-oss" in dflash_model_id.lower():
        return "openai/gpt-oss-20b"
    elif "Kimi" in dflash_model_id:
        return "moonshotai/Kimi-K2.5"
    elif "MiniMax" in dflash_model_id:
        return "MiniMax/MiniMax-M2.5"
    
    return "unknown"


def load_mlx_dflash(
    model_path: str,
) -> Tuple:
    """Load a converted MLX DFlash model.
    
    Args:
        model_path: Path to converted MLX model directory
    
    Returns:
        Tuple of (model, config)
    """
    from .model import DFlashDraftModel

    model_path = Path(model_path)
    
    # Load config
    with open(model_path / "config.json", "r") as f:
        config = json.load(f)

    # Load weights (mx.load reads .npz and .safetensors)
    weights_path = model_path / "weights.npz"
    if not weights_path.exists():
        # Fall back to a safetensors checkpoint if present
        alt = model_path / "weights.safetensors"
        if alt.exists():
            weights_path = alt

    if not weights_path.exists():
        raise FileNotFoundError(f"No weights found in {model_path}")
    
    weights = mx.load(str(weights_path))
    
    # Build model
    model = DFlashDraftModel(
        vocab_size=config["vocab_size"],
        hidden_size=config["hidden_size"],
        num_layers=config["num_hidden_layers"],
        num_heads=config["num_attention_heads"],
        num_kv_heads=config["num_key_value_heads"],
        intermediate_size=config["intermediate_size"],
        max_seq_len=config["max_position_embeddings"],
        block_size=config.get("block_size", 16),
        rope_base=config.get("rope_base", 10000.0),
        target_layer_ids=config.get("target_layer_ids", None),
    )

    # mx.load returns a flat dict keyed by dotted paths; load_weights()
    # unflattens it into the module tree (plain update() expects nested params)
    model.load_weights(list(weights.items()))

    return model, config


def main():
    """CLI entry point for conversion."""
    import argparse
    parser = argparse.ArgumentParser(description="Convert PyTorch DFlash drafter to MLX")
    parser.add_argument("--model", required=True, help="HF model ID of PyTorch drafter")
    parser.add_argument("--output", required=True, help="Output directory")
    parser.add_argument("--trust-remote-code", action="store_true", default=True)
    parser.add_argument("--token", default=None, help="HF token for gated models")
    args = parser.parse_args()
    
    convert_dflash_to_mlx(
        pytorch_model_id=args.model,
        output_path=args.output,
        trust_remote_code=args.trust_remote_code,
        token=args.token,
    )
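
# CLI sketch (hypothetical paths; assumes this file is saved as convert.py):
#   python convert.py --model z-lab/Qwen3-4B-DFlash-b16 --output ./dflash-mlx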


if __name__ == "__main__":
    main()