#!/usr/bin/env python3
"""
Merge a LoRA adapter into the base Qwen3-Omni model and save full weights.
Handles thinker-only adapter key remapping automatically.
Usage:
python merge_adapter.py \
--base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
--adapter /opt/dlami/nvme/LlamaFactory/saves/Qwen3-Omni-Instruct/dpo/qwen3omni_dpo_lora_with_audio_v4_data_8632 \
--output /opt/dlami/nvme/merged_models/dpo_v4_8632
"""
from __future__ import annotations

import argparse
import json
import shutil
import tempfile
from pathlib import Path

import torch


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Merge LoRA adapter into base model.")
    p.add_argument("--base-model", type=str, required=True)
    p.add_argument("--adapter", type=str, required=True)
    p.add_argument("--output", type=str, required=True)
    return p.parse_args()


def main() -> None:
    args = parse_args()
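
    # transformers is imported inside main(), after argument parsing, so CLI
    # errors surface before the heavy import. These Qwen3-Omni classes only
    # exist in transformers versions that ship Qwen3-Omni support.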
    from transformers import (
        AutoConfig,
        AutoProcessor,
        Qwen3OmniMoeForConditionalGeneration,
        Qwen3OmniMoeThinkerConfig,
        Qwen3OmniMoeThinkerForConditionalGeneration,
    )
print(f"[1/5] Loading processor from {args.base_model} ...")
processor = AutoProcessor.from_pretrained(args.base_model, trust_remote_code=True)
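
    # Detect the checkpoint's model_type so the matching class is used below.
    # Try a local config.json first ...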
    model_path = Path(args.base_model)
    cfg_path = model_path / "config.json" if model_path.exists() else None
    model_type = None
    if cfg_path and cfg_path.exists():
        with open(cfg_path) as f:
            model_type = json.load(f).get("model_type")
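    # ... and fall back to fetching config.json from the Hugging Face Hub.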
    if not model_type:
        try:
            from huggingface_hub import hf_hub_download

            cached = hf_hub_download(args.base_model, "config.json")
            with open(cached) as f:
                model_type = json.load(f).get("model_type")
        except Exception:
            pass
    print(f" model_type: {model_type}")
print(f"[2/5] Loading base model ...")
    if model_type == "qwen3_omni_moe_thinker":
        config = Qwen3OmniMoeThinkerConfig.from_pretrained(args.base_model)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            args.base_model, config=config, torch_dtype=torch.bfloat16, device_map="cpu",
        )
    else:
        config = AutoConfig.from_pretrained(args.base_model, trust_remote_code=True)
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            args.base_model, config=config, torch_dtype=torch.bfloat16, device_map="cpu",
        )
print(f"[3/5] Loading and remapping LoRA adapter: {args.adapter} ...")
from peft import PeftModel
from safetensors.torch import load_file, save_file
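
    # An adapter trained on the thinker-only model targets modules named
    # "model.layers.N...", but in the full Omni model those modules live under
    # "thinker.model.layers.N...". When the prefixes don't line up, both the
    # adapter config and the safetensors keys must be rewritten before PEFT
    # can attach the adapter.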
    adapter_cfg_path = Path(args.adapter) / "adapter_config.json"
    with open(adapter_cfg_path) as f:
        adapter_cfg = json.load(f)
    target_modules = adapter_cfg.get("target_modules", [])
    needs_remap = (
        any(t.startswith("model.layers.") for t in target_modules)
        and model_type != "qwen3_omni_moe_thinker"
    )

    adapter_path = args.adapter
    if needs_remap:
        print(" Adapter was trained on thinker-only model; remapping keys...")
        tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
        for fn in Path(args.adapter).iterdir():
            if fn.is_dir():
                continue
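            # Rewrite target_modules so the names resolve inside the full model.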
            if fn.name == "adapter_config.json":
                new_targets = []
                for t in target_modules:
                    if t.startswith("model.layers."):
                        new_targets.append("thinker." + t)
                    elif t[0].isdigit():
                        new_targets.append("thinker.model.layers." + t)
                    else:
                        new_targets.append(t)
                adapter_cfg["target_modules"] = new_targets
                with open(tmp_dir / "adapter_config.json", "w") as f:
                    json.dump(adapter_cfg, f, indent=2)
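            # Prefix the transformer-layer keys in the adapter weights with
            # "thinker." so they match the full model's parameter names.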
            elif fn.suffix == ".safetensors" and "adapter" in fn.name:
                tensors = load_file(str(fn))
                remapped = {}
                for k, v in tensors.items():
                    if ".model.layers." in k and ".thinker." not in k:
                        new_k = k.replace(
                            "base_model.model.model.layers.",
                            "base_model.model.thinker.model.layers.",
                        )
                        remapped[new_k] = v
                    else:
                        remapped[k] = v
                save_file(remapped, str(tmp_dir / fn.name))
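            # Copy any other files (e.g. tokenizer files, README) unchanged.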
            else:
                shutil.copy2(str(fn), str(tmp_dir / fn.name))
        adapter_path = str(tmp_dir)
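
    # Attach the (possibly remapped) adapter to the base model.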
    model = PeftModel.from_pretrained(model, adapter_path)
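
    # merge_and_unload() folds the LoRA deltas into the base weights and
    # returns the underlying transformers model without PEFT wrappers.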
print(f"[4/5] Merging and unloading LoRA weights ...")
model = model.merge_and_unload()

    out_path = Path(args.output)
    out_path.mkdir(parents=True, exist_ok=True)
    print(f"[5/5] Saving merged model to {out_path} ...")
    model.save_pretrained(out_path, safe_serialization=True)
    processor.save_pretrained(out_path)
    print(f"\nDone. Merged model saved to: {out_path}")


if __name__ == "__main__":
    main()