code-202604_v2 / hf_upload /scripts /merge_adapter.py
Rakancorle11's picture
Snapshot 20260424-2142
0a4deb9 verified
#!/usr/bin/env python3
"""
Merge a LoRA adapter into the base Qwen3-Omni model and save full weights.
Handles thinker-only adapter key remapping automatically.
Usage:
python merge_adapter.py \
--base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
--adapter /opt/dlami/nvme/LlamaFactory/saves/Qwen3-Omni-Instruct/dpo/qwen3omni_dpo_lora_with_audio_v4_data_8632 \
--output /opt/dlami/nvme/merged_models/dpo_v4_8632
"""
from __future__ import annotations
import argparse
import json
import shutil
import tempfile
from pathlib import Path
import torch
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the merge script.

    Returns:
        argparse.Namespace with ``base_model``, ``adapter`` and ``output``
        attributes (all required strings).
    """
    p = argparse.ArgumentParser(description="Merge LoRA adapter into base model.")
    p.add_argument("--base-model", type=str, required=True,
                   help="HF repo id or local path of the base Qwen3-Omni model.")
    p.add_argument("--adapter", type=str, required=True,
                   help="Path to the trained LoRA adapter directory.")
    p.add_argument("--output", type=str, required=True,
                   help="Directory where the merged full-weight model is saved.")
    return p.parse_args()
def _detect_model_type(base_model: str) -> str | None:
    """Return the ``model_type`` field of the model's config.json, if findable.

    Tries a local ``config.json`` first; if that is absent or lacks the field,
    falls back (best effort) to downloading it from the Hugging Face hub.
    Returns None when the type cannot be determined.
    """
    model_path = Path(base_model)
    cfg_path = model_path / "config.json" if model_path.exists() else None
    model_type = None
    if cfg_path and cfg_path.exists():
        with open(cfg_path) as f:
            model_type = json.load(f).get("model_type")
    if not model_type:
        try:
            from huggingface_hub import hf_hub_download
            cached = hf_hub_download(base_model, "config.json")
            with open(cached) as f:
                model_type = json.load(f).get("model_type")
        except Exception:
            # Best effort only: offline / not a hub repo id — leave unresolved.
            pass
    return model_type


def _remap_target_modules(target_modules: list) -> list:
    """Prefix thinker-only LoRA target-module names with ``thinker.``.

    Adapters trained on the thinker sub-model address layers as
    ``model.layers.*`` (or bare layer indices); inside the full Qwen3-Omni
    model those modules live under ``thinker.model.layers.*``.
    """
    remapped = []
    for t in target_modules:
        if t.startswith("model.layers."):
            remapped.append("thinker." + t)
        elif t[:1].isdigit():  # [:1] avoids IndexError on an empty entry
            remapped.append("thinker.model.layers." + t)
        else:
            remapped.append(t)
    return remapped


def _remap_adapter(adapter_dir: Path, adapter_cfg: dict, target_modules: list) -> Path:
    """Copy the adapter into a temp dir with keys remapped for the full model.

    - ``adapter_config.json`` is rewritten with thinker-prefixed target modules.
    - ``*.safetensors`` weight files get their LoRA keys remapped from
      ``base_model.model.model.layers.*`` to
      ``base_model.model.thinker.model.layers.*``.
    - Every other regular file is copied verbatim.

    Returns the temporary directory (caller is responsible for deleting it).
    """
    from safetensors.torch import load_file, save_file

    tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
    for fn in adapter_dir.iterdir():
        if fn.is_dir():
            continue
        if fn.name == "adapter_config.json":
            cfg = dict(adapter_cfg)  # copy: don't mutate the caller's dict
            cfg["target_modules"] = _remap_target_modules(target_modules)
            with open(tmp_dir / "adapter_config.json", "w") as f:
                json.dump(cfg, f, indent=2)
        elif fn.suffix == ".safetensors" and "adapter" in fn.name:
            tensors = load_file(str(fn))
            remapped = {}
            for k, v in tensors.items():
                if ".model.layers." in k and ".thinker." not in k:
                    new_k = k.replace(
                        "base_model.model.model.layers.",
                        "base_model.model.thinker.model.layers.",
                    )
                    remapped[new_k] = v
                else:
                    remapped[k] = v
            save_file(remapped, str(tmp_dir / fn.name))
        else:
            shutil.copy2(str(fn), str(tmp_dir / fn.name))
    return tmp_dir


def main() -> None:
    """Merge a LoRA adapter into a Qwen3-Omni base model and save full weights.

    Steps:
      1. Load the processor from the base model.
      2. Load the base model (thinker-only or full, depending on model_type).
      3. Load the adapter, remapping thinker-only keys when needed.
      4. Merge the LoRA weights and unload the PEFT wrapper.
      5. Save merged weights (safetensors) and processor to --output.
    """
    args = parse_args()

    # Imported lazily so argument parsing works without the heavy ML stack.
    from transformers import (
        AutoConfig,
        AutoProcessor,
        Qwen3OmniMoeForConditionalGeneration,
        Qwen3OmniMoeThinkerConfig,
        Qwen3OmniMoeThinkerForConditionalGeneration,
    )

    print(f"[1/5] Loading processor from {args.base_model} ...")
    processor = AutoProcessor.from_pretrained(args.base_model, trust_remote_code=True)

    model_type = _detect_model_type(args.base_model)
    print(f" model_type: {model_type}")

    print("[2/5] Loading base model ...")
    if model_type == "qwen3_omni_moe_thinker":
        # Base checkpoint contains only the thinker sub-model.
        config = Qwen3OmniMoeThinkerConfig.from_pretrained(args.base_model)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            args.base_model, config=config, torch_dtype=torch.bfloat16, device_map="cpu",
        )
    else:
        config = AutoConfig.from_pretrained(args.base_model, trust_remote_code=True)
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            args.base_model, config=config, torch_dtype=torch.bfloat16, device_map="cpu",
        )

    print(f"[3/5] Loading and remapping LoRA adapter: {args.adapter} ...")
    from peft import PeftModel

    adapter_cfg_path = Path(args.adapter) / "adapter_config.json"
    with open(adapter_cfg_path) as f:
        adapter_cfg = json.load(f)
    target_modules = adapter_cfg.get("target_modules", [])

    # Remap only when the adapter targets thinker-relative paths but the base
    # model is the *full* model (thinker nested under "thinker.").
    needs_remap = (
        any(t.startswith("model.layers.") for t in target_modules)
        and model_type != "qwen3_omni_moe_thinker"
    )

    adapter_path = args.adapter
    tmp_dir: Path | None = None
    if needs_remap:
        print(" Adapter was trained on thinker-only model; remapping keys...")
        tmp_dir = _remap_adapter(Path(args.adapter), adapter_cfg, target_modules)
        adapter_path = str(tmp_dir)

    try:
        model = PeftModel.from_pretrained(model, adapter_path)
        print("[4/5] Merging and unloading LoRA weights ...")
        model = model.merge_and_unload()
    finally:
        # Remove the temporary remapped adapter copy (original code leaked it).
        if tmp_dir is not None:
            shutil.rmtree(tmp_dir, ignore_errors=True)

    out_path = Path(args.output)
    out_path.mkdir(parents=True, exist_ok=True)
    print(f"[5/5] Saving merged model to {out_path} ...")
    model.save_pretrained(out_path, safe_serialization=True)
    processor.save_pretrained(out_path)
    print(f"\nDone. Merged model saved to: {out_path}")
# Entry point guard: run the merge only when executed as a script, not on import.
if __name__ == "__main__":
    main()