Load with weight_dtype=e4m3fn. To avoid unexpected color shades, set WanVaceToVideo->strength=0.95.

Created from the base model using the following script:

import os
import torch
from safetensors.torch import load_file, save_file
from tqdm import tqdm

def convert_wan_to_comfy_fp8(input_path, output_path,
                             dtype=torch.float8_e4m3fn,
                             critical_layers=None):
    """Quantize a safetensors checkpoint to FP8, skipping precision-critical layers.

    Loads *input_path*, casts every weight matrix (key contains "weight" and
    tensor has ndim >= 2) to *dtype*, and writes the result to *output_path*.
    Tensors whose key matches any substring in *critical_layers*, as well as
    1-D tensors (biases, norm scales), are copied through unchanged.

    Args:
        input_path: path to the source ``.safetensors`` file.
        output_path: path the quantized ``.safetensors`` file is written to.
        dtype: target torch dtype for quantized weights
            (default: ``torch.float8_e4m3fn``, matching the original script).
        critical_layers: iterable of key substrings identifying layers to keep
            at their original precision; ``None`` selects the built-in default
            list below.
    """
    if critical_layers is None:
        # Layers empirically sensitive to quantization error: kept at
        # source precision. Matched by substring against each key.
        critical_layers = (
            "img_in",
            "time_in",
            "guidance_in",
            "norm",
            "final_layer",
            "patch_embed",
        )

    print(f"🚀 Reading source: {input_path}")
    state_dict = load_file(input_path)
    new_state_dict = {}

    print("🛠 Quanting (skipping critical layers)...")
    for key, tensor in tqdm(state_dict.items()):
        # Only real weight matrices are cast; everything else passes through.
        is_weight = "weight" in key and tensor.ndim >= 2
        is_critical = any(c in key for c in critical_layers)

        if is_weight and not is_critical:
            new_state_dict[key] = tensor.to(dtype)
        else:
            new_state_dict[key] = tensor

    # NOTE(review): message below is Russian for "Saving to"; consider
    # translating for consistency with the other English status messages.
    print(f"💾 Сохраняю в: {output_path}")
    save_file(new_state_dict, output_path)

    size_diff = os.path.getsize(input_path) - os.path.getsize(output_path)
    print(f"✅ Done! Decreased size by {size_diff / 1024**2:.2f} MB")

if __name__ == "__main__":
    # Source checkpoint and the quantized artifact to produce.
    # NOTE(review): the output name says "scaled", but the script performs a
    # plain dtype cast and stores no per-tensor scale factors — confirm the
    # published filename is intentional.
    src_path = "wan2.1_vace_1.3B_fp16.safetensors"
    dst_path = "wan2.1_vace_1.3B_fp8_scaled.safetensors"
    convert_wan_to_comfy_fp8(src_path, dst_path)
Downloads last month

-

Downloads are not tracked for this model. How to track
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support

Model tree for lhca521/Wan2.1-VACE-1.3B-fp8

Finetuned
(2)
this model