Upload 2 files
Browse files
RAR.py
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import subprocess
|
| 4 |
+
import shutil
|
| 5 |
+
import venv
|
| 6 |
+
import json
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Filesystem layout: everything (venv, repo clone, weights, outputs) lives
# next to this script so deleting the folder removes all state.
ROOT = Path(__file__).resolve().parent
VENV_DIR = ROOT / ".rar_env"       # private virtualenv bootstrapped on first run
REPO_DIR = ROOT / "1d-tokenizer"   # clone of bytedance/1d-tokenizer
OUT_DIR = ROOT / "outputs_rar"     # generated PNGs are written here
WEIGHT_DIR = ROOT / "weights"      # downloaded tokenizer/RAR checkpoints

# Defaults for direct run (no terminal args needed)
DEFAULT_CLASS_ID = 207
DEFAULT_CLASS_IDS = [1, 3, 5, 9]   # e.g., [207, 282, 404] to generate multiple by default
DEFAULT_NUM_IMAGES = 1
DEFAULT_RAR_SIZE = "rar_xl"        # one of: rar_b, rar_l, rar_xl, rar_xxl
| 22 |
+
|
| 23 |
+
def run(cmd, cwd=None, env=None, check=True, quiet=False):
    """Echo *cmd* (masking any inline ``-c`` code payload) and execute it.

    Returns the CompletedProcess from subprocess.run; when *quiet* is true
    both stdout and stderr are discarded.
    """
    shown = list(cmd)
    # Replace the argument after "-c" so large inline scripts are not dumped
    # to the console.
    try:
        flag_pos = shown.index("-c")
    except ValueError:
        flag_pos = -1
    if flag_pos != -1 and flag_pos + 1 < len(shown):
        shown[flag_pos + 1] = "<inline>"
    print(f"[run] {' '.join(shown)}")
    sink = subprocess.DEVNULL if quiet else None
    return subprocess.run(cmd, cwd=cwd, env=env, check=check, stdout=sink, stderr=sink)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def ensure_venv() -> Path:
    """Create a local venv if missing and return its python path."""
    if not VENV_DIR.exists():
        print(f"[setup] Creating venv at {VENV_DIR}")
        venv.EnvBuilder(with_pip=True).create(VENV_DIR)
    # Interpreter location differs between Windows and POSIX venv layouts.
    subdir, exe = ("Scripts", "python.exe") if os.name == "nt" else ("bin", "python")
    return VENV_DIR / subdir / exe
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def in_venv() -> bool:
    """Return True when running inside a virtual environment.

    A venv interpreter reports a sys.prefix different from its base prefix;
    getattr guards very old interpreters lacking ``base_prefix``.
    """
    base = getattr(sys, "base_prefix", sys.prefix)
    return base != sys.prefix
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def install_requirements(venv_python: Path):
    """Populate the venv: upgrade pip tooling, clone/update the 1d-tokenizer
    repo, then install its requirements exactly once (marker-file guarded).

    *venv_python* is the interpreter inside the venv; all pip installs target
    that environment, never the interpreter running this script.
    """
    # Upgrade pip tooling first
    run([str(venv_python), "-m", "pip", "install", "--upgrade", "pip", "setuptools", "wheel", "-q"], quiet=True)
    # Clone repo first so we can install its requirements
    if not REPO_DIR.exists():
        print(f"[setup] Cloning bytedance/1d-tokenizer into {REPO_DIR}")
        run(["git", "clone", "https://github.com/bytedance/1d-tokenizer", str(REPO_DIR)])
    else:
        print(f"[setup] Repo exists, pulling latest...")
        run(["git", "pull", "--ff-only"], cwd=str(REPO_DIR))

    # Install repo requirements
    req = REPO_DIR / "requirements.txt"
    # Sentinel file: its presence means requirements were installed on a
    # previous run, so we can skip the slow pip step entirely.
    deps_marker = VENV_DIR / ".deps_installed"
    if req.exists():
        if not deps_marker.exists():
            print("[setup] Installing repo requirements (first time)")
            run([str(venv_python), "-m", "pip", "install", "-r", str(req), "-q"], quiet=True)
            # Ensure diffusers only if needed
            cp = run([str(venv_python), "-c", "import diffusers"], check=False, quiet=True)
            if cp.returncode != 0:
                run([str(venv_python), "-m", "pip", "install", "diffusers<0.32", "-q"], quiet=True)
            deps_marker.write_text("ok")
        else:
            print("[setup] Requirements already installed; skipping")
    else:
        # Fallback when the repo ships no requirements.txt: install a minimal
        # dependency set known to cover the generation path.
        print("[warn] requirements.txt not found; installing minimal deps")
        run([str(venv_python), "-m", "pip", "install",
             "torch>=2.0.0", "torchvision", "omegaconf", "transformers", "timm",
             "open_clip_torch", "einops", "scipy", "pillow", "accelerate",
             "gdown", "huggingface-hub", "wandb", "torch-fidelity", "torchinfo", "webdataset", "-q"], quiet=True)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def reexec_in_venv(venv_python: Path):
    """Re-run this script under the venv interpreter, then exit this process.

    The RAR_BOOTSTRAPPED flag prevents the child from bootstrapping again.
    """
    child_env = dict(os.environ)
    child_env["RAR_BOOTSTRAPPED"] = "1"
    script = str(Path(__file__).resolve())
    run([str(venv_python), script, *sys.argv[1:]], env=child_env)
    sys.exit(0)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def hf_download(venv_python: Path, repo_id: str, filename: str, local_dir: Path) -> Path:
    """Download *filename* from the Hugging Face repo *repo_id* into *local_dir*.

    Runs ``hf_hub_download`` in a subprocess under the venv interpreter so this
    script does not need huggingface_hub installed itself. Returns the local
    path of the downloaded file.

    Raises RuntimeError if the reported path does not exist afterwards.
    """
    local_dir.mkdir(parents=True, exist_ok=True)
    # Inline snippet executed inside the venv; values are baked in via !r so
    # no argument passing/quoting is needed. (Unused sys/pathlib imports from
    # the original snippet removed.)
    code = f"""
from huggingface_hub import hf_hub_download
path = hf_hub_download(repo_id={repo_id!r}, filename={filename!r}, local_dir={str(local_dir)!r})
print(path)
"""
    cp = subprocess.run([str(venv_python), "-c", code], stdout=subprocess.PIPE, text=True, check=True)
    p = Path(cp.stdout.strip())
    if not p.exists():
        # Bug fix: the old message hard-coded "(unknown)" instead of naming
        # the file that failed, making failures impossible to diagnose.
        raise RuntimeError(f"Download failed for {repo_id}/{filename}")
    return p
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def generate_imagenet_class(venv_python: Path, class_id: int, rar_size: str = "rar_xl", num_images: int = 1):
    """Sample *num_images* images of ImageNet class *class_id* with a RAR model.

    Downloads the MaskGIT tokenizer and the requested RAR checkpoint if they
    are missing, then runs generation in a subprocess inside the venv (so the
    host interpreter never needs torch). PNGs are written into OUT_DIR.

    Args:
        venv_python: interpreter inside the bootstrapped venv.
        class_id: ImageNet-1K class id [0..999].
        rar_size: one of rar_b / rar_l / rar_xl / rar_xxl.
        num_images: how many samples to draw for this class.
    """
    OUT_DIR.mkdir(parents=True, exist_ok=True)
    WEIGHT_DIR.mkdir(parents=True, exist_ok=True)

    # Ensure weights are present
    print("[weights] Downloading tokenizer and RAR weights if missing...")
    tok_path = hf_download(venv_python, "fun-research/TiTok", "maskgit-vqgan-imagenet-f16-256.bin", WEIGHT_DIR)
    rar_bin = f"{rar_size}.bin"
    rar_path = hf_download(venv_python, "yucornetto/RAR", rar_bin, WEIGHT_DIR)

    # Execute generation inline inside the venv
    code = f"""
import sys
from pathlib import Path
import traceback

REPO_DIR = Path({str(REPO_DIR)!r})
WEIGHT_DIR = Path({str(WEIGHT_DIR)!r})
OUT_DIR = Path({str(OUT_DIR)!r})

try:
    import torch
    from PIL import Image
    if str(REPO_DIR) not in sys.path:
        sys.path.insert(0, str(REPO_DIR))
    import demo_util
    from modeling.titok import PretrainedTokenizer
    from modeling.rar import RAR

    # Bug fix: only rar_xl was defined before, so every other size advertised
    # by DEFAULT_RAR_SIZE's comment (rar_b / rar_l / rar_xxl) hit the assert.
    # Dimensions follow the published RAR model family configs — verify
    # against the 1d-tokenizer repo before relying on non-XL sizes.
    cfg_map = {{
        'rar_b':   dict(hidden_size=768,  layers=24, heads=16, mlp=3072),
        'rar_l':   dict(hidden_size=1024, layers=24, heads=16, mlp=4096),
        'rar_xl':  dict(hidden_size=1280, layers=32, heads=16, mlp=5120),
        'rar_xxl': dict(hidden_size=1408, layers=40, heads=16, mlp=6144),
    }}
    rar_size = {rar_size!r}
    assert rar_size in cfg_map, f"Unsupported rar size: {{rar_size}}"

    config = demo_util.get_config(str(REPO_DIR / 'configs' / 'training' / 'generator' / 'rar.yaml'))
    config.experiment.generator_checkpoint = str(WEIGHT_DIR / f"{{rar_size}}.bin")
    config.model.generator.hidden_size = cfg_map[rar_size]['hidden_size']
    config.model.generator.num_hidden_layers = cfg_map[rar_size]['layers']
    config.model.generator.num_attention_heads = cfg_map[rar_size]['heads']
    config.model.generator.intermediate_size = cfg_map[rar_size]['mlp']
    config.model.vq_model.pretrained_tokenizer_weight = str(WEIGHT_DIR / 'maskgit-vqgan-imagenet-f16-256.bin')

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    tokenizer = PretrainedTokenizer(config.model.vq_model.pretrained_tokenizer_weight)
    generator = RAR(config)
    generator.load_state_dict(torch.load(config.experiment.generator_checkpoint, map_location='cpu'))
    generator.eval(); generator.requires_grad_(False); generator.set_random_ratio(0)
    tokenizer.to(device)
    generator.to(device)

    cls_id = int({class_id})
    num_images = int({num_images})
    OUT_DIR.mkdir(parents=True, exist_ok=True)
    for i in range(num_images):
        imgs = demo_util.sample_fn(
            generator=generator,
            tokenizer=tokenizer,
            labels=[cls_id],
            randomize_temperature=1.02,
            guidance_scale=6.9,
            guidance_scale_pow=1.5,
            device=device,
        )
        Image.fromarray(imgs[0]).save(OUT_DIR / f'rar_{{rar_size}}_cls{{cls_id}}_{{i}}.png')
    print('DONE')
except Exception:
    print('[ERROR] Generation failed:')
    traceback.print_exc()
    raise
"""
    run([str(venv_python), "-c", code])
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def parse_args():
    """Parse CLI options for setup-and-sample.

    Class ids fall back, in order: --class_ids flag, RAR_CLASS_IDS env var
    (comma-separated), a classes.txt file next to this script, then
    DEFAULT_CLASS_IDS. The model size is always forced to rar_xl.
    """
    import argparse
    parser = argparse.ArgumentParser(description="RAR-XL one-shot setup and sampling")
    parser.add_argument("--class_id", type=int, default=DEFAULT_CLASS_ID, help="ImageNet-1K class id [0..999]")
    parser.add_argument("--rar_size", type=str, default=DEFAULT_RAR_SIZE, help="RAR model variant (fixed to rar_xl)")
    parser.add_argument("--num_images", type=int, default=DEFAULT_NUM_IMAGES, help="Number of images to generate")
    parser.add_argument("--class_ids", type=int, nargs='+', help="Generate for multiple class ids [0..999]")
    args = parser.parse_args()

    # Enforce XL regardless of user input
    args.rar_size = "rar_xl"

    # Fallback 1: comma-separated env var (silently ignored when malformed).
    if args.class_ids is None:
        raw_env = os.environ.get("RAR_CLASS_IDS")
        if raw_env:
            try:
                args.class_ids = [int(tok.strip()) for tok in raw_env.split(',') if tok.strip()]
            except Exception:
                args.class_ids = None
    # Fallback 2: whitespace/newline-separated classes.txt next to the script.
    if args.class_ids is None:
        classes_file = ROOT / "classes.txt"
        if classes_file.exists():
            try:
                text = classes_file.read_text()
                args.class_ids = [int(tok) for tok in text.replace('\n', ' ').split() if tok.strip()]
            except Exception:
                args.class_ids = None
    # Fallback 3: baked-in default list.
    if args.class_ids is None and DEFAULT_CLASS_IDS:
        args.class_ids = list(DEFAULT_CLASS_IDS)

    return args
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def main():
    """Entry point: bootstrap the venv on first run, then sample images."""
    args = parse_args()

    # Phase 1: outside the venv -> create it, install deps, re-exec inside.
    if not in_venv() and os.environ.get("RAR_BOOTSTRAPPED") != "1":
        venv_python = ensure_venv()
        install_requirements(venv_python)
        reexec_in_venv(venv_python)
        return

    # Phase 2: running inside the venv. The repo is normally cloned during
    # install, but re-clone here in case the venv survived a deleted checkout.
    if not REPO_DIR.exists():
        run(["git", "clone", "https://github.com/bytedance/1d-tokenizer", str(REPO_DIR)])

    venv_python = Path(sys.executable)
    targets = args.class_ids if args.class_ids else [args.class_id]
    for cid in targets:
        generate_imagenet_class(venv_python, class_id=int(cid), rar_size=args.rar_size, num_images=args.num_images)
    print(f"[done] Images saved to {OUT_DIR}")


if __name__ == "__main__":
    main()
|
VAR.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import subprocess
|
| 3 |
+
import sys
|
| 4 |
+
import urllib.request
|
| 5 |
+
import venv
|
| 6 |
+
import textwrap
|
| 7 |
+
|
| 8 |
+
# Directory name of the throwaway virtualenv used for the VAR pipeline.
ENV_DIR = "var_env"

# ==============================
# 1. Create a clean venv
# ==============================
# Reuse an existing environment so repeated runs skip the slow creation step.
if not os.path.exists(ENV_DIR):
    print(f">>> Creating virtual environment: {ENV_DIR}")
    venv.EnvBuilder(with_pip=True).create(ENV_DIR)
else:
    print(f">>> Using existing virtual environment: {ENV_DIR}")
|
| 18 |
+
|
| 19 |
+
def find_venv_python(env_dir):
    """Locate the interpreter inside *env_dir*.

    Checks the Windows layout (Scripts/) first, then the Unix layout (bin/);
    falls back to the current interpreter if neither yields an executable.
    """
    layouts = (
        ("Scripts", ("python.exe", "python3.exe")),  # Windows
        ("bin", ("python3", "python")),              # Unix
    )
    for subdir, names in layouts:
        folder = os.path.join(env_dir, subdir)
        if not os.path.exists(folder):
            continue
        for name in names:
            exe = os.path.join(folder, name)
            if os.path.exists(exe):
                return os.path.abspath(exe)
    return sys.executable
|
| 35 |
+
|
| 36 |
+
# Resolve the venv interpreter once; all pip/sample subprocesses use it.
VENV_PY = find_venv_python(ENV_DIR)
print(">>> Using venv Python at:", VENV_PY)

# ==============================
# 2. Clone VAR repo if missing
# ==============================
if not os.path.exists("VAR"):
    print(">>> Cloning VAR repo...")
    subprocess.run(["git", "clone", "https://github.com/FoundationVision/VAR.git"], check=True)

# Everything below (checkpoints, requirements.txt, sample.py, outputs) is
# addressed relative to the repo checkout.
os.chdir("VAR")
|
| 47 |
+
|
| 48 |
+
# ==============================
# 3. Download checkpoints
# ==============================
os.makedirs("checkpoints/var", exist_ok=True)  # VAR transformer checkpoint
os.makedirs("checkpoints/vae", exist_ok=True)  # VQ-VAE checkpoint
| 53 |
+
|
| 54 |
+
def download(url, out_path):
    """Fetch *url* to *out_path* unless the file already exists.

    Bug fix: the original wrote straight to *out_path*, so an interrupted
    transfer left a truncated file that the existence check then treated as
    complete, permanently blocking a re-download. We now fetch into a
    temporary ".part" file and move it into place only on success.
    """
    if os.path.exists(out_path):
        print(f">>> Already exists: {out_path}")
        return
    print(f">>> Downloading {out_path}")
    tmp_path = out_path + ".part"
    urllib.request.urlretrieve(url, tmp_path)
    os.replace(tmp_path, out_path)  # atomic rename on the same filesystem
|
| 60 |
+
|
| 61 |
+
# Fetch the two released checkpoints (skipped when already present).
download("https://huggingface.co/FoundationVision/var/resolve/main/var_d16.pth",
         "checkpoints/var/var_d16.pth")
download("https://huggingface.co/FoundationVision/var/resolve/main/vae_ch160v4096z32.pth",
         "checkpoints/vae/vae_ch160v4096z32.pth")

# ==============================
# 4. Install dependencies
# ==============================
print(">>> Installing dependencies in venv")
subprocess.run([VENV_PY, "-m", "pip", "install", "--upgrade", "pip"], check=True)
# Install the CUDA 12.1 torch build explicitly, before the repo requirements.
subprocess.run([VENV_PY, "-m", "pip", "install",
                "torch>=2.0.0", "torchvision", "torchaudio",
                "--index-url", "https://download.pytorch.org/whl/cu121"], check=True)

# Strip any torch pin from the repo requirements so pip does not replace the
# CUDA build installed above with a different (CPU) wheel.
req_file = "requirements.txt"
if os.path.exists(req_file):
    with open(req_file, "r") as f:
        lines = f.readlines()
    with open(req_file, "w") as f:
        for line in lines:
            if line.strip().startswith("torch"):
                continue
            f.write(line)
    # Bug fix: this install previously ran unconditionally, crashing with
    # check=True whenever the repo shipped no requirements.txt; it now runs
    # only when the file exists.
    subprocess.run([VENV_PY, "-m", "pip", "install", "-r", req_file], check=True)
|
| 87 |
+
|
| 88 |
+
# ==============================
# 5. Write sample.py (generation code)
# ==============================
# The script below is written into the VAR checkout and executed with the
# venv interpreter, so it can import the repo's `models` package directly.
# NOTE(review): torch.autocast("cuda", ...) is entered even when device ends
# up being "cpu" — presumably only CUDA hosts are targeted; confirm.
sample_code = textwrap.dedent("""
import argparse, os, torch, random, numpy as np
from PIL import Image
from models import build_vae_var

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt", type=str, required=True)
    parser.add_argument("--vae", type=str, required=True)
    parser.add_argument("--depth", type=int, default=16)
    parser.add_argument("--classes", type=int, nargs="+", default=[207,483,701,970])
    parser.add_argument("--cfg", type=float, default=4.0)
    parser.add_argument("--output", type=str, default="outputs/var_class_samples")
    args = parser.parse_args()

    seed = 0
    torch.manual_seed(seed); random.seed(seed); np.random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    device = "cuda" if torch.cuda.is_available() else "cpu"
    patch_nums = (1,2,3,4,5,6,8,10,13,16)

    vae, var = build_vae_var(V=4096, Cvae=32, ch=160, share_quant_resi=4,
                             device=device, patch_nums=patch_nums,
                             num_classes=1000, depth=args.depth, shared_aln=False)
    vae.load_state_dict(torch.load(args.vae, map_location="cpu"))
    var.load_state_dict(torch.load(args.ckpt, map_location="cpu"))
    vae.eval(); var.eval()
    for p in vae.parameters(): p.requires_grad_(False)
    for p in var.parameters(): p.requires_grad_(False)

    labels = torch.tensor(args.classes, device=device, dtype=torch.long)

    with torch.inference_mode():
        with torch.autocast("cuda", enabled=True, dtype=torch.float16):
            imgs = var.autoregressive_infer_cfg(
                B=len(labels), label_B=labels,
                cfg=args.cfg, top_k=900, top_p=0.95,
                g_seed=seed, more_smooth=False
            )

    os.makedirs(args.output, exist_ok=True)
    for i, img in enumerate(imgs):
        arr = img.permute(1,2,0).mul(255).clamp(0,255).byte().cpu().numpy()
        out_path = os.path.join(args.output, f"class_{args.classes[i]}_{i}.png")
        Image.fromarray(arr).resize((256,256), Image.LANCZOS).save(out_path)
        print(">>> Saved", out_path)

if __name__ == "__main__":
    main()
""")

# Overwrite any previous copy so edits to sample_code always take effect.
with open("sample.py", "w") as f:
    f.write(sample_code)
|
| 146 |
+
|
| 147 |
+
# ==============================
# 6. Run sample generation
# ==============================
print(">>> Running class-conditional generation in venv")
os.makedirs("outputs/var_class_samples", exist_ok=True)

# Sample four hard-coded ImageNet-1K class ids with the d16 checkpoint;
# check=True makes this script fail loudly if generation errors out.
subprocess.run([VENV_PY, "sample.py",
                "--ckpt", "checkpoints/var/var_d16.pth",
                "--vae", "checkpoints/vae/vae_ch160v4096z32.pth",
                "--depth", "16",
                "--classes", "207", "483", "701", "970",
                "--output", "outputs/var_class_samples"], check=True)

print(">>> Done! Check images in VAR/outputs/var_class_samples/")
|