""" NeAR — Image-to-Relightable 3D Gaussian Splatting Pipeline: ① Geometry → ② SLaT → ③ Videos → ④ PBR GLB - ZeroGPU: each @spaces.GPU callback gets a fresh CUDA context. - CPU preload: Hunyuan3D + NeAR weights warmed at container start. """ from __future__ import annotations import gc import os import shutil import sys import threading import time from pathlib import Path from typing import Any, Dict, Optional import gradio as gr import imageio import numpy as np import torch import trimesh from PIL import Image # ── HF token (from Space secret "near" if HF_TOKEN is absent) ────────────── if not os.environ.get("HF_TOKEN") and not os.environ.get("HUGGING_FACE_HUB_TOKEN"): _tok = (os.environ.get("near") or os.environ.get("NEAR") or "").strip() if _tok: os.environ["HF_TOKEN"] = _tok # ── ZeroGPU / spaces ──────────────────────────────────────────────────────── try: import spaces # type: ignore[reportMissingImports] except ImportError: spaces = None GPU = spaces.GPU if spaces is not None else (lambda f: f) def _gpu(duration: int = 120): """ZeroGPU decorator; no-op when spaces is not installed.""" if spaces is not None: return spaces.GPU(duration=duration) return lambda f: f # ── Backends (must be set before importing spconv / xformers) ──────────────── sys.path.insert(0, "./hy3dshape") os.environ.setdefault("ATTN_BACKEND", "xformers") os.environ.setdefault("SPCONV_ALGO", "native") os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "7.5;8.0;8.6;8.9;9.0") from hy3dshape.pipelines import Hunyuan3DDiTFlowMatchingPipeline # type: ignore from hy3dshape.rembg import BackgroundRemover # type: ignore # NeAR is imported lazily inside functions to avoid pulling gsplat / heavy # submodules into the main process, which can initialise CUDA and break # Hugging Face ZeroGPU context management. 
# ── Paths ────────────────────────────────────────────────────────────────────
APP_DIR = Path(__file__).resolve().parent
CACHE_DIR = APP_DIR / "tmp_gradio"          # per-session scratch space
CACHE_DIR.mkdir(exist_ok=True)
DEFAULT_IMAGE = APP_DIR / "assets/example_image/T.png"
DEFAULT_HDRI = APP_DIR / "assets/hdris/studio_small_03_1k.exr"
MAX_SEED = int(np.iinfo(np.int32).max)

# ── Global model singletons ──────────────────────────────────────────────────
# Weights live here across @GPU callbacks (on CPU between calls).
_MODEL_LOCK = threading.Lock()
PIPELINE: Optional[NeARImageToRelightable3DPipeline] = None
GEOMETRY_PIPELINE: Optional[Hunyuan3DDiTFlowMatchingPipeline] = None
_LIGHT_PREPROCESSOR: Optional[BackgroundRemover] = None

# ── Helpers ──────────────────────────────────────────────────────────────────
def _truthy_env(name: str, default: str) -> bool:
    """Interpret env var `name` as a boolean flag ('1'/'true'/'yes'/'on')."""
    v = os.environ.get(name, default).strip().lower()
    return v in ("1", "true", "yes", "on")

# Preload on CPU by default only when running under ZeroGPU/spaces.
_CPU_PRELOAD = _truthy_env("NEAR_MODEL_CPU_PRELOAD_AT_START", "1" if spaces is not None else "0")

def _free_cuda() -> None:
    """Collect garbage and release cached CUDA allocations (if CUDA exists)."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

def _path_is_git_lfs(p: Path) -> bool:
    """True if `p` is an unfetched Git LFS pointer file, not real content."""
    try:
        return p.is_file() and p.stat().st_size <= 512 and \
            p.read_bytes()[:120].startswith(b"version https://git-lfs.github.com/spec/v1")
    except OSError:
        return False

# ── CPU preload (background thread, no GPU lease) ────────────────────────────
def _load_near_cpu_locked() -> None:
    """Load the NeAR pipeline onto CPU (idempotent). Caller holds _MODEL_LOCK
    when running from the preload thread; see _ensure_near_on_cuda for the
    lock-free @GPU path."""
    global PIPELINE
    if PIPELINE is not None:
        return
    print("[NeAR] loading NeAR on CPU…", flush=True)
    t0 = time.time()
    # Lazy import: see module header — trellis must not load in the main process.
    from trellis.pipelines import NeARImageToRelightable3DPipeline
    PIPELINE = NeARImageToRelightable3DPipeline.from_pretrained("luh0502/NeAR")
    PIPELINE.to("cpu")
    # Ensure renderer/tone_mapper are NOT initialized on CPU — they need a live CUDA context.
    PIPELINE.renderer = None
    PIPELINE.tone_mapper = None
    print(f"[NeAR] NeAR CPU load done in {time.time()-t0:.1f}s", flush=True)

def _load_geometry_cpu_locked() -> None:
    """Load the Hunyuan3D geometry pipeline onto CPU (idempotent; caller holds
    _MODEL_LOCK)."""
    global GEOMETRY_PIPELINE
    if GEOMETRY_PIPELINE is not None:
        return
    print("[NeAR] loading Hunyuan3D on CPU…", flush=True)
    t0 = time.time()
    GEOMETRY_PIPELINE = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(
        os.environ.get("NEAR_HUNYUAN_PRETRAINED", "tencent/Hunyuan3D-2.1"),
        device="cpu",
    )
    print(f"[NeAR] Hunyuan3D CPU load done in {time.time()-t0:.1f}s", flush=True)

def _preload_worker() -> None:
    """Warm disk caches in the background so the first @GPU callback only pays
    from_pretrained from disk (no network wait inside the 120 s lease).

    CRITICAL: we must NOT instantiate NeAR in the main process. NeAR's
    from_pretrained chain calls torchvision.convnext_tiny(weights=...) which
    triggers torch._C._cuda_init — that permanently breaks ZeroGPU.

    Every step is individually best-effort: a failure is logged and the
    next step still runs.
    """
    # Step 1: load Hunyuan3D into CPU RAM (same pattern as app_hyshape.py).
    try:
        with _MODEL_LOCK:
            _load_geometry_cpu_locked()
        print("[NeAR] preload: Hunyuan3D in CPU RAM.", flush=True)
    except Exception as exc:
        print(f"[NeAR] preload: Hunyuan3D failed: {exc}", flush=True)
    # Step 2: warm NeAR disk cache (no model instantiation — safe).
    try:
        from huggingface_hub import snapshot_download
        snapshot_download(repo_id="luh0502/NeAR", token=os.environ.get("HF_TOKEN"))
        print("[NeAR] preload: NeAR disk cache ready.", flush=True)
    except Exception as exc:
        print(f"[NeAR] preload: NeAR disk cache failed: {exc}", flush=True)
    # Step 3: warm rembg model cache (briaai/RMBG-2.0, referenced in pipeline.yaml).
    try:
        from huggingface_hub import snapshot_download
        snapshot_download(repo_id="briaai/RMBG-2.0", token=os.environ.get("HF_TOKEN"))
        print("[NeAR] preload: RMBG-2.0 disk cache ready.", flush=True)
    except Exception as exc:
        print(f"[NeAR] preload: RMBG-2.0 disk cache failed: {exc}", flush=True)
    # Step 4: pre-download DINOv2 weights file.
    # Skipped when a local repo override is configured via env.
    if not (os.environ.get("NEAR_DINO_LOCAL_REPO") or os.environ.get("NEAR_AUX_REPO")):
        try:
            import torch
            ckpt_dir = os.path.join(torch.hub.get_dir(), "checkpoints")
            os.makedirs(ckpt_dir, exist_ok=True)
            ckpt_path = os.path.join(ckpt_dir, "dinov2_vitl14_reg4_pretrain.pth")
            if not os.path.exists(ckpt_path):
                torch.hub.download_url_to_file(
                    "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_pretrain.pth",
                    ckpt_path,
                    progress=True,
                )
            print("[NeAR] preload: DINOv2 weights file cached.", flush=True)
        except Exception as exc:
            print(f"[NeAR] preload: DINOv2 weight prefetch failed: {exc}", flush=True)
    # Step 5: clone DINOv2 torch.hub repo so torch.hub.load avoids GitHub inside @GPU.
    if not (os.environ.get("NEAR_DINO_LOCAL_REPO") or os.environ.get("NEAR_AUX_REPO")):
        try:
            import os as _os, subprocess as _subprocess, torch as _torch
            hub_dir = _torch.hub.get_dir()
            repo_dir = _os.path.join(hub_dir, "facebookresearch_dinov2_main")
            if not _os.path.exists(repo_dir):
                _subprocess.run(
                    ["git", "clone", "--depth", "1",
                     "https://github.com/facebookresearch/dinov2.git", repo_dir],
                    check=True,
                    timeout=120,
                )
            _os.environ["NEAR_DINO_LOCAL_REPO"] = repo_dir
            print("[NeAR] preload: DINOv2 repo cloned for local torch.hub.", flush=True)
        except Exception as exc:
            print(f"[NeAR] preload: DINOv2 repo clone failed: {exc}", flush=True)
    # Step 6: pre-download convnext_tiny ImageNet weights.
    # torchvision's convnext_tiny(weights=...) is called inside NeAR's Hdri_Encoder
    # __init__. Downloading it here prevents a network stall inside the @GPU lease.
    try:
        import os as _os, torch as _torch
        ckpt_dir = _os.path.join(_torch.hub.get_dir(), "checkpoints")
        _os.makedirs(ckpt_dir, exist_ok=True)
        ckpt_path = _os.path.join(ckpt_dir, "convnext_tiny-983f1562.pth")
        if not _os.path.exists(ckpt_path):
            _torch.hub.download_url_to_file(
                "https://download.pytorch.org/models/convnext_tiny-983f1562.pth",
                ckpt_path,
                progress=True,
            )
        print("[NeAR] preload: convnext_tiny weights cached.", flush=True)
    except Exception as exc:
        print(f"[NeAR] preload: convnext_tiny cache failed: {exc}", flush=True)

# ── GPU ensure helpers ────────────────────────────────────────────────────────
# Called at the top of EVERY @GPU callback. Always re-creates renderer and
# tone_mapper because each ZeroGPU call has a fresh CUDA context.
def _ensure_near_on_cuda() -> NeARImageToRelightable3DPipeline:
    """Return the NeAR pipeline moved to CUDA with a freshly initialised
    renderer and tone mapper valid for the current CUDA context."""
    # NeAR is loaded from disk inside the @GPU callback. We deliberately do
    # NOT preload it in the main process because its from_pretrained chain
    # calls torchvision.convnext_tiny(weights=...) which triggers
    # torch._C._cuda_init and permanently breaks ZeroGPU context management.
    # ZeroGPU runs one GPU callback at a time so no lock is needed.
    _load_near_cpu_locked()
    assert PIPELINE is not None
    # ── Cleanup old CUDA objects before re-init ─────────────────────
    # Each @GPU callback has a NEW CUDA context; old renderer/tone_mapper
    # pointers are invalid and must be fully deleted, not just set to None.
    if hasattr(PIPELINE, 'renderer') and PIPELINE.renderer is not None:
        try:
            del PIPELINE.renderer
        except Exception as e:
            print(f"[NeAR] warning: failed to delete old renderer: {e}", flush=True)
        PIPELINE.renderer = None
    if hasattr(PIPELINE, 'tone_mapper') and PIPELINE.tone_mapper is not None:
        try:
            del PIPELINE.tone_mapper
        except Exception as e:
            print(f"[NeAR] warning: failed to delete old tone_mapper: {e}", flush=True)
        PIPELINE.tone_mapper = None
    _free_cuda()  # Clear any lingering GPU memory
    # ── Move to new CUDA context and re-init fresh objects ──────────
    if torch.cuda.is_available():
        PIPELINE.to("cuda")
    # Initialize fresh renderer/tone_mapper in the new CUDA context
    print("[NeAR] initializing renderer/tone_mapper in fresh CUDA context…", flush=True)
    PIPELINE.setup_renderer()
    PIPELINE.setup_tone_mapper("AgX")
    return PIPELINE

def _teardown_near() -> None:
    """Release CUDA-backed objects; move weights back to CPU.

    ZeroGPU contract: always teardown CUDA objects before exiting @GPU
    callback. The next callback will get a completely fresh CUDA context.
    """
    if PIPELINE is None:
        return
    print("[NeAR] tearing down renderer/tone_mapper…", flush=True)
    # Explicitly delete CUDA objects (not just None assignment)
    if hasattr(PIPELINE, 'renderer'):
        try:
            if PIPELINE.renderer is not None:
                del PIPELINE.renderer
        except Exception as e:
            print(f"[NeAR] warning: failed to delete renderer in teardown: {e}", flush=True)
        PIPELINE.renderer = None
    if hasattr(PIPELINE, 'tone_mapper'):
        try:
            if PIPELINE.tone_mapper is not None:
                del PIPELINE.tone_mapper
        except Exception as e:
            print(f"[NeAR] warning: failed to delete tone_mapper in teardown: {e}", flush=True)
        PIPELINE.tone_mapper = None
    # Move model weights back to CPU
    if torch.cuda.is_available():
        PIPELINE.to("cpu")
    _free_cuda()

def _ensure_geometry_on_cuda() -> Hunyuan3DDiTFlowMatchingPipeline:
    """Return the Hunyuan3D pipeline moved to CUDA (CPU copy is reused)."""
    # Hunyuan3D is pre-loaded into CPU RAM by _preload_worker, so this is
    # usually a no-op. Lock is released before .to("cuda").
    with _MODEL_LOCK:
        _load_geometry_cpu_locked()
    assert GEOMETRY_PIPELINE is not None
    if torch.cuda.is_available():
        GEOMETRY_PIPELINE.to("cuda")
    return GEOMETRY_PIPELINE

def _teardown_geometry() -> None:
    """Move Hunyuan3D weights back to CPU and free CUDA memory."""
    if GEOMETRY_PIPELINE is None:
        return
    if torch.cuda.is_available():
        GEOMETRY_PIPELINE.to("cpu")
    _free_cuda()

# ── Image preprocessing ───────────────────────────────────────────────────────
def _get_rembg() -> BackgroundRemover:
    """Lazily construct and cache the background-removal model."""
    global _LIGHT_PREPROCESSOR
    if _LIGHT_PREPROCESSOR is None:
        _LIGHT_PREPROCESSOR = BackgroundRemover()
    return _LIGHT_PREPROCESSOR

def _ensure_rgba(img: Image.Image) -> Image.Image:
    """Return `img` converted to RGBA (no-op if already RGBA)."""
    return img if img.mode == "RGBA" else img.convert("RGBA")

def _preprocess_to_518(image: Image.Image) -> Image.Image:
    """Rembg if no alpha → crop to subject → resize to 518×518 RGBA."""
    image = _ensure_rgba(image)
    alpha = np.array(image)[:, :, 3]
    # A fully-opaque alpha channel means no real matte — run rembg.
    has_alpha = not np.all(alpha == 255)
    if has_alpha:
        out = image
    else:
        rgb = image.convert("RGB")
        # Cap the long side at 1024 px before rembg to bound its cost.
        max_side = max(rgb.size)
        if max_side > 1024:
            scale = 1024 / max_side
            rgb = rgb.resize((int(rgb.width * scale), int(rgb.height * scale)), Image.Resampling.LANCZOS)
        out = _get_rembg()(rgb)
    out = _ensure_rgba(out)
    alpha = np.array(out)[:, :, 3]
    # Subject bounding box from mostly-opaque pixels (alpha > 80 %).
    pts = np.argwhere(alpha > 0.8 * 255)
    if pts.size == 0:
        return out.resize((518, 518), Image.Resampling.LANCZOS)
    r0, c0, r1, c1 = pts[:, 0].min(), pts[:, 1].min(), pts[:, 0].max(), pts[:, 1].max()
    cy, cx = (r0 + r1) / 2, (c0 + c1) / 2
    # Square crop centred on the subject with a 20 % margin.
    size = int(max(r1 - r0, c1 - c0) * 1.2)
    crop = out.crop((cx - size // 2, cy - size // 2, cx + size // 2, cy + size // 2))
    return crop.resize((518, 518), Image.Resampling.LANCZOS).convert("RGBA")

def _flatten_rgba(image: Image.Image, matte: tuple[float, float, float]) -> Image.Image:
    """Composite an RGBA image onto a solid `matte` colour (0–1 floats) → RGB."""
    bg = Image.new("RGBA", image.size, tuple(int(c * 255) for c in matte) + (255,))
    return Image.alpha_composite(bg, _ensure_rgba(image)).convert("RGB")

def preprocess_image_only(img: Optional[Image.Image]) -> Optional[Image.Image]:
    """UI callback: preprocess the uploaded image in place (None passthrough)."""
    if img is None:
        return None
    return _preprocess_to_518(img)

# ── SLaT I/O ──────────────────────────────────────────────────────────────────
def _save_slat(slat: Any, path: Path) -> None:
    """Serialise a sparse SLaT (feats + coords tensors) to compressed .npz."""
    np.savez_compressed(path,
                        feats=slat.feats.detach().cpu().numpy(),
                        coords=slat.coords.detach().cpu().numpy())

def _require_slat(st: Dict[str, Any]) -> str:
    """Return the session's SLaT path or raise a user-facing gr.Error."""
    p = st.get("slat_path")
    if not p or not os.path.isfile(str(p)):
        raise gr.Error("Generate or upload a SLaT (.npz) first.")
    return str(p)

def preview_hdri(hdri_file_obj: Any) -> Optional[np.ndarray]:
    """Tone-map HDRI to LDR for display; no GPU needed."""
    # Accept either a raw path string or a gradio File object.
    p = hdri_file_obj if isinstance(hdri_file_obj, str) else \
        getattr(hdri_file_obj, "name", None) or getattr(hdri_file_obj, "path", None)
    if not p or not os.path.isfile(str(p)):
        return None
    try:
        import pyexr  # type: ignore
        from simple_ocio import ToneMapper
        hdri_np = pyexr.read(str(p))[..., :3]
        ldr = ToneMapper(view="Khronos PBR Neutral").hdr_to_ldr(hdri_np)
        return (np.clip(ldr, 0, 1) * 255).astype(np.uint8)
    except Exception:
        # Best-effort preview only — never break the UI over a bad EXR.
        return None

def _load_hdri_resized(pipe: NeARImageToRelightable3DPipeline,
                       hdri_path: str,
                       target_w: int = 1024,
                       target_h: int = 512) -> np.ndarray:
    """Load HDRI and resize to target_w×target_h with nearest-neighbour if needed."""
    import cv2
    hdri_np = pipe.load_hdri(hdri_path)
    h, w = hdri_np.shape[:2]
    if w != target_w or h != target_h:
        hdri_np = cv2.resize(hdri_np, (target_w, target_h), interpolation=cv2.INTER_NEAREST)
    return hdri_np

def _require_hdri(hdri_obj: Any) -> str:
    """Return the HDRI file path or raise a user-facing gr.Error."""
    p = hdri_obj if isinstance(hdri_obj, str) else getattr(hdri_obj, "name", None) or getattr(hdri_obj, "path", None)
    if not p or not os.path.isfile(p):
        raise gr.Error("Upload an HDRI .exr file.")
    return p

# ── Session ───────────────────────────────────────────────────────────────────
def start_session(req: gr.Request) -> None:
    """Create the per-session cache directory when a browser connects."""
    (CACHE_DIR / str(req.session_hash)).mkdir(parents=True, exist_ok=True)

def end_session(req: gr.Request) -> None:
    """Delete the per-session cache directory when the browser disconnects."""
    shutil.rmtree(CACHE_DIR / str(req.session_hash), ignore_errors=True)

# ── GPU callbacks ─────────────────────────────────────────────────────────────
@GPU
@torch.no_grad()
def generate_geometry(
    image_input: Optional[Image.Image],
    req: gr.Request,
    progress: gr.Progress = gr.Progress(track_tqdm=True),
) -> tuple[Dict[str, Any], str, str]:
    """① Hunyuan3D geometry generation.

    Returns:
        (asset_state, mesh_path, status_msg) — state carries mesh_path and a
        reset slat_path so stale SLaTs are never reused for a new mesh.
    """
    if image_input is None:
        raise gr.Error("Upload an input image first.")
    session_dir = CACHE_DIR / str(req.session_hash)
    session_dir.mkdir(parents=True, exist_ok=True)
    rgba = _ensure_rgba(image_input)
    if rgba.size != (518, 518):
        progress(0.1, desc="Preprocessing image")
        rgba = _preprocess_to_518(rgba)
    # Hunyuan3D expects RGB on white; keep the RGBA for later steps.
    mesh_rgb = _flatten_rgba(rgba, (1.0, 1.0, 1.0))
    rgba.save(session_dir / "input_rgba.png")
    progress(0.3, desc="Loading Hunyuan3D on GPU")
    geo = _ensure_geometry_on_cuda()
    progress(0.5, desc="Generating geometry…")
    mesh = geo(image=mesh_rgb)[0]
    mesh_path = session_dir / "geometry.glb"
    mesh.export(mesh_path)
    del mesh
    _teardown_geometry()
    state = {"mesh_path": str(mesh_path), "slat_path": None}
    return state, str(mesh_path), f"**① Geometry ready** → `{mesh_path.name}`. Run **② SLaT** next."
@GPU
@torch.no_grad()
def generate_slat(
    asset_state: Dict[str, Any],
    image_input: Optional[Image.Image],
    seed: int,
    req: gr.Request,
    progress: gr.Progress = gr.Progress(track_tqdm=True),
) -> tuple[Dict[str, Any], str]:
    """② SLaT prediction (NeAR).

    Voxelises the step-① mesh into coordinates, conditions NeAR on the
    (preprocessed) input image, and writes the sampled structured latent
    to ``slat.npz`` in the session directory.

    Returns:
        (updated asset_state with "slat_path" set, status markdown)
    """
    # Guard clauses: a step-① mesh and an input image are both required.
    mesh_file = asset_state.get("mesh_path")
    if not (mesh_file and os.path.isfile(mesh_file)):
        raise gr.Error("Run **① Geometry** first (or upload a mesh).")
    if image_input is None:
        raise gr.Error("Upload an input image.")

    out_dir = CACHE_DIR / str(req.session_hash)

    progress(0.1, desc="Loading mesh")
    tri_mesh = trimesh.load(mesh_file, force="mesh")

    # Normalise to 518×518 RGBA, then flatten onto a black matte for NeAR.
    image_rgba = _ensure_rgba(image_input)
    if image_rgba.size != (518, 518):
        image_rgba = _preprocess_to_518(image_rgba)
    conditioning = _flatten_rgba(image_rgba, (0.0, 0.0, 0.0))

    progress(0.2, desc="Loading NeAR on GPU")
    near = _ensure_near_on_cuda()

    progress(0.4, desc="Computing SLaT coords")
    voxel_coords = near.shape_to_coords(tri_mesh)
    del tri_mesh  # mesh no longer needed — drop it before the heavy sampling
    _free_cuda()

    progress(0.6, desc="Sampling SLaT…")
    latent = near.run_with_coords(
        [conditioning], voxel_coords, seed=int(seed), preprocess_image=False
    )
    del voxel_coords

    out_path = out_dir / "slat.npz"
    _save_slat(latent, out_path)
    del latent
    _teardown_near()  # ZeroGPU contract: release CUDA objects before returning

    updated = dict(asset_state)
    updated["slat_path"] = str(out_path)
    return updated, f"**② SLaT saved** → `{out_path.name}`. Run **③ Videos** or **④ GLB** next."
def load_slat_file(
    slat_upload: Any,
    slat_path_text: str,
    req: gr.Request,
) -> tuple[Dict[str, Any], str]:
    """Load a pre-computed SLaT .npz (skips steps ①②).

    The uploaded file wins over the pasted path; mesh_path is reset so the
    GLB export falls back to the SLaT decoder mesh.
    """
    if slat_upload is not None:
        resolved = slat_upload if isinstance(slat_upload, str) else \
            getattr(slat_upload, "name", None) or getattr(slat_upload, "path", None)
    else:
        resolved = slat_path_text.strip() if slat_path_text else None
    if not resolved or not os.path.isfile(str(resolved)):
        raise gr.Error("Provide a valid .npz path or upload the file.")
    session_dir = CACHE_DIR / str(req.session_hash)
    session_dir.mkdir(parents=True, exist_ok=True)
    state: Dict[str, Any] = {"mesh_path": None, "slat_path": str(resolved)}
    return state, f"SLaT loaded: `{Path(resolved).name}`"

@GPU
@torch.no_grad()
def generate_videos(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    hdri_rot: float,
    fps: int,
    num_cam: int,
    num_hdri: int,
    yaw: float,
    pitch: float,
    fov: float,
    radius: float,
    req: gr.Request,
    progress: gr.Progress = gr.Progress(track_tqdm=True),
) -> tuple[str, str, str, str]:
    """③ Render camera-orbit + HDRI-rotation videos.

    Kept as a separate @GPU callback so it fits inside the 120 s ZeroGPU lease.

    Returns:
        (cam_video, hdri_video, roll_video, status_msg)
    """
    slat_path = _require_slat(asset_state)
    hdri_path = _require_hdri(hdri_file_obj)
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.05, desc="Loading NeAR on GPU")
    pipe = _ensure_near_on_cuda()
    progress(0.20, desc="Loading SLaT / HDRI")
    slat = pipe.load_slat(slat_path)
    hdri_np = _load_hdri_resized(pipe, hdri_path)
    # ── Camera-orbit video ──────────────────────────────────────────
    progress(0.30, desc="Rendering camera-orbit video…")
    cam_frames = pipe.render_camera_path_video(
        slat, hdri_np, num_views=int(num_cam), fov=float(fov), radius=float(radius),
        hdri_rot_deg=float(hdri_rot), full_video=True, shadow_video=True,
        bg_color=(1, 1, 1), verbose=True,
    )
    p_cam = session_dir / "video_camera_orbit.mp4"
    imageio.mimsave(p_cam, cam_frames, fps=int(fps))
    del cam_frames  # free frame buffers before the next render pass
    _free_cuda()
    # ── HDRI-rotation videos ────────────────────────────────────────
    progress(0.65, desc="Rendering HDRI-rotation video…")
    roll_frames, hdri_frames = pipe.render_hdri_rotation_video(
        slat, hdri_np, num_frames=int(num_hdri), yaw_deg=float(yaw), pitch_deg=float(pitch),
        fov=float(fov), radius=float(radius), full_video=True, shadow_video=True,
        bg_color=(1, 1, 1), verbose=True,
    )
    p_hdri = session_dir / "video_hdri_rotation.mp4"
    p_roll = session_dir / "video_env_roll.mp4"
    imageio.mimsave(p_hdri, hdri_frames, fps=int(fps))
    imageio.mimsave(p_roll, roll_frames, fps=int(fps))
    del hdri_frames, roll_frames, slat, hdri_np
    _free_cuda()
    _teardown_near()
    msg = (
        f"**③ Videos ready** → `{Path(p_cam).name}`, "
        f"`{Path(p_hdri).name}`, `{Path(p_roll).name}`"
    )
    return str(p_cam), str(p_hdri), str(p_roll), msg

@GPU
# NOTE(review): unlike generate_geometry/generate_slat/generate_videos this
# callback has no @torch.no_grad() — confirm whether that is intentional.
def generate_glb(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    hdri_rot: float,
    simplify: float,
    texture_size: int,
    req: gr.Request,
    progress: gr.Progress = gr.Progress(track_tqdm=True),
) -> tuple[str, str]:
    """④ Export PBR GLB (baked texture + simplified mesh).

    Separate @GPU callback so the heavy mesh simplification does not blow
    the 120 s budget shared with video rendering.

    Returns:
        (pbr_glb, status_msg)
    """
    slat_path = _require_slat(asset_state)
    hdri_path = _require_hdri(hdri_file_obj)
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.05, desc="Loading NeAR on GPU")
    pipe = _ensure_near_on_cuda()
    progress(0.15, desc="Loading SLaT / HDRI / mesh")
    slat = pipe.load_slat(slat_path)
    hdri_np = _load_hdri_resized(pipe, hdri_path)
    mesh_path = asset_state.get("mesh_path")
    base_mesh: Optional[trimesh.Trimesh] = None
    if mesh_path and os.path.isfile(mesh_path):
        print(f"[NeAR] loading Hunyuan mesh from {mesh_path}…", flush=True)
        raw = trimesh.load(mesh_path, force="mesh")
        print(f"[NeAR] raw mesh bounds: {raw.bounds[0].round(3)} → {raw.bounds[1].round(3)}", flush=True)
        base_mesh = _hunyuan_mesh_to_renderer_space(raw)
        del raw
    else:
        print("[NeAR] no mesh_path — will use SLaT decoder mesh for GLB", flush=True)
    progress(0.30, desc="Baking PBR GLB…")
    glb = pipe.export_glb_from_slat(
        slat, hdri_np, hdri_rot_deg=float(hdri_rot), base_mesh=base_mesh,
        simplify=float(simplify), texture_size=int(texture_size), fill_holes=True,
    )
    del slat, hdri_np, base_mesh
    _free_cuda()
    out_path = session_dir / "near_pbr.glb"
    glb.export(out_path)
    del glb
    _teardown_near()
    msg = f"**④ PBR GLB ready** → `{Path(out_path).name}`"
    return str(out_path), msg

def _hunyuan_mesh_to_renderer_space(mesh: trimesh.Trimesh) -> trimesh.Trimesh:
    """Transform a Hunyuan3D output mesh into the coordinate space expected by
    NeAR's renderer / shape_to_coords: Y-up → Z-up, scale to [-0.5, 0.5]³.

    Hunyuan outputs: Y-up, vertices in ~[-1.01, 1.01]³.
    NeAR renderer:   Z-up, vertices in [-0.5, 0.5]³.
    """
    mesh = mesh.copy()  # never mutate the caller's mesh
    # Y-up → Z-up
    R = trimesh.transformations.rotation_matrix(np.pi / 2, [1, 0, 0])
    mesh.apply_transform(R)
    # [-1.01,1.01] → [-0.5,0.5] (multiply by 0.5, same as shape_to_coords)
    mesh.vertices = mesh.vertices * 0.5
    bounds = mesh.bounds  # [[xmin,ymin,zmin],[xmax,ymax,zmax]]
    print(
        f"[NeAR] base_mesh after transform: "
        f"bounds={bounds[0].round(3)} → {bounds[1].round(3)}, "
        f"vertices={len(mesh.vertices)}, faces={len(mesh.faces)}",
        flush=True,
    )
    return mesh

def clear_cache(req: gr.Request) -> str:
    """Clear session cache and free GPU memory."""
    session_dir = CACHE_DIR / str(req.session_hash)
    shutil.rmtree(session_dir, ignore_errors=True)
    session_dir.mkdir(parents=True, exist_ok=True)
    _free_cuda()
    return "Session cache cleared."

# ── UI ──────────────────────────────────────────────────────────────────────
CSS = """
.gradio-container { max-width: 100% !important; width: 100% !important; }
footer { display: none !important; }
/* remove all heavy block borders everywhere */
.gradio-container .block, .gradio-container .panel, .gradio-container .form { box-shadow: none !important; }
.section-kicker { font-size: 0.68rem !important; font-weight: 700 !important; color: #9ca3af !important; text-transform: uppercase !important; letter-spacing: 0.09em !important; margin: 0.6rem 0 0.3rem 0 !important; }
/* HDRI preview: no frame */
.hdri-preview-image, .hdri-preview-image .wrap, .hdri-preview-image .image-container, .hdri-preview-image img { border: none !important; outline: none !important; box-shadow: none !important; }
.hdri-preview-image img { border-radius: 6px !important; }
.gradio-container .hdri-preview-image { --block-border-width: 0px !important; }
/* status bar: subtle, no thick left accent */
.status-footer { font-size: 0.82rem !important; line-height: 1.5 !important; color: var(--body-text-color-subdued, #6b7280) !important; margin: 0 0 0.5rem 0 !important; padding: 0.4rem 0.6rem !important; background: transparent !important; border: none !important; border-bottom: 1px solid var(--border-color-primary, #e5e7eb) !important; border-radius: 0 !important; box-shadow: none !important; min-height: 2rem !important; --block-border-width: 0px !important; }
.status-footer p, .status-footer .prose p { margin: 0 !important; }
/* sidebar examples: 2-col image grid */
.img-gallery table { display:grid !important; grid-template-columns:repeat(2,1fr) !important; gap:3px !important; }
.img-gallery table thead { display:none !important; }
.img-gallery table tr { display:contents !important; }
.img-gallery table td { padding:0 !important; }
.img-gallery table td img { width:100% !important; height:72px !important; object-fit:cover !important; border-radius:4px !important; }
.sidebar-examples { min-width: 0 !important; }
.sidebar-examples .label-wrap { font-size: 0.82rem !important; }
/* left column: tighten vertical spacing */
.gradio-container .block { margin-bottom: 0 !important; padding-bottom: 0 !important; }
.gradio-container .form { gap: 4px !important; }
.gradio-container .gap { gap: 4px !important; }
/* section kicker: no gap above/below */
.section-kicker { margin: 0.4rem 0 0.1rem 0 !important; padding: 0 !important; line-height: 1 !important; }
/* html block that wraps section-kicker: strip default block padding */
.gradio-container .prose:has(.section-kicker) { padding: 0 !important; margin: 0 !important; }
.gradio-container span:has(.section-kicker) { padding: 0 !important; }
"""

THEME = gr.themes.Base(primary_hue=gr.themes.colors.blue)

def _collect_examples() -> list[list[str]]:
    """Return [[path], ...] for real (non-LFS) PNGs in assets/example_image/."""
    img_dir = APP_DIR / "assets/example_image"
    if not img_dir.is_dir():
        return []
    # Prefer the curated 'typical_*' set; fall back to everything else.
    preferred = sorted(img_dir.glob("typical_*.png"))
    others = sorted(p for p in img_dir.glob("*.png") if not p.name.startswith("typical_"))
    ordered = preferred + others
    return [[str(p)] for p in ordered if not _path_is_git_lfs(p)]

def build_app() -> gr.Blocks:
    """Assemble the Gradio Blocks UI and wire all callbacks."""
    if _path_is_git_lfs(DEFAULT_IMAGE):
        print("[NeAR] WARNING: example image is a Git LFS pointer — run `git lfs pull`.", flush=True)
    _img_ex = _collect_examples()
    _slat_ex = [
        [str(p)] for p in sorted((APP_DIR / "assets/example_slats").glob("*.npz"))
        if not _path_is_git_lfs(p)
    ]
    _hdri_ex = [
        [str(p)] for p in sorted((APP_DIR / "assets/hdris").glob("*.exr"))
        if not _path_is_git_lfs(p)
    ]
    import warnings
    # Silence gradio's UserWarnings during Blocks construction only.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        _blocks_ctx = gr.Blocks(title="NeAR — Relightable 3DGS", theme=THEME, css=CSS, fill_width=True)
    with _blocks_ctx as demo:
        asset_state = gr.State({})
        gr.Markdown(
            "## NeAR — Image-to-Relightable 3D Gaussian Splatting\n"
            "**① Geometry** → **② SLaT** → **③ Videos** → **④ PBR GLB**\n\n"
            "Upload an image and walk through the four steps. "
            "Geometry and appearance are decoupled — after step ① you can swap the input image "
            "before step ② to retexture the same mesh with a different style."
        )
        with gr.Row(equal_height=False):
            # ── LEFT: narrow controls ─────────────────────────────────────────
            with gr.Column(scale=1, min_width=300):
                # NOTE(review): the original HTML markup inside these gr.HTML
                # kicker headings was lost in extraction; reconstructed as a
                # .section-kicker element (matches the CSS) — verify markup.
                gr.HTML('<div class="section-kicker">Asset</div>')
                image_input = gr.Image(
                    label="Input image (RGBA or RGB)",
                    type="pil",
                    image_mode="RGBA",
                    value=str(DEFAULT_IMAGE) if DEFAULT_IMAGE.exists() and not _path_is_git_lfs(DEFAULT_IMAGE) else None,
                    height=300,
                )
                seed = gr.Slider(0, MAX_SEED, value=43, step=1, label="SLaT seed")
                btn_geo = gr.Button("① Generate Geometry", variant="primary")
                btn_slat = gr.Button("② Generate SLaT", variant="primary")
                with gr.Accordion("Load existing SLaT (.npz)", open=False):
                    slat_upload = gr.File(label="Upload .npz", file_types=[".npz"])
                    slat_path_txt = gr.Textbox(label="Or paste path", placeholder="/path/to/slat.npz")
                    btn_load_slat = gr.Button("Load SLaT")
                    if _slat_ex:
                        gr.Examples(examples=_slat_ex, inputs=[slat_path_txt], label="Example SLaTs", examples_per_page=6)
                gr.HTML('<div class="section-kicker">Environment (HDRI)</div>')
                hdri_file = gr.File(
                    label="Environment .exr",
                    file_types=[".exr"],
                    value=str(DEFAULT_HDRI) if DEFAULT_HDRI.exists() else None,
                )
                hdri_preview = gr.Image(
                    interactive=False,
                    height=110,
                    container=False,
                    elem_classes=["hdri-preview-image"],
                )
                hdri_rot = gr.Slider(0, 360, value=0, step=1, label="HDRI rotation °")
                gr.HTML('<div class="section-kicker">Actions</div>')
                btn_videos = gr.Button("③ Generate Videos", variant="primary")
                btn_glb = gr.Button("④ Export PBR GLB", variant="primary")
                with gr.Accordion("Video / export settings", open=False):
                    fps = gr.Slider(8, 48, value=24, step=1, label="FPS")
                    num_cam = gr.Slider(8, 96, value=48, step=2, label="Camera-orbit frames")
                    num_hdri = gr.Slider(8, 96, value=48, step=2, label="HDRI-rotation frames")
                    with gr.Row():
                        yaw = gr.Slider(0, 360, value=0, step=1, label="Yaw °")
                        pitch = gr.Slider(-90, 90, value=0, step=1, label="Pitch °")
                    with gr.Row():
                        fov = gr.Slider(10, 70, value=40, step=1, label="FoV")
                        radius = gr.Slider(1.0, 4.0, value=2.0, step=0.05, label="Radius")
                    simplify = gr.Slider(0.80, 0.99, value=0.95, step=0.01, label="GLB simplify ratio")
                    tex_size = gr.Slider(512, 2048, value=1024, step=512, label="Texture resolution")
                btn_clear = gr.Button("Clear session cache", variant="secondary")
            # ── CENTER: linear output area ────────────────────────────────────
            with gr.Column(scale=8, min_width=520):
                status = gr.Markdown(
                    "Ready — upload an image and click **① Generate Geometry** to begin.",
                    elem_classes=["status-footer"],
                )
                with gr.Row():
                    mesh_view = gr.Model3D(label="① Geometry", height=250)
                    glb_view = gr.Model3D(label="④ PBR GLB (download)", height=250)
                    vid_cam = gr.Video(label="③ Camera orbit", autoplay=True, loop=True, height=250)
                    vid_hdri = gr.Video(label="③ HDRI rotation", autoplay=True, loop=True, height=250)
                with gr.Accordion("HDRI env roll", open=False):
                    vid_roll = gr.Video(label="③ HDRI env roll", autoplay=True, loop=True, height=180)
            # ── RIGHT: examples sidebar ───────────────────────────────────────
            with gr.Column(scale=2, min_width=160, elem_classes=["sidebar-examples"]):
                if _img_ex:
                    with gr.Column(elem_classes=["img-gallery"]):
                        gr.Examples(
                            examples=_img_ex,
                            inputs=[image_input],
                            fn=preprocess_image_only,
                            outputs=[image_input],
                            run_on_click=True,
                            label="Image examples",
                            examples_per_page=18,
                        )
                if _hdri_ex:
                    gr.Examples(
                        examples=_hdri_ex,
                        inputs=[hdri_file],
                        label="HDRI examples",
                        examples_per_page=8,
                    )
        # ── Event wiring ──────────────────────────────────────────────────────
        demo.load(start_session)
        demo.unload(end_session)
        image_input.upload(preprocess_image_only, [image_input], [image_input])
        # Preview the HDRI both on upload and when an example sets the value.
        for _evt in (hdri_file.upload, hdri_file.change):
            _evt(preview_hdri, [hdri_file], [hdri_preview])
        btn_geo.click(generate_geometry, [image_input], [asset_state, mesh_view, status])
        btn_slat.click(generate_slat, [asset_state, image_input, seed], [asset_state, status])
        btn_load_slat.click(load_slat_file, [slat_upload, slat_path_txt], [asset_state, status])
        btn_videos.click(generate_videos,
                         [asset_state, hdri_file, hdri_rot, fps, num_cam, num_hdri, yaw, pitch, fov, radius],
                         [vid_cam, vid_hdri, vid_roll, status])
        btn_glb.click(generate_glb, [asset_state, hdri_file, hdri_rot, simplify, tex_size], [glb_view, status])
        btn_clear.click(clear_cache, [], [status])
    return demo

demo = build_app()
demo.queue(max_size=8)

# ── Startup ─────────────────────────────────────────────────────────────────
if _CPU_PRELOAD:
    threading.Thread(target=_preload_worker, daemon=True, name="near-cpu-preload").start()

if __name__ == "__main__":
    # NOTE(review): gr.Blocks.launch() does not document `theme`/`css`
    # parameters (they are gr.Blocks constructor args, already passed in
    # build_app) — confirm these kwargs are accepted by the pinned Gradio
    # version or drop them.
    demo.launch(theme=THEME, css=CSS)