| import os |
| import sys |
| import shutil |
| import threading |
| import time |
| from pathlib import Path |
| from typing import Any, Dict, Optional |
|
|
| import gradio as gr |
|
|
| try: |
| import spaces |
| except ImportError: |
| spaces = None |
| import imageio |
| import numpy as np |
| import torch |
| import trimesh |
| from PIL import Image |
| from simple_ocio import ToneMapper |
|
|
sys.path.insert(0, "./hy3dshape")  # make the vendored Hunyuan3D package importable

# Environment knobs set before the pipeline imports below (they may read
# these at import time — TODO confirm).
os.environ.setdefault("ATTN_BACKEND", "xformers")
os.environ.setdefault("SPCONV_ALGO", "native")
os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "7.5;8.0;8.6;8.9;9.0")


from trellis.pipelines import NeARImageToRelightable3DPipeline
from hy3dshape.pipelines import Hunyuan3DDiTFlowMatchingPipeline


# On HF Spaces, `spaces.GPU` schedules the wrapped function on a GPU worker;
# locally (spaces is None) it degrades to an identity decorator.
GPU = spaces.GPU if spaces is not None else (lambda f: f)


APP_DIR = Path(__file__).resolve().parent
CACHE_DIR = APP_DIR / "tmp_gradio"  # per-session scratch space lives under here
CACHE_DIR.mkdir(exist_ok=True)
|
|
|
|
def _path_is_git_lfs_pointer(p: Path) -> bool:
    """Return True if *p* looks like an unfetched Git LFS pointer stub.

    Real pointer files are tiny (well under 512 bytes) and start with the
    LFS spec line; any filesystem error is treated as "not a pointer".
    """
    try:
        if not p.is_file() or p.stat().st_size > 512:
            return False
        head = p.read_bytes()[:120]
    except OSError:
        return False
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")
|
|
|
|
def _warn_example_assets() -> None:
    """Print startup warnings when example assets are missing or are LFS stubs."""
    img_dir = APP_DIR / "assets/example_image"
    if not img_dir.is_dir():
        print(
            "[NeAR] WARNING: assets/example_image/ is missing — commit and push the full assets/ tree.",
            flush=True,
        )
        return
    probe = img_dir / "T.png"
    if probe.is_file() and _path_is_git_lfs_pointer(probe):
        print(
            "[NeAR] WARNING: assets look like Git LFS pointers (not real PNG/NPZ/EXR bytes). "
            "Run: git lfs install && git lfs push --all origin (from a clone that has full files).",
            flush=True,
        )
|
|
|
|
# Emit asset warnings once at startup, before the UI builds its example lists.
_warn_example_assets()


DEFAULT_IMAGE = APP_DIR / "assets/example_image/T.png"
DEFAULT_HDRI = APP_DIR / "assets/hdris/studio_small_03_1k.exr"
MAX_SEED = np.iinfo(np.int32).max  # upper bound for the SLaT seed slider
|
|
|
|
def start_session(req: gr.Request):
    """Create the per-session scratch directory under CACHE_DIR."""
    (CACHE_DIR / str(req.session_hash)).mkdir(parents=True, exist_ok=True)
| |
| |
def end_session(req: gr.Request):
    """Tear down a session: remove its cache dir and any in-memory SLaT.

    Registered on demo.unload, so it must never raise: the directory may
    not exist if start_session failed or the session never produced files.
    """
    user_dir = CACHE_DIR / str(req.session_hash)
    # ignore_errors fixes a crash when the directory is already gone.
    shutil.rmtree(user_dir, ignore_errors=True)
    _SESSION_SLAT.pop(str(req.session_hash), None)
|
|
|
|
def get_file_path(file_obj: Any) -> Optional[str]:
    """Best-effort extraction of a filesystem path from a Gradio file value.

    Accepts None, a plain string, or any object exposing a non-empty string
    in one of its `name`/`path`/`value` attributes (checked in that order).
    Returns None when no path can be found.
    """
    if file_obj is None:
        return None
    if isinstance(file_obj, str):
        return file_obj
    candidates = (getattr(file_obj, attr, None) for attr in ("name", "path", "value"))
    return next((c for c in candidates if isinstance(c, str) and c), None)
|
|
|
|
# Heavy pipelines are populated at the bottom of the module, after all UI
# helpers are defined.
PIPELINE: Optional[NeARImageToRelightable3DPipeline] = None
GEOMETRY_PIPELINE: Optional[Hunyuan3DDiTFlowMatchingPipeline] = None
tone_mapper = ToneMapper()
# `available_views` may be absent on some simple_ocio versions; fall back to AgX.
AVAILABLE_TONE_MAPPERS = getattr(tone_mapper, "available_views", ["AgX"])


# In-memory SLaT cache keyed by Gradio session hash (cleared in end_session).
_SESSION_SLAT: Dict[str, Any] = {}
|
|
def set_tone_mapper(view_name: str):
    """Switch the global pipeline's tone mapper; no-op before the pipeline loads."""
    if not view_name or PIPELINE is None:
        return
    PIPELINE.setup_tone_mapper(view_name)
|
|
from hy3dshape.rembg import BackgroundRemover
# Shared background remover used by the lightweight image preprocessor below.
LIGHT_PREPROCESSOR = BackgroundRemover()
|
|
|
|
def _preprocess_image_rgba_light(input_image: Image.Image) -> Image.Image:
    """Normalize an input image to a 518x518 RGBA crop centered on the subject.

    If the image already carries a meaningful alpha channel it is used as-is;
    otherwise the background is removed with LIGHT_PREPROCESSOR (after
    downscaling so the longest side is at most 1024 px). The result is then
    cropped to the alpha-defined bounding box with 20% padding and resized
    to 518x518 (the size the rest of the app expects).
    """
    image = _ensure_rgba(input_image)
    # "Meaningful" alpha = at least one pixel that is not fully opaque.
    has_alpha = False
    if image.mode == "RGBA":
        alpha = np.array(image)[:, :, 3]
        has_alpha = not np.all(alpha == 255)

    if has_alpha:
        output = image
    else:
        # No usable alpha: run background removal on a bounded-size RGB copy.
        rgb = image.convert("RGB")
        max_size = max(rgb.size)
        scale = min(1, 1024 / max_size)
        if scale < 1:
            rgb = rgb.resize(
                (int(rgb.width * scale), int(rgb.height * scale)),
                Image.Resampling.LANCZOS,
            )
        output = LIGHT_PREPROCESSOR(rgb)

    if output.mode != "RGBA":
        output = output.convert("RGBA")
    output_np = np.array(output)
    alpha = output_np[:, :, 3]
    # Keep only pixels with alpha above ~80% for the bounding box.
    bbox = np.argwhere(alpha > 0.8 * 255)
    if bbox.size == 0:
        # Fully transparent result: nothing to crop, just resize.
        return output.resize((518, 518), Image.Resampling.LANCZOS).convert("RGBA")
    # argwhere yields (row, col) = (y, x); crop_bbox is (left, top, right, bottom).
    crop_bbox = (
        int(np.min(bbox[:, 1])),
        int(np.min(bbox[:, 0])),
        int(np.max(bbox[:, 1])),
        int(np.max(bbox[:, 0])),
    )
    center = ((crop_bbox[0] + crop_bbox[2]) / 2, (crop_bbox[1] + crop_bbox[3]) / 2)
    # Square crop of the larger bbox side, padded by 20%.
    size = max(crop_bbox[2] - crop_bbox[0], crop_bbox[3] - crop_bbox[1])
    size = int(size * 1.2)
    # NOTE(review): center is a float, so this box can contain floats;
    # Pillow tolerates float crop boxes — confirm this is intended.
    padded_bbox = (
        center[0] - size // 2,
        center[1] - size // 2,
        center[0] + size // 2,
        center[1] + size // 2,
    )
    return output.crop(padded_bbox).resize((518, 518), Image.Resampling.LANCZOS).convert("RGBA")
|
|
|
|
def _flatten_rgba_on_matte(image: Image.Image, matte_rgb: tuple[float, float, float]) -> Image.Image:
    """Composite an RGBA image over a solid matte color (delegates to the pipeline)."""
    flatten = NeARImageToRelightable3DPipeline.flatten_rgba_on_matte
    return flatten(image, matte_rgb)
|
|
|
|
def preview_hdri(hdri_file_obj: Any):
    """Tone-map an uploaded `.exr` into an 8-bit preview plus a status string."""
    hdri_path = get_file_path(hdri_file_obj)
    if not hdri_path:
        return None, "Upload an HDRI `.exr` (left column)."
    # Deferred import: pyexr is only needed once an HDRI actually arrives.
    import pyexr

    hdr = pyexr.read(hdri_path)[..., :3]
    ldr = ToneMapper(view="Khronos PBR Neutral").hdr_to_ldr(hdr)
    preview = (np.clip(ldr, 0, 1) * 255).astype(np.uint8)
    return preview, f"HDRI **{Path(hdri_path).name}** — preview updated."
|
|
|
|
def switch_asset_source(mode: str):
    """Select the source tab: 0 = image workflow, 1 = existing-SLaT workflow."""
    selected_tab = 1 if mode == "From Existing SLaT" else 0
    return gr.Tabs(selected=selected_tab)
|
|
|
|
def _ensure_rgba(img: Image.Image) -> Image.Image:
    """Return *img* unchanged when already RGBA, otherwise a converted copy."""
    return img if img.mode == "RGBA" else img.convert("RGBA")
|
|
|
|
@torch.inference_mode()
def preprocess_image_only(image_input: Optional[Image.Image]):
    """Run background-removal/crop preprocessing for the example-gallery click."""
    return None if image_input is None else _preprocess_image_rgba_light(image_input)
|
|
|
|
@GPU
@torch.inference_mode()
def generate_mesh(
    image_input: Optional[Image.Image],
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Step ①: turn the input image into a base mesh (GLB) via Hunyuan3D.

    Returns (asset_state, mesh_path, status_markdown) for the UI, and
    clears any SLaT cached for this session since it belonged to the
    previous mesh.
    """
    session_dir = CACHE_DIR / str(req.session_hash)

    if image_input is None:
        raise gr.Error("Please upload an input image.")

    # 518x518 is exactly what _preprocess_image_rgba_light emits; any other
    # size means the image has not been preprocessed yet.
    rgba = _ensure_rgba(image_input)
    if rgba.size != (518, 518):
        rgba = _preprocess_image_rgba_light(rgba)

    # Geometry generation gets the subject composited on a white matte.
    mesh_rgb = _flatten_rgba_on_matte(rgba, (1.0, 1.0, 1.0))
    rgba.save(session_dir / "input_preprocessed_rgba.png")
    mesh_rgb.save(session_dir / "input_processed.png")

    progress(0.6, desc="Generating geometry")
    mesh = GEOMETRY_PIPELINE(image=mesh_rgb)[0]
    mesh_path = session_dir / "initial_3d_shape.glb"
    mesh.export(mesh_path)

    # Invalidate any SLaT from an earlier run of this session.
    _SESSION_SLAT.pop(str(req.session_hash), None)
    state = {
        "mode": "image",
        "mesh_path": str(mesh_path),
        "processed_image_path": str(session_dir / "input_processed.png"),
        "slat_path": None,
        "slat_in_memory": False,
    }
    return (
        state,
        str(mesh_path),
        "**Mesh ready** — Click **② Generate / Load SLaT** to continue.",
    )
|
|
|
|
@GPU
@torch.inference_mode()
def _generate_slat_inner(
    asset_state: Dict[str, Any],
    image_input: Optional[Image.Image],
    seed: int,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """GPU body for SLaT generation — must be called from within a @GPU context.

    Uses the mesh produced by generate_mesh for voxel coordinates and the
    (re-)preprocessed input image for appearance. The resulting SLaT is
    kept in _SESSION_SLAT rather than written to disk.
    """
    if not asset_state or not asset_state.get("mesh_path"):
        raise gr.Error("Please run ① Generate Mesh first.")
    mesh_path = asset_state["mesh_path"]
    if not os.path.exists(mesh_path):
        raise gr.Error("Mesh file not found — please regenerate the mesh.")

    if image_input is None:
        raise gr.Error("Preprocessed image not found — please upload the image again.")

    progress(0.1, desc="Loading mesh")
    mesh = trimesh.load(mesh_path, force="mesh")
    rgba = _ensure_rgba(image_input)
    if rgba.size != (518, 518):
        rgba = _preprocess_image_rgba_light(rgba)
    # SLaT generation composites on black (vs. white for geometry above).
    slat_rgb = _flatten_rgba_on_matte(rgba, (0.0, 0.0, 0.0))

    progress(0.3, desc="Computing SLaT coordinates")
    coords = PIPELINE.shape_to_coords(mesh)

    progress(0.6, desc="Generating SLaT")
    slat = PIPELINE.run_with_coords([slat_rgb], coords, seed=int(seed), preprocess_image=False)

    # Cache in memory for this session; the state flags tell later steps so.
    _SESSION_SLAT[str(req.session_hash)] = slat
    new_state = {**asset_state, "slat_path": None, "slat_in_memory": True}
    return new_state, f"**Asset ready** — SLaT generated (seed `{seed}`)."
|
|
|
|
def _load_slat_file_inner(slat_upload: Any, slat_path_text: str, req: gr.Request):
    """Resolve a SLaT `.npz` (upload wins over the text field) into asset state."""
    typed = slat_path_text.strip() if slat_path_text else ""
    resolved = get_file_path(slat_upload) or typed
    if not resolved:
        raise gr.Error("Please provide a SLaT `.npz` path or upload one.")
    if not os.path.exists(resolved):
        raise gr.Error(f"SLaT file not found: `{resolved}`")
    # A freshly loaded file supersedes any SLaT generated earlier this session.
    _SESSION_SLAT.pop(str(req.session_hash), None)
    state = {
        "mode": "slat",
        "slat_path": resolved,
        "mesh_path": None,
        "processed_image_path": None,
        "slat_in_memory": False,
    }
    return state, f"SLaT **{Path(resolved).name}** loaded."
|
|
|
|
@GPU
@torch.inference_mode()
def prepare_slat(
    source_mode: str,
    asset_state: Dict[str, Any],
    image_input: Optional[Image.Image],
    seed: int,
    slat_upload: Any,
    slat_path_text: str,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Step ②: dispatch on the source radio — generate a SLaT from the image
    on GPU, or load an existing `.npz` from disk/upload."""
    from_image = source_mode == "From Image"
    if not from_image:
        return _load_slat_file_inner(slat_upload, slat_path_text, req)
    return _generate_slat_inner(asset_state, image_input, seed, req, progress)
|
|
|
|
def require_asset_state(asset_state: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    """Return *asset_state* if it holds a SLaT (in memory or on disk);
    otherwise raise a user-facing error."""
    has_slat = bool(asset_state) and (
        asset_state.get("slat_in_memory") or asset_state.get("slat_path")
    )
    if not has_slat:
        raise gr.Error("Please generate or load a SLaT first.")
    return asset_state
|
|
|
|
def load_asset_and_hdri(asset_state: Dict[str, Any], hdri_file_obj: Any, req: gr.Request):
    """Resolve the session's SLaT (memory or disk) and the HDRI into objects.

    Raises gr.Error with actionable messages when either is missing.
    Returns (slat, hdri_numpy_array).
    """
    asset_state = require_asset_state(asset_state)
    hdri_path = get_file_path(hdri_file_obj)
    if not hdri_path:
        raise gr.Error("Please upload an HDRI `.exr` file.")
    if asset_state.get("slat_in_memory"):
        # The in-memory SLaT can disappear (e.g. worker restart): ask the
        # user to regenerate rather than failing opaquely.
        slat = _SESSION_SLAT.get(str(req.session_hash))
        if slat is None:
            raise gr.Error("SLaT session expired — run **② Generate / Load SLaT** again.")
    else:
        slat_path = asset_state.get("slat_path")
        if not slat_path:
            raise gr.Error("Please generate or load a SLaT first.")
        slat = PIPELINE.load_slat(slat_path)
    hdri_np = PIPELINE.load_hdri(hdri_path)
    return slat, hdri_np
|
|
|
|
@GPU
@torch.inference_mode()
def render_preview(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    hdri_rot: float,
    yaw: float,
    pitch: float,
    fov: float,
    radius: float,
    resolution: int,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Render a single relit view plus its PBR channel breakdowns.

    Returns images in the fixed order (color, base_color, metallic,
    roughness, shadow) followed by a status-markdown string — this order
    must match the outputs list wired up in build_app.
    """
    t0 = time.time()
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.1, desc="Loading SLaT and HDRI")
    slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)

    progress(0.5, desc="Rendering")
    views = PIPELINE.render_view(
        slat, hdri_np,
        yaw_deg=yaw, pitch_deg=pitch, fov=fov, radius=radius,
        hdri_rot_deg=hdri_rot, resolution=int(resolution),
    )
    # Persist every channel to the session dir for debugging/download.
    for key, image in views.items():
        image.save(session_dir / f"preview_{key}.png")
    print(f"[NeAR] render_preview {time.time() - t0:.1f}s", flush=True)

    msg = (
        f"**Preview done** — "
        f"yaw `{yaw:.0f}°` pitch `{pitch:.0f}°` · "
        f"fov `{fov:.0f}` radius `{radius:.1f}` · HDRI rot `{hdri_rot:.0f}°`"
    )
    return (
        views["color"],
        views["base_color"],
        views["metallic"],
        views["roughness"],
        views["shadow"],
        msg,
    )
|
|
|
|
@GPU
@torch.inference_mode()
def render_camera_video(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    hdri_rot: float,
    fps: int,
    num_views: int,
    fov: float,
    radius: float,
    full_video: bool,
    shadow_video: bool,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Render an orbiting-camera video of the relit asset.

    Parameters mirror the UI sliders; `full_video` selects the composite
    layout and the output filename. Returns (video_path, status_markdown).
    """
    t0 = time.time()
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.1, desc="Loading SLaT and HDRI")
    slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)

    progress(0.4, desc="Rendering camera path")
    frames = PIPELINE.render_camera_path_video(
        slat, hdri_np,
        num_views=int(num_views), fov=fov, radius=radius,
        hdri_rot_deg=hdri_rot, full_video=full_video, shadow_video=shadow_video,
        bg_color=(1, 1, 1), verbose=True,
    )
    video_path = session_dir / ("camera_path_full.mp4" if full_video else "camera_path.mp4")
    imageio.mimsave(video_path, frames, fps=int(fps))
    print(f"[NeAR] render_camera_video {time.time() - t0:.1f}s", flush=True)
    # Plain literal: the previous f-string had no placeholders (ruff F541).
    return str(video_path), "**Camera path video saved**"
|
|
|
|
@GPU
@torch.inference_mode()
def render_hdri_video(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    fps: int,
    num_frames: int,
    yaw: float,
    pitch: float,
    fov: float,
    radius: float,
    full_video: bool,
    shadow_video: bool,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Render a fixed-camera video while the HDRI environment rotates.

    Produces two files: the rolling environment panorama and the relit
    render. Returns (hdri_roll_path, render_path, status_markdown).
    """
    t0 = time.time()
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.1, desc="Loading SLaT and HDRI")
    slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)

    progress(0.4, desc="Rendering HDRI rotation")
    hdri_roll_frames, render_frames = PIPELINE.render_hdri_rotation_video(
        slat, hdri_np,
        num_frames=int(num_frames), yaw_deg=yaw, pitch_deg=pitch,
        fov=fov, radius=radius, full_video=full_video, shadow_video=shadow_video,
        bg_color=(1, 1, 1), verbose=True,
    )
    hdri_roll_path = session_dir / "hdri_roll.mp4"
    render_path = session_dir / ("hdri_rotation_full.mp4" if full_video else "hdri_rotation.mp4")
    imageio.mimsave(hdri_roll_path, hdri_roll_frames, fps=int(fps))
    imageio.mimsave(render_path, render_frames, fps=int(fps))
    print(f"[NeAR] render_hdri_video {time.time() - t0:.1f}s", flush=True)
    return str(hdri_roll_path), str(render_path), "**HDRI rotation video saved**"
|
|
|
|
@GPU
def export_glb(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    hdri_rot: float,
    simplify: float,
    texture_size: int,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Bake PBR textures and export the asset as a downloadable GLB.

    Returns (glb_path, status_markdown).
    NOTE(review): unlike the render_* handlers this is not wrapped in
    @torch.inference_mode() — confirm whether that is intentional.
    """
    t0 = time.time()
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.1, desc="Loading SLaT and HDRI")
    slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)

    progress(0.6, desc="Baking PBR textures")
    glb = PIPELINE.export_glb_from_slat(
        slat, hdri_np,
        hdri_rot_deg=hdri_rot, base_mesh=None,
        simplify=simplify, texture_size=int(texture_size), fill_holes=True,
    )
    glb_path = session_dir / "near_pbr.glb"
    glb.export(glb_path)
    print(f"[NeAR] export_glb {time.time() - t0:.1f}s", flush=True)
    return str(glb_path), f"PBR GLB exported: **{glb_path.name}**"
|
|
|
|
# Global CSS overrides injected into the Blocks app. Mostly defensive
# !important rules that flatten Gradio's default block chrome (borders,
# panels) and style the custom elem_classes used in build_app.
CUSTOM_CSS = """
.gradio-container { max-width: 100% !important; width: 100% !important; }
main.gradio-container { max-width: 100% !important; }
.gradio-wrap { max-width: 100% !important; }

/* Top header: TRELLIS-style left-aligned title + bullets */
.near-app-header {
    text-align: left !important;
    padding: 0.35rem 0 1.1rem 0 !important;
    margin: 0 !important;
}
.near-app-header .prose,
.near-app-header p { margin: 0 !important; }
.near-app-header h2 {
    font-size: clamp(1.35rem, 2.4vw, 1.85rem) !important;
    font-weight: 700 !important;
    letter-spacing: -0.02em !important;
    margin: 0 0 0.45rem 0 !important;
    line-height: 1.25 !important;
}
.near-app-header h2 a {
    color: var(--link-text-color, var(--color-accent)) !important;
    text-decoration: none !important;
}
.near-app-header h2 a:hover { text-decoration: underline !important; }
.near-app-header ul {
    margin: 0 !important;
    padding-left: 1.2rem !important;
    font-size: 0.88rem !important;
    color: #4b5563 !important;
    line-height: 1.45 !important;
}
.near-app-header li { margin: 0.15rem 0 !important; }

/* Left column: compact section labels (no numbered circles) */
.section-kicker {
    font-size: 0.7rem !important;
    font-weight: 700 !important;
    color: #9ca3af !important;
    text-transform: uppercase !important;
    letter-spacing: 0.08em !important;
    margin: 0 0 0.45rem 0 !important;
    padding: 0 !important;
}

/* HDRI file picker: light card instead of default dark block */
.hdri-upload-zone,
.hdri-file-input,
.hdri-upload-zone .upload-container,
.hdri-upload-zone [data-testid="file-upload"],
.hdri-file-input [data-testid="file-upload"],
.hdri-upload-zone .file-preview,
.hdri-file-input .file-preview,
.hdri-upload-zone .wrap,
.hdri-file-input .wrap,
.hdri-upload-zone .panel,
.hdri-file-input .panel {
    background: #f9fafb !important;
    border-color: #e5e7eb !important;
    color: #374151 !important;
}
.hdri-upload-zone .file-preview,
.hdri-file-input .file-preview { border-radius: 8px !important; }
.hdri-upload-zone .label-wrap,
.hdri-file-input .label-wrap { color: #4b5563 !important; }

/* HDRI preview image: remove thick / black frame (Gradio panel border) */
.hdri-preview-image,
.hdri-preview-image.panel,
.hdri-preview-image .wrap,
.hdri-preview-image .image-container,
.hdri-preview-image .image-frame,
.hdri-preview-image .image-wrapper,
.hdri-preview-image [data-testid="image"],
.hdri-preview-image .icon-buttons,
.hdri-preview-image img {
    border: none !important;
    outline: none !important;
    box-shadow: none !important;
}
.hdri-preview-image img {
    border-radius: 8px !important;
}

/* Export accordion: remove heavy black box; keep a light separator on the header only */
.export-accordion,
.export-accordion.panel,
.export-accordion > div,
.export-accordion details,
.export-accordion .label-wrap,
.export-accordion .accordion-header {
    border: none !important;
    outline: none !important;
    box-shadow: none !important;
}
.export-accordion summary,
.export-accordion .label-wrap {
    border-bottom: 1px solid #e5e7eb !important;
    background: transparent !important;
}

/* Gradio 4+ block chrome sometimes forces --block-border-color */
.gradio-container .hdri-preview-image,
.gradio-container .export-accordion {
    --block-border-width: 0px !important;
    --panel-border-width: 0 !important;
}

/* Shadow map preview: same flat frame as HDRI preview */
.shadow-preview-image,
.shadow-preview-image.panel,
.shadow-preview-image .wrap,
.shadow-preview-image .image-container,
.shadow-preview-image .image-frame,
.shadow-preview-image .image-wrapper,
.shadow-preview-image [data-testid="image"],
.shadow-preview-image img {
    border: none !important;
    outline: none !important;
    box-shadow: none !important;
}
.shadow-preview-image img { border-radius: 8px !important; }
.gradio-container .shadow-preview-image {
    --block-border-width: 0px !important;
    --panel-border-width: 0 !important;
}

/* Main output tabs: larger, easier to spot */
.main-output-tabs > .tab-nav,
.main-output-tabs .tab-nav button {
    font-size: 0.95rem !important;
    font-weight: 600 !important;
}
.main-output-tabs .tab-nav button { padding: 0.45rem 0.9rem !important; }

/* Status strip: one left accent only (Gradio panel also draws accent — disable it here) */
.gradio-container .status-footer,
.status-footer.panel,
.status-footer.block {
    --block-border-width: 0px !important;
    --panel-border-width: 0px !important;
}
.status-footer {
    font-size: 0.8125rem !important;
    line-height: 1.45 !important;
    color: var(--body-text-color-subdued, #6b7280) !important;
    margin: 0 0 0.65rem 0 !important;
    padding: 0.5rem 0.65rem 0.5rem 0.7rem !important;
    background: var(--block-background-fill, #f9fafb) !important;
    /* Single box: one thick left edge (avoid stacking with Gradio .block border) */
    border-width: 1px 1px 1px 3px !important;
    border-style: solid !important;
    border-color: var(--border-color-primary, #e5e7eb) var(--border-color-primary, #e5e7eb)
        var(--border-color-primary, #e5e7eb) var(--color-accent, #2563eb) !important;
    border-radius: 8px !important;
    box-shadow: 0 1px 2px rgba(15, 23, 42, 0.05) !important;
}
.status-footer .form,
.status-footer .wrap,
.status-footer .prose,
.status-footer .prose > *:first-child {
    border: none !important;
    box-shadow: none !important;
}
.status-footer .prose blockquote {
    border-left: none !important;
    padding-left: 0 !important;
    margin-left: 0 !important;
}
.status-footer p,
.status-footer .prose p {
    margin: 0 !important;
    line-height: 1.05 !important;
}
.status-footer strong {
    color: var(--body-text-color, #374151) !important;
    font-weight: 600 !important;
}
.status-footer a {
    color: var(--link-text-color, var(--color-accent, #2563eb)) !important;
    text-decoration: none !important;
}
.status-footer a:hover { text-decoration: underline !important; }

.ctrl-strip {
    border:1px solid #e5e7eb; border-radius:8px;
    padding:0.55rem 0.8rem 0.4rem; margin-bottom:0.6rem; background:#fff;
}
.ctrl-strip-title {
    font-size:0.72rem; font-weight:600; color:#9ca3af;
    text-transform:uppercase; letter-spacing:0.06em; margin-bottom:0.4rem;
}

.mat-label {
    font-size:0.72rem; font-weight:700; color:#9ca3af;
    text-transform:uppercase; letter-spacing:0.07em; margin:0.7rem 0 0.2rem;
}

.divider { border:none; border-top:1px solid #e5e7eb; margin:0.5rem 0; }

.img-gallery table { display:grid !important; grid-template-columns:repeat(3,1fr) !important; gap:3px !important; }
.img-gallery table thead { display:none !important; }
.img-gallery table tr { display:contents !important; }
.img-gallery table td { padding:0 !important; }
.img-gallery table td img { width:100% !important; height:68px !important; object-fit:cover !important; border-radius:5px !important; }

.hdri-gallery table { display:grid !important; grid-template-columns:repeat(2,1fr) !important; gap:3px !important; }
.hdri-gallery table thead { display:none !important; }
.hdri-gallery table tr { display:contents !important; }
.hdri-gallery table td { padding:0 !important; font-size:0.76rem; text-align:center; word-break:break-all; }

/* Right sidebar: align with TRELLIS-style narrow examples column */
.sidebar-examples { min-width: 0 !important; }
.sidebar-examples .label-wrap { font-size: 0.85rem !important; }
.gradio-container .sidebar-examples table { width: 100% !important; }

footer { display:none !important; }
"""


# Base theme with blue accents to match the NeAR branding.
NEAR_GRADIO_THEME = gr.themes.Base(
    primary_hue=gr.themes.colors.blue,
    secondary_hue=gr.themes.colors.blue,
)
|
|
|
|
def build_app() -> gr.Blocks:
    """Assemble the full Gradio Blocks UI and wire all event handlers.

    Layout: left column (asset / HDRI / export inputs), middle column
    (status strip + camera controls + output tabs), right column
    (example galleries).
    """
    with gr.Blocks(
        title="NeAR",
        theme=NEAR_GRADIO_THEME,
        delete_cache=None,
        fill_width=True,
    ) as demo:
        # Per-session asset bookkeeping dict (see generate_mesh/prepare_slat).
        asset_state = gr.State({})

        gr.Markdown(
            """
            ## Single Image to Relightable 3DGS with [NeAR](https://near-project.github.io/)
            * Upload an RGBA image (or load an existing SLaT), run **Generate Mesh** then **Generate / Load SLaT**, pick an HDRI, and use **Camera & HDRI** to relight.
            * Use **Geometry** for mesh / PBR preview, **Preview** for still renders, **Videos** for camera or HDRI paths; **Export PBR GLB** when you are happy with the result.
            * Texture style transfer is possible when the reference images used for **mesh** and **SLaT** are different.
            """,
            elem_classes=["near-app-header"],
        )

        # Example lists: skip files that are unfetched Git LFS pointer stubs.
        _img_ex = [
            [str(p)]
            for p in sorted((APP_DIR / "assets/example_image").glob("*.png"))
            if not _path_is_git_lfs_pointer(p)
        ]
        _slat_ex = [
            [str(p)]
            for p in sorted((APP_DIR / "assets/example_slats").glob("*.npz"))
            if not _path_is_git_lfs_pointer(p)
        ]
        _hdri_ex = [
            [str(p)]
            for p in sorted((APP_DIR / "assets/hdris").glob("*.exr"))
            if not _path_is_git_lfs_pointer(p)
        ]
        if not _img_ex and (APP_DIR / "assets/example_image").is_dir():
            print(
                "[NeAR] WARNING: no usable PNG examples (empty dir or all Git LFS pointers).",
                flush=True,
            )

        with gr.Row(equal_height=False):

            # ---- Left column: asset source, HDRI picker, export settings ----
            with gr.Column(scale=1, min_width=360):

                with gr.Group():
                    gr.HTML('<p class="section-kicker">Asset</p>')
                    source_mode = gr.Radio(
                        ["From Image", "From Existing SLaT"],
                        value="From Image",
                        label="",
                        show_label=False,
                    )
                    with gr.Tabs(selected=0) as source_tabs:

                        with gr.Tab("Image", id=0):
                            image_input = gr.Image(
                                label="Input Image", type="pil", image_mode="RGBA",
                                value=str(DEFAULT_IMAGE) if DEFAULT_IMAGE.exists() else None,
                                height=400,
                            )
                            seed = gr.Slider(0, MAX_SEED, value=43, step=1, label="Seed (SLaT)")
                            mesh_button = gr.Button("① Generate Mesh", variant="primary", min_width=100)

                        with gr.Tab("SLaT", id=1):
                            slat_upload = gr.File(label="Upload SLaT (.npz)", file_types=[".npz"])
                            slat_path_text = gr.Textbox(
                                label="Or enter local path",
                                placeholder="/path/to/sample_slat.npz",
                            )

                    slat_button = gr.Button(
                        "② Generate / Load SLaT", variant="primary", min_width=100,
                    )

                with gr.Group():
                    gr.HTML('<p class="section-kicker">HDRI</p>')
                    with gr.Column(elem_classes=["hdri-upload-zone"]):
                        hdri_file = gr.File(
                            label="Environment (.exr)", file_types=[".exr"],
                            value=str(DEFAULT_HDRI) if DEFAULT_HDRI.exists() else None,
                            elem_classes=["hdri-file-input"],
                        )
                        hdri_preview = gr.Image(
                            label="Preview",
                            interactive=False,
                            height=130,
                            container=False,
                            elem_classes=["hdri-preview-image"],
                        )

                with gr.Group():
                    gr.HTML('<p class="section-kicker">Export</p>')
                    with gr.Accordion(
                        "Export Settings",
                        open=False,
                        elem_classes=["export-accordion"],
                    ):
                        with gr.Row():
                            simplify = gr.Slider(0.8, 0.99, value=0.95, step=0.01, label="Mesh Simplify")
                            texture_size = gr.Slider(512, 4096, value=2048, step=512, label="Texture Size")

                with gr.Row():
                    # NOTE(review): clear_button has no click handler wired
                    # below — dead control, or handler missing? Confirm.
                    clear_button = gr.Button("Clear Cache", variant="secondary", min_width=100)

            # ---- Middle column: status, camera controls, output tabs ----
            with gr.Column(scale=10, min_width=560):

                status_md = gr.Markdown(
                    "Ready — use **Asset** (left) and **HDRI** to begin.",
                    elem_classes=["status-footer"],
                )

                with gr.Group(elem_classes=["ctrl-strip"]):
                    gr.HTML("<div class='ctrl-strip-title'>Camera & HDRI</div>")
                    with gr.Row():
                        tone_mapper_name = gr.Dropdown(
                            choices=AVAILABLE_TONE_MAPPERS,
                            value="AgX",
                            label="Tone Mapper",
                            min_width=120,
                        )
                        hdri_rot = gr.Slider(0, 360, value=0, step=1, label="HDRI Rotation °")
                        resolution = gr.Slider(256, 1024, value=512, step=256, label="Preview Res")
                    with gr.Row():
                        yaw = gr.Slider(0, 360, value=0, step=0.5, label="Yaw °")
                        pitch = gr.Slider(-90, 90, value=0, step=0.5, label="Pitch °")
                        fov = gr.Slider(10, 70, value=40, step=1, label="FoV")
                        radius = gr.Slider(1.0, 4.0, value=2.0, step=0.05, label="Radius")

                tone_mapper_name.change(
                    set_tone_mapper,
                    inputs=[tone_mapper_name],
                    outputs=[],
                )

                with gr.Tabs(elem_classes=["main-output-tabs"]):

                    with gr.Tab("Geometry", id=0):
                        with gr.Row():
                            mesh_viewer = gr.Model3D(
                                label="3D Mesh", interactive=False, height=520,
                            )
                            pbr_viewer = gr.Model3D(
                                label="PBR GLB", interactive=False, height=520,
                            )
                        gr.HTML("<hr class='divider'>")
                        with gr.Row():
                            export_glb_button = gr.Button("Export PBR GLB", variant="primary", min_width=140)

                    with gr.Tab("Preview", id=1):
                        preview_button = gr.Button("Render Preview", variant="primary", min_width=100)
                        gr.HTML("<hr class='divider'>")
                        with gr.Row():
                            color_output = gr.Image(label="Relit Result", interactive=False, height=400)
                            with gr.Column():
                                with gr.Row():
                                    base_color_output = gr.Image(label="Base Color", interactive=False, height=200)
                                    metallic_output = gr.Image(label="Metallic", interactive=False, height=200)
                                with gr.Row():
                                    roughness_output = gr.Image(label="Roughness", interactive=False, height=200)
                                    shadow_output = gr.Image(label="Shadow", interactive=False, height=200)

                    with gr.Tab("Videos", id=2):
                        with gr.Accordion("Video Settings", open=False):
                            with gr.Row():
                                fps = gr.Slider(1, 60, value=24, step=1, label="FPS")
                                num_views = gr.Slider(8, 120, value=40, step=1, label="Camera Frames")
                                num_frames = gr.Slider(8, 120, value=40, step=1, label="HDRI Frames")
                            with gr.Row():
                                full_video = gr.Checkbox(label="Full composite video", value=True)
                                shadow_video = gr.Checkbox(
                                    label="Include shadow in video",
                                    value=True,
                                )
                        with gr.Row():
                            camera_video_button = gr.Button("Camera Path Video", variant="primary", min_width=100)
                            hdri_video_button = gr.Button("HDRI Rotation Video", variant="primary", min_width=100)
                        camera_video_output = gr.Video(
                            label="Camera Path", autoplay=True, loop=True, height=340,
                        )
                        hdri_render_video_output = gr.Video(
                            label="HDRI Rotation Render", autoplay=True, loop=True, height=300,
                        )
                        with gr.Accordion("HDRI Roll (environment panorama)", open=False):
                            hdri_roll_video_output = gr.Video(
                                label="HDRI Roll", autoplay=True, loop=True, height=180,
                            )

            # ---- Right column: example galleries (toggled by source_mode) ----
            with gr.Column(scale=1, min_width=172):
                with gr.Column(visible=True, elem_classes=["sidebar-examples", "img-gallery"]) as col_img_examples:
                    if _img_ex:
                        gr.Examples(
                            examples=_img_ex,
                            inputs=[image_input],
                            fn=preprocess_image_only,
                            outputs=[image_input],
                            run_on_click=True,
                            examples_per_page=18,
                            label="Examples",
                        )
                    else:
                        gr.Markdown("*No PNG examples in `assets/example_image`*")

                with gr.Column(visible=False, elem_classes=["sidebar-examples"]) as col_slat_examples:
                    if _slat_ex:
                        gr.Examples(
                            examples=_slat_ex,
                            inputs=[slat_path_text],
                            label="Example SLaTs",
                        )
                    else:
                        gr.Markdown("*No `.npz` examples in `assets/example_slats`*")

                with gr.Column(visible=True, elem_classes=["sidebar-examples", "hdri-gallery"]) as col_hdri_examples:
                    if _hdri_ex:
                        gr.Examples(
                            examples=_hdri_ex,
                            inputs=[hdri_file],
                            label="Example HDRIs",
                            examples_per_page=8,
                        )
                    else:
                        gr.Markdown("*No `.exr` examples in `assets/hdris`*")

        # Session lifecycle: scratch dir created on load, removed on unload.
        demo.load(start_session)
        demo.unload(end_session)

        # Radio toggles both the source tabs and the matching example column.
        source_mode.change(switch_asset_source, inputs=[source_mode], outputs=[source_tabs])
        source_mode.change(
            lambda m: (
                gr.update(visible=m == "From Image"),
                gr.update(visible=m == "From Existing SLaT"),
            ),
            inputs=[source_mode],
            outputs=[col_img_examples, col_slat_examples],
        )

        # NOTE(review): `change` also fires after `upload`, so an upload may
        # run preview_hdri twice — confirm both triggers are needed.
        for _trigger in (hdri_file.upload, hdri_file.change):
            _trigger(
                preview_hdri,
                inputs=[hdri_file],
                outputs=[hdri_preview, status_md],
            )

        image_input.upload(
            preprocess_image_only,
            inputs=[image_input],
            outputs=[image_input],
        )

        mesh_button.click(
            generate_mesh,
            inputs=[image_input],
            outputs=[asset_state, mesh_viewer, status_md],
        )

        slat_button.click(
            prepare_slat,
            inputs=[source_mode, asset_state, image_input, seed, slat_upload, slat_path_text],
            outputs=[asset_state, status_md],
        )

        # Output order must match render_preview's return tuple.
        preview_button.click(
            render_preview,
            inputs=[asset_state, hdri_file, hdri_rot,
                    yaw, pitch, fov, radius, resolution],
            outputs=[
                color_output,
                base_color_output,
                metallic_output,
                roughness_output,
                shadow_output,
                status_md,
            ],
        )

        camera_video_button.click(
            render_camera_video,
            inputs=[asset_state, hdri_file, hdri_rot,
                    fps, num_views, fov, radius, full_video, shadow_video],
            outputs=[camera_video_output, status_md],
        )

        hdri_video_button.click(
            render_hdri_video,
            inputs=[asset_state, hdri_file,
                    fps, num_frames, yaw, pitch, fov, radius, full_video, shadow_video],
            outputs=[hdri_roll_video_output, hdri_render_video_output, status_md],
        )

        export_glb_button.click(
            export_glb,
            inputs=[asset_state, hdri_file, hdri_rot, simplify, texture_size],
            outputs=[pbr_viewer, status_md],
        )
    return demo
|
|
|
|
# Model weights are loaded at import time (once per process).
PIPELINE = NeARImageToRelightable3DPipeline.from_pretrained("luh0502/NeAR")
GEOMETRY_PIPELINE = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained("tencent/Hunyuan3D-2.1")


# When running under HF Spaces, move both pipelines to CUDA up front —
# presumably so @GPU handlers find them already on device (TODO confirm).
if spaces is not None:
    PIPELINE.to("cuda")
    GEOMETRY_PIPELINE.to("cuda")


# Module-level `demo` so `gradio app.py` / Spaces can discover the app.
demo = build_app()


if __name__ == "__main__":
    demo.launch(
        mcp_server=True
    )
|
|