luh1124 committed on
Commit
249adae
·
1 Parent(s): 2b829a1

refactor(app): ZeroGPU-oriented NeAR UI; path-only SLaT; dual videos

Browse files

- Replace app.py with linear pipeline (geometry, SLaT to disk, preview, PBR GLB, bundled videos)
- Add app_legacy.py with previous Gradio layout
- CPU preload + ensure_geometry_on_cuda / ensure_near_on_cuda; optional geometry offload after mesh
- README Space app_file: app.py

Made-with: Cursor

Files changed (3) hide show
  1. README.md +1 -1
  2. app.py +436 -755
  3. app_legacy.py +1005 -0
README.md CHANGED
@@ -6,7 +6,7 @@ colorTo: indigo
6
  sdk: gradio
7
  sdk_version: 6.9.0
8
  python_version: "3.10"
9
- app_file: app_gsplat.py
10
  pinned: false
11
  license: apache-2.0
12
  short_description: "Relightable 3D from one image: SLAT, neural renderer, HDRI"
 
6
  sdk: gradio
7
  sdk_version: 6.9.0
8
  python_version: "3.10"
9
+ app_file: app.py
10
  pinned: false
11
  license: apache-2.0
12
  short_description: "Relightable 3D from one image: SLAT, neural renderer, HDRI"
app.py CHANGED
@@ -1,17 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
- import sys
3
  import shutil
 
4
  import threading
5
  import time
6
  from pathlib import Path
7
  from typing import Any, Dict, Optional
8
 
9
  import gradio as gr
10
-
11
- try:
12
- import spaces # pyright: ignore[reportMissingImports]
13
- except ImportError:
14
- spaces = None
15
  import imageio
16
  import numpy as np
17
  import torch
@@ -19,14 +26,37 @@ import trimesh
19
  from PIL import Image
20
  from simple_ocio import ToneMapper # pyright: ignore[reportMissingImports]
21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  sys.path.insert(0, "./hy3dshape")
23
  os.environ.setdefault("ATTN_BACKEND", "xformers")
24
  os.environ.setdefault("SPCONV_ALGO", "native")
25
  os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "7.5;8.0;8.6;8.9;9.0")
26
 
27
-
28
- from trellis.pipelines import NeARImageToRelightable3DPipeline
29
  from hy3dshape.pipelines import Hunyuan3DDiTFlowMatchingPipeline # pyright: ignore[reportMissingImports]
 
 
30
 
31
  GPU = spaces.GPU if spaces is not None else (lambda f: f)
32
 
@@ -34,15 +64,37 @@ APP_DIR = Path(__file__).resolve().parent
34
  CACHE_DIR = APP_DIR / "tmp_gradio"
35
  CACHE_DIR.mkdir(exist_ok=True)
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
  def _path_is_git_lfs_pointer(p: Path) -> bool:
39
  try:
40
- if not p.is_file():
41
  return False
42
- if p.stat().st_size > 512:
43
- return False
44
- head = p.read_bytes()[:120]
45
- return head.startswith(b"version https://git-lfs.github.com/spec/v1")
46
  except OSError:
47
  return False
48
 
@@ -50,36 +102,76 @@ def _path_is_git_lfs_pointer(p: Path) -> bool:
50
  def _warn_example_assets() -> None:
51
  img_dir = APP_DIR / "assets/example_image"
52
  if not img_dir.is_dir():
53
- print(
54
- "[NeAR] WARNING: assets/example_image/ is missing — commit and push the full assets/ tree.",
55
- flush=True,
56
- )
57
  return
58
  sample = img_dir / "T.png"
59
  if sample.is_file() and _path_is_git_lfs_pointer(sample):
60
- print(
61
- "[NeAR] WARNING: assets look like Git LFS pointers (not real PNG/NPZ/EXR bytes). "
62
- "Run: git lfs install && git lfs push --all origin (from a clone that has full files).",
63
- flush=True,
64
- )
65
 
66
 
67
  _warn_example_assets()
68
 
69
- DEFAULT_IMAGE = APP_DIR / "assets/example_image/T.png"
70
- DEFAULT_HDRI = APP_DIR / "assets/hdris/studio_small_03_1k.exr"
71
- MAX_SEED = np.iinfo(np.int32).max
72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
- def start_session(req: gr.Request):
75
- user_dir = CACHE_DIR / str(req.session_hash)
76
- os.makedirs(user_dir, exist_ok=True)
77
-
78
-
79
- def end_session(req: gr.Request):
80
- user_dir = CACHE_DIR / str(req.session_hash)
81
- shutil.rmtree(user_dir)
82
- _SESSION_SLAT.pop(str(req.session_hash), None)
83
 
84
 
85
  def get_file_path(file_obj: Any) -> Optional[str]:
@@ -94,20 +186,36 @@ def get_file_path(file_obj: Any) -> Optional[str]:
94
  return None
95
 
96
 
97
- PIPELINE: Optional[NeARImageToRelightable3DPipeline] = None
98
- GEOMETRY_PIPELINE: Optional[Hunyuan3DDiTFlowMatchingPipeline] = None
99
- tone_mapper = ToneMapper()
100
- AVAILABLE_TONE_MAPPERS = getattr(tone_mapper, "available_views", ["AgX"])
 
 
 
 
101
 
102
- # In-process SLaT for the image workflow (not serialized through Gradio State).
103
- _SESSION_SLAT: Dict[str, Any] = {}
104
 
105
- def set_tone_mapper(view_name: str):
106
  if view_name and PIPELINE is not None:
107
  PIPELINE.setup_tone_mapper(view_name)
108
 
109
- from hy3dshape.rembg import BackgroundRemover # pyright: ignore[reportMissingImports]
110
- LIGHT_PREPROCESSOR = BackgroundRemover()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
 
112
 
113
  def _preprocess_image_rgba_light(input_image: Image.Image) -> Image.Image:
@@ -116,20 +224,18 @@ def _preprocess_image_rgba_light(input_image: Image.Image) -> Image.Image:
116
  if image.mode == "RGBA":
117
  alpha = np.array(image)[:, :, 3]
118
  has_alpha = not np.all(alpha == 255)
119
-
120
  if has_alpha:
121
  output = image
122
  else:
123
  rgb = image.convert("RGB")
124
  max_size = max(rgb.size)
125
- scale = min(1, 1024 / max_size)
126
  if scale < 1:
127
  rgb = rgb.resize(
128
  (int(rgb.width * scale), int(rgb.height * scale)),
129
  Image.Resampling.LANCZOS,
130
  )
131
  output = LIGHT_PREPROCESSOR(rgb)
132
-
133
  if output.mode != "RGBA":
134
  output = output.convert("RGBA")
135
  output_np = np.array(output)
@@ -155,39 +261,29 @@ def _preprocess_image_rgba_light(input_image: Image.Image) -> Image.Image:
155
  return output.crop(padded_bbox).resize((518, 518), Image.Resampling.LANCZOS).convert("RGBA")
156
 
157
 
158
- def _flatten_rgba_on_matte(image: Image.Image, matte_rgb: tuple[float, float, float]) -> Image.Image:
159
- return NeARImageToRelightable3DPipeline.flatten_rgba_on_matte(image, matte_rgb)
160
-
161
-
162
- def preview_hdri(hdri_file_obj: Any):
163
- hdri_path = get_file_path(hdri_file_obj)
164
- if not hdri_path:
165
- return None, "Upload an HDRI `.exr` (left column)."
166
- import pyexr # pyright: ignore[reportMissingImports]
167
-
168
- hdri_np = pyexr.read(hdri_path)[..., :3]
169
- tm = ToneMapper(view="Khronos PBR Neutral")
170
- preview = tm.hdr_to_ldr(hdri_np)
171
- preview = (np.clip(preview, 0, 1) * 255).astype(np.uint8)
172
- name = Path(hdri_path).name
173
- return preview, f"HDRI **{name}** — preview updated."
174
 
175
 
176
- def switch_asset_source(mode: str):
177
- return gr.Tabs(selected=1 if mode == "From Existing SLaT" else 0)
178
 
179
 
180
- def _ensure_rgba(img: Image.Image) -> Image.Image:
181
- if img.mode == "RGBA":
182
- return img
183
- return img.convert("RGBA")
 
184
 
185
 
186
- @torch.inference_mode()
187
- def preprocess_image_only(image_input: Optional[Image.Image]):
188
- if image_input is None:
189
- return None
190
- return _preprocess_image_rgba_light(image_input)
191
 
192
 
193
  @GPU
@@ -195,136 +291,106 @@ def preprocess_image_only(image_input: Optional[Image.Image]):
195
  def generate_mesh(
196
  image_input: Optional[Image.Image],
197
  req: gr.Request,
198
- progress=gr.Progress(track_tqdm=True),
199
- ):
200
- session_dir = CACHE_DIR / str(req.session_hash)
201
-
202
  if image_input is None:
203
- raise gr.Error("Please upload an input image.")
 
 
204
 
205
  rgba = _ensure_rgba(image_input)
206
  if rgba.size != (518, 518):
207
  rgba = _preprocess_image_rgba_light(rgba)
208
- # Hunyuan3D mesh: composite onto white. SLaT step uses black matte separately.
209
  mesh_rgb = _flatten_rgba_on_matte(rgba, (1.0, 1.0, 1.0))
210
  rgba.save(session_dir / "input_preprocessed_rgba.png")
211
  mesh_rgb.save(session_dir / "input_processed.png")
212
 
213
- progress(0.6, desc="Generating geometry")
 
214
  mesh = GEOMETRY_PIPELINE(image=mesh_rgb)[0]
215
  mesh_path = session_dir / "initial_3d_shape.glb"
216
  mesh.export(mesh_path)
 
 
 
 
 
 
 
 
 
217
 
218
- _SESSION_SLAT.pop(str(req.session_hash), None)
219
- state = {
220
- "mode": "image",
221
  "mesh_path": str(mesh_path),
222
- "processed_image_path": str(session_dir / "input_processed.png"),
223
  "slat_path": None,
224
- "slat_in_memory": False,
225
  }
226
- return (
227
- state,
228
- str(mesh_path),
229
- "**Mesh ready** — Click **② Generate / Load SLaT** to continue.",
230
- )
231
 
232
 
233
  @GPU
234
  @torch.inference_mode()
235
- def _generate_slat_inner(
236
  asset_state: Dict[str, Any],
237
  image_input: Optional[Image.Image],
238
  seed: int,
239
  req: gr.Request,
240
- progress=gr.Progress(track_tqdm=True),
241
- ):
242
- """GPU body for SLaT generation — must be called from within a @GPU context."""
243
- if not asset_state or not asset_state.get("mesh_path"):
244
- raise gr.Error("Please run Generate Mesh first.")
245
  mesh_path = asset_state["mesh_path"]
246
- if not os.path.exists(mesh_path):
247
- raise gr.Error("Mesh file not found please regenerate the mesh.")
248
 
249
  if image_input is None:
250
- raise gr.Error("Preprocessed image not found — please upload the image again.")
 
251
 
252
- progress(0.1, desc="Loading mesh")
253
  mesh = trimesh.load(mesh_path, force="mesh")
254
  rgba = _ensure_rgba(image_input)
255
  if rgba.size != (518, 518):
256
  rgba = _preprocess_image_rgba_light(rgba)
257
  slat_rgb = _flatten_rgba_on_matte(rgba, (0.0, 0.0, 0.0))
258
 
259
- progress(0.3, desc="Computing SLaT coordinates")
 
260
  coords = PIPELINE.shape_to_coords(mesh)
 
 
261
 
262
- progress(0.6, desc="Generating SLaT")
263
  slat = PIPELINE.run_with_coords([slat_rgb], coords, seed=int(seed), preprocess_image=False)
 
264
 
265
- _SESSION_SLAT[str(req.session_hash)] = slat
266
- new_state = {**asset_state, "slat_path": None, "slat_in_memory": True}
267
- return new_state, f"**Asset ready** — SLaT generated (seed `{seed}`)."
268
-
269
 
270
- def _load_slat_file_inner(slat_upload: Any, slat_path_text: str, req: gr.Request):
271
- resolved = get_file_path(slat_upload) or (slat_path_text.strip() if slat_path_text else "")
272
- if not resolved:
273
- raise gr.Error("Please provide a SLaT `.npz` path or upload one.")
274
- if not os.path.exists(resolved):
275
- raise gr.Error(f"SLaT file not found: `{resolved}`")
276
- _SESSION_SLAT.pop(str(req.session_hash), None)
277
- state = {
278
- "mode": "slat",
279
- "slat_path": resolved,
280
- "mesh_path": None,
281
- "processed_image_path": None,
282
- "slat_in_memory": False,
283
- }
284
- return state, f"SLaT **{Path(resolved).name}** loaded."
285
 
286
 
287
- @GPU
288
- @torch.inference_mode()
289
- def prepare_slat(
290
- source_mode: str,
291
- asset_state: Dict[str, Any],
292
- image_input: Optional[Image.Image],
293
- seed: int,
294
  slat_upload: Any,
295
  slat_path_text: str,
296
  req: gr.Request,
297
- progress=gr.Progress(track_tqdm=True),
298
- ):
299
- if source_mode == "From Image":
300
- return _generate_slat_inner(asset_state, image_input, seed, req, progress)
301
- return _load_slat_file_inner(slat_upload, slat_path_text, req)
302
-
303
-
304
- def require_asset_state(asset_state: Optional[Dict[str, Any]]) -> Dict[str, Any]:
305
- if not asset_state:
306
- raise gr.Error("Please generate or load a SLaT first.")
307
- if asset_state.get("slat_in_memory") or asset_state.get("slat_path"):
308
- return asset_state
309
- raise gr.Error("Please generate or load a SLaT first.")
310
-
311
-
312
- def load_asset_and_hdri(asset_state: Dict[str, Any], hdri_file_obj: Any, req: gr.Request):
313
- asset_state = require_asset_state(asset_state)
314
- hdri_path = get_file_path(hdri_file_obj)
315
- if not hdri_path:
316
- raise gr.Error("Please upload an HDRI `.exr` file.")
317
- if asset_state.get("slat_in_memory"):
318
- slat = _SESSION_SLAT.get(str(req.session_hash))
319
- if slat is None:
320
- raise gr.Error("SLaT session expired — run **② Generate / Load SLaT** again.")
321
- else:
322
- slat_path = asset_state.get("slat_path")
323
- if not slat_path:
324
- raise gr.Error("Please generate or load a SLaT first.")
325
- slat = PIPELINE.load_slat(slat_path)
326
- hdri_np = PIPELINE.load_hdri(hdri_path)
327
- return slat, hdri_np
328
 
329
 
330
  @GPU
@@ -338,29 +404,29 @@ def render_preview(
338
  fov: float,
339
  radius: float,
340
  resolution: int,
341
- req: gr.Request,
342
- progress=gr.Progress(track_tqdm=True),
343
- ):
344
- t0 = time.time()
345
- session_dir = CACHE_DIR / str(req.session_hash)
346
- progress(0.1, desc="Loading SLaT and HDRI")
347
- slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)
348
-
349
- progress(0.5, desc="Rendering")
 
350
  views = PIPELINE.render_view(
351
- slat, hdri_np,
352
- yaw_deg=yaw, pitch_deg=pitch, fov=fov, radius=radius,
353
- hdri_rot_deg=hdri_rot, resolution=int(resolution),
354
- )
355
- for key, image in views.items():
356
- image.save(session_dir / f"preview_{key}.png")
357
- print(f"[NeAR] render_preview {time.time() - t0:.1f}s", flush=True)
358
-
359
- msg = (
360
- f"**Preview done** — "
361
- f"yaw `{yaw:.0f}°` pitch `{pitch:.0f}°` · "
362
- f"fov `{fov:.0f}` radius `{radius:.1f}` · HDRI rot `{hdri_rot:.0f}°`"
363
  )
 
 
 
364
  return (
365
  views["color"],
366
  views["base_color"],
@@ -372,634 +438,249 @@ def render_preview(
372
 
373
 
374
  @GPU
375
- @torch.inference_mode()
376
- def render_camera_video(
377
  asset_state: Dict[str, Any],
378
  hdri_file_obj: Any,
379
  hdri_rot: float,
380
- fps: int,
381
- num_views: int,
382
- fov: float,
383
- radius: float,
384
- full_video: bool,
385
- shadow_video: bool,
386
  req: gr.Request,
387
- progress=gr.Progress(track_tqdm=True),
388
- ):
389
- t0 = time.time()
 
 
390
  session_dir = CACHE_DIR / str(req.session_hash)
391
- progress(0.1, desc="Loading SLaT and HDRI")
392
- slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)
393
-
394
- progress(0.4, desc="Rendering camera path")
395
- frames = PIPELINE.render_camera_path_video(
396
- slat, hdri_np,
397
- num_views=int(num_views), fov=fov, radius=radius,
398
- hdri_rot_deg=hdri_rot, full_video=full_video, shadow_video=shadow_video,
399
- bg_color=(1, 1, 1), verbose=True,
 
 
 
 
400
  )
401
- video_path = session_dir / ("camera_path_full.mp4" if full_video else "camera_path.mp4")
402
- imageio.mimsave(video_path, frames, fps=int(fps))
403
- print(f"[NeAR] render_camera_video {time.time() - t0:.1f}s", flush=True)
404
- return str(video_path), f"**Camera path video saved**"
 
 
 
405
 
406
 
407
  @GPU
408
  @torch.inference_mode()
409
- def render_hdri_video(
410
  asset_state: Dict[str, Any],
411
  hdri_file_obj: Any,
 
412
  fps: int,
413
- num_frames: int,
 
414
  yaw: float,
415
  pitch: float,
416
  fov: float,
417
  radius: float,
418
- full_video: bool,
419
- shadow_video: bool,
420
  req: gr.Request,
421
- progress=gr.Progress(track_tqdm=True),
422
- ):
423
- t0 = time.time()
 
 
 
424
  session_dir = CACHE_DIR / str(req.session_hash)
425
- progress(0.1, desc="Loading SLaT and HDRI")
426
- slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)
427
-
428
- progress(0.4, desc="Rendering HDRI rotation")
429
- hdri_roll_frames, render_frames = PIPELINE.render_hdri_rotation_video(
430
- slat, hdri_np,
431
- num_frames=int(num_frames), yaw_deg=yaw, pitch_deg=pitch,
432
- fov=fov, radius=radius, full_video=full_video, shadow_video=shadow_video,
433
- bg_color=(1, 1, 1), verbose=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
434
  )
435
- hdri_roll_path = session_dir / "hdri_roll.mp4"
436
- render_path = session_dir / ("hdri_rotation_full.mp4" if full_video else "hdri_rotation.mp4")
437
- imageio.mimsave(hdri_roll_path, hdri_roll_frames, fps=int(fps))
438
- imageio.mimsave(render_path, render_frames, fps=int(fps))
439
- print(f"[NeAR] render_hdri_video {time.time() - t0:.1f}s", flush=True)
440
- return str(hdri_roll_path), str(render_path), "**HDRI rotation video saved**"
 
441
 
 
 
442
 
443
- @GPU
444
- def export_glb(
445
- asset_state: Dict[str, Any],
446
- hdri_file_obj: Any,
447
- hdri_rot: float,
448
- simplify: float,
449
- texture_size: int,
450
- req: gr.Request,
451
- progress=gr.Progress(track_tqdm=True),
452
- ):
453
- t0 = time.time()
454
  session_dir = CACHE_DIR / str(req.session_hash)
455
- progress(0.1, desc="Loading SLaT and HDRI")
456
- slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)
 
 
457
 
458
- progress(0.6, desc="Baking PBR textures")
459
- glb = PIPELINE.export_glb_from_slat(
460
- slat, hdri_np,
461
- hdri_rot_deg=hdri_rot, base_mesh=None,
462
- simplify=simplify, texture_size=int(texture_size), fill_holes=True,
463
- )
464
- glb_path = session_dir / "near_pbr.glb"
465
- glb.export(glb_path)
466
- print(f"[NeAR] export_glb {time.time() - t0:.1f}s", flush=True)
467
- return str(glb_path), f"PBR GLB exported: **{glb_path.name}**"
468
-
469
-
470
- CUSTOM_CSS = """
471
- .gradio-container { max-width: 100% !important; width: 100% !important; }
472
- main.gradio-container { max-width: 100% !important; }
473
- .gradio-wrap { max-width: 100% !important; }
474
-
475
- /* Top header: TRELLIS-style left-aligned title + bullets */
476
- .near-app-header {
477
- text-align: left !important;
478
- padding: 0.35rem 0 1.1rem 0 !important;
479
- margin: 0 !important;
480
- }
481
- .near-app-header .prose,
482
- .near-app-header p { margin: 0 !important; }
483
- .near-app-header h2 {
484
- font-size: clamp(1.35rem, 2.4vw, 1.85rem) !important;
485
- font-weight: 700 !important;
486
- letter-spacing: -0.02em !important;
487
- margin: 0 0 0.45rem 0 !important;
488
- line-height: 1.25 !important;
489
- }
490
- .near-app-header h2 a {
491
- color: var(--link-text-color, var(--color-accent)) !important;
492
- text-decoration: none !important;
493
- }
494
- .near-app-header h2 a:hover { text-decoration: underline !important; }
495
- .near-app-header ul {
496
- margin: 0 !important;
497
- padding-left: 1.2rem !important;
498
- font-size: 0.88rem !important;
499
- color: #4b5563 !important;
500
- line-height: 1.45 !important;
501
- }
502
- .near-app-header li { margin: 0.15rem 0 !important; }
503
-
504
- /* Left column: compact section labels (no numbered circles) */
505
- .section-kicker {
506
- font-size: 0.7rem !important;
507
- font-weight: 700 !important;
508
- color: #9ca3af !important;
509
- text-transform: uppercase !important;
510
- letter-spacing: 0.08em !important;
511
- margin: 0 0 0.45rem 0 !important;
512
- padding: 0 !important;
513
- }
514
-
515
- /* HDRI file picker: light card instead of default dark block */
516
- .hdri-upload-zone,
517
- .hdri-file-input,
518
- .hdri-upload-zone .upload-container,
519
- .hdri-upload-zone [data-testid="file-upload"],
520
- .hdri-file-input [data-testid="file-upload"],
521
- .hdri-upload-zone .file-preview,
522
- .hdri-file-input .file-preview,
523
- .hdri-upload-zone .wrap,
524
- .hdri-file-input .wrap,
525
- .hdri-upload-zone .panel,
526
- .hdri-file-input .panel {
527
- background: #f9fafb !important;
528
- border-color: #e5e7eb !important;
529
- color: #374151 !important;
530
- }
531
- .hdri-upload-zone .file-preview,
532
- .hdri-file-input .file-preview { border-radius: 8px !important; }
533
- .hdri-upload-zone .label-wrap,
534
- .hdri-file-input .label-wrap { color: #4b5563 !important; }
535
-
536
- /* HDRI preview image: remove thick / black frame (Gradio panel border) */
537
- .hdri-preview-image,
538
- .hdri-preview-image.panel,
539
- .hdri-preview-image .wrap,
540
- .hdri-preview-image .image-container,
541
- .hdri-preview-image .image-frame,
542
- .hdri-preview-image .image-wrapper,
543
- .hdri-preview-image [data-testid="image"],
544
- .hdri-preview-image .icon-buttons,
545
- .hdri-preview-image img {
546
- border: none !important;
547
- outline: none !important;
548
- box-shadow: none !important;
549
- }
550
- .hdri-preview-image img {
551
- border-radius: 8px !important;
552
- }
553
-
554
- /* Export accordion: remove heavy black box; keep a light separator on the header only */
555
- .export-accordion,
556
- .export-accordion.panel,
557
- .export-accordion > div,
558
- .export-accordion details,
559
- .export-accordion .label-wrap,
560
- .export-accordion .accordion-header {
561
- border: none !important;
562
- outline: none !important;
563
- box-shadow: none !important;
564
- }
565
- .export-accordion summary,
566
- .export-accordion .label-wrap {
567
- border-bottom: 1px solid #e5e7eb !important;
568
- background: transparent !important;
569
- }
570
-
571
- /* Gradio 4+ block chrome sometimes forces --block-border-color */
572
- .gradio-container .hdri-preview-image,
573
- .gradio-container .export-accordion {
574
- --block-border-width: 0px !important;
575
- --panel-border-width: 0 !important;
576
- }
577
-
578
- /* Shadow map preview: same flat frame as HDRI preview */
579
- .shadow-preview-image,
580
- .shadow-preview-image.panel,
581
- .shadow-preview-image .wrap,
582
- .shadow-preview-image .image-container,
583
- .shadow-preview-image .image-frame,
584
- .shadow-preview-image .image-wrapper,
585
- .shadow-preview-image [data-testid="image"],
586
- .shadow-preview-image img {
587
- border: none !important;
588
- outline: none !important;
589
- box-shadow: none !important;
590
- }
591
- .shadow-preview-image img { border-radius: 8px !important; }
592
- .gradio-container .shadow-preview-image {
593
- --block-border-width: 0px !important;
594
- --panel-border-width: 0 !important;
595
- }
596
-
597
- /* Main output tabs: larger, easier to spot */
598
- .main-output-tabs > .tab-nav,
599
- .main-output-tabs .tab-nav button {
600
- font-size: 0.95rem !important;
601
- font-weight: 600 !important;
602
- }
603
- .main-output-tabs .tab-nav button { padding: 0.45rem 0.9rem !important; }
604
-
605
- /* Status strip: one left accent only (Gradio panel also draws accent — disable it here) */
606
- .gradio-container .status-footer,
607
- .status-footer.panel,
608
- .status-footer.block {
609
- --block-border-width: 0px !important;
610
- --panel-border-width: 0px !important;
611
- }
612
- .status-footer {
613
- font-size: 0.8125rem !important;
614
- line-height: 1.45 !important;
615
- color: var(--body-text-color-subdued, #6b7280) !important;
616
- margin: 0 0 0.65rem 0 !important;
617
- padding: 0.5rem 0.65rem 0.5rem 0.7rem !important;
618
- background: var(--block-background-fill, #f9fafb) !important;
619
- /* Single box: one thick left edge (avoid stacking with Gradio .block border) */
620
- border-width: 1px 1px 1px 3px !important;
621
- border-style: solid !important;
622
- border-color: var(--border-color-primary, #e5e7eb) var(--border-color-primary, #e5e7eb)
623
- var(--border-color-primary, #e5e7eb) var(--color-accent, #2563eb) !important;
624
- border-radius: 8px !important;
625
- box-shadow: 0 1px 2px rgba(15, 23, 42, 0.05) !important;
626
- }
627
- .status-footer .form,
628
- .status-footer .wrap,
629
- .status-footer .prose,
630
- .status-footer .prose > *:first-child {
631
- border: none !important;
632
- box-shadow: none !important;
633
- }
634
- .status-footer .prose blockquote {
635
- border-left: none !important;
636
- padding-left: 0 !important;
637
- margin-left: 0 !important;
638
- }
639
- .status-footer p,
640
- .status-footer .prose p {
641
- margin: 0 !important;
642
- line-height: 1.05 !important;
643
- }
644
- .status-footer strong {
645
- color: var(--body-text-color, #374151) !important;
646
- font-weight: 600 !important;
647
- }
648
- .status-footer a {
649
- color: var(--link-text-color, var(--color-accent, #2563eb)) !important;
650
- text-decoration: none !important;
651
- }
652
- .status-footer a:hover { text-decoration: underline !important; }
653
-
654
- .ctrl-strip {
655
- border:1px solid #e5e7eb; border-radius:8px;
656
- padding:0.55rem 0.8rem 0.4rem; margin-bottom:0.6rem; background:#fff;
657
- }
658
- .ctrl-strip-title {
659
- font-size:0.72rem; font-weight:600; color:#9ca3af;
660
- text-transform:uppercase; letter-spacing:0.06em; margin-bottom:0.4rem;
661
- }
662
-
663
- .mat-label {
664
- font-size:0.72rem; font-weight:700; color:#9ca3af;
665
- text-transform:uppercase; letter-spacing:0.07em; margin:0.7rem 0 0.2rem;
666
- }
667
-
668
- .divider { border:none; border-top:1px solid #e5e7eb; margin:0.5rem 0; }
669
-
670
- .img-gallery table { display:grid !important; grid-template-columns:repeat(3,1fr) !important; gap:3px !important; }
671
- .img-gallery table thead { display:none !important; }
672
- .img-gallery table tr { display:contents !important; }
673
- .img-gallery table td { padding:0 !important; }
674
- .img-gallery table td img { width:100% !important; height:68px !important; object-fit:cover !important; border-radius:5px !important; }
675
-
676
- .hdri-gallery table { display:grid !important; grid-template-columns:repeat(2,1fr) !important; gap:3px !important; }
677
- .hdri-gallery table thead { display:none !important; }
678
- .hdri-gallery table tr { display:contents !important; }
679
- .hdri-gallery table td { padding:0 !important; font-size:0.76rem; text-align:center; word-break:break-all; }
680
-
681
- /* Right sidebar: align with TRELLIS-style narrow examples column */
682
- .sidebar-examples { min-width: 0 !important; }
683
- .sidebar-examples .label-wrap { font-size: 0.85rem !important; }
684
- .gradio-container .sidebar-examples table { width: 100% !important; }
685
-
686
- footer { display:none !important; }
687
  """
688
 
689
- NEAR_GRADIO_THEME = gr.themes.Base(
690
- primary_hue=gr.themes.colors.blue,
691
- secondary_hue=gr.themes.colors.blue,
692
- )
693
 
694
 
695
  def build_app() -> gr.Blocks:
696
- with gr.Blocks(
697
- title="NeAR",
698
- theme=NEAR_GRADIO_THEME,
699
- delete_cache=None,
700
- fill_width=True,
701
- ) as demo:
702
  asset_state = gr.State({})
703
 
704
  gr.Markdown(
705
- """
706
- ## Single Image to Relightable 3DGS with [NeAR](https://near-project.github.io/)
707
- * Upload an RGBA image (or load an existing SLaT), run **Generate Mesh** then **Generate / Load SLaT**, pick an HDRI, and use **Camera & HDRI** to relight.
708
- * Use **Geometry** for mesh / PBR preview, **Preview** for still renders, **Videos** for camera or HDRI paths; **Export PBR GLB** when you are happy with the result.
709
- * Texture style transfer is possible when the reference images used for **mesh** and **SLaT** are different.
710
- """,
711
- elem_classes=["near-app-header"],
712
  )
713
 
714
- _img_ex = [
715
- [str(p)]
716
- for p in sorted((APP_DIR / "assets/example_image").glob("*.png"))
717
- if not _path_is_git_lfs_pointer(p)
718
- ]
719
- _slat_ex = [
720
- [str(p)]
721
- for p in sorted((APP_DIR / "assets/example_slats").glob("*.npz"))
722
- if not _path_is_git_lfs_pointer(p)
723
- ]
724
- _hdri_ex = [
725
- [str(p)]
726
- for p in sorted((APP_DIR / "assets/hdris").glob("*.exr"))
727
- if not _path_is_git_lfs_pointer(p)
728
- ]
729
- if not _img_ex and (APP_DIR / "assets/example_image").is_dir():
730
- print(
731
- "[NeAR] WARNING: no usable PNG examples (empty dir or all Git LFS pointers).",
732
- flush=True,
733
- )
734
-
735
- with gr.Row(equal_height=False):
736
-
737
- with gr.Column(scale=1, min_width=360):
738
-
739
- with gr.Group():
740
- gr.HTML('<p class="section-kicker">Asset</p>')
741
- source_mode = gr.Radio(
742
- ["From Image", "From Existing SLaT"],
743
- value="From Image",
744
- label="",
745
- show_label=False,
746
- )
747
- with gr.Tabs(selected=0) as source_tabs:
748
-
749
- with gr.Tab("Image", id=0):
750
- image_input = gr.Image(
751
- label="Input Image", type="pil", image_mode="RGBA",
752
- value=str(DEFAULT_IMAGE) if DEFAULT_IMAGE.exists() else None,
753
- height=400,
754
- )
755
- seed = gr.Slider(0, MAX_SEED, value=43, step=1, label="Seed (SLaT)")
756
- mesh_button = gr.Button("① Generate Mesh", variant="primary", min_width=100)
757
-
758
- with gr.Tab("SLaT", id=1):
759
- slat_upload = gr.File(label="Upload SLaT (.npz)", file_types=[".npz"])
760
- slat_path_text = gr.Textbox(
761
- label="Or enter local path",
762
- placeholder="/path/to/sample_slat.npz",
763
- )
764
-
765
- slat_button = gr.Button(
766
- "② Generate / Load SLaT", variant="primary", min_width=100,
767
- )
768
-
769
- with gr.Group():
770
- gr.HTML('<p class="section-kicker">HDRI</p>')
771
- with gr.Column(elem_classes=["hdri-upload-zone"]):
772
- hdri_file = gr.File(
773
- label="Environment (.exr)", file_types=[".exr"],
774
- value=str(DEFAULT_HDRI) if DEFAULT_HDRI.exists() else None,
775
- elem_classes=["hdri-file-input"],
776
- )
777
- hdri_preview = gr.Image(
778
- label="Preview",
779
- interactive=False,
780
- height=130,
781
- container=False,
782
- elem_classes=["hdri-preview-image"],
783
- )
784
-
785
- with gr.Group():
786
- gr.HTML('<p class="section-kicker">Export</p>')
787
- with gr.Accordion(
788
- "Export Settings",
789
- open=False,
790
- elem_classes=["export-accordion"],
791
- ):
792
- with gr.Row():
793
- simplify = gr.Slider(0.8, 0.99, value=0.95, step=0.01, label="Mesh Simplify")
794
- texture_size = gr.Slider(512, 4096, value=2048, step=512, label="Texture Size")
795
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
796
  with gr.Row():
797
- clear_button = gr.Button("Clear Cache", variant="secondary", min_width=100)
798
-
799
- with gr.Column(scale=10, min_width=560):
 
 
 
 
 
 
 
800
 
801
- status_md = gr.Markdown(
802
- "Ready — use **Asset** (left) and **HDRI** to begin.",
803
- elem_classes=["status-footer"],
804
- )
805
 
 
 
806
 
807
- with gr.Group(elem_classes=["ctrl-strip"]):
808
- gr.HTML("<div class='ctrl-strip-title'>Camera &amp; HDRI</div>")
809
- with gr.Row():
810
- tone_mapper_name = gr.Dropdown(
811
- choices=AVAILABLE_TONE_MAPPERS,
812
- value="AgX",
813
- label="Tone Mapper",
814
- min_width=120,
815
- )
816
- hdri_rot = gr.Slider(0, 360, value=0, step=1, label="HDRI Rotation °")
817
- resolution = gr.Slider(256, 1024, value=512, step=256, label="Preview Res")
818
- with gr.Row():
819
- yaw = gr.Slider(0, 360, value=0, step=0.5, label="Yaw °")
820
- pitch = gr.Slider(-90, 90, value=0, step=0.5, label="Pitch °")
821
- fov = gr.Slider(10, 70, value=40, step=1, label="FoV")
822
- radius = gr.Slider(1.0, 4.0, value=2.0, step=0.05, label="Radius")
823
-
824
- tone_mapper_name.change(
825
- set_tone_mapper,
826
- inputs=[tone_mapper_name],
827
- outputs=[],
828
- )
829
-
830
- with gr.Tabs(elem_classes=["main-output-tabs"]):
831
-
832
- with gr.Tab("Geometry", id=0):
833
- with gr.Row():
834
- mesh_viewer = gr.Model3D(
835
- label="3D Mesh", interactive=False, height=520,
836
- )
837
- pbr_viewer = gr.Model3D(
838
- label="PBR GLB", interactive=False, height=520,
839
- )
840
- gr.HTML("<hr class='divider'>")
841
- with gr.Row():
842
- export_glb_button = gr.Button("Export PBR GLB", variant="primary", min_width=140)
843
-
844
- with gr.Tab("Preview", id=1):
845
- preview_button = gr.Button("Render Preview", variant="primary", min_width=100)
846
- gr.HTML("<hr class='divider'>")
847
- with gr.Row():
848
- color_output = gr.Image(label="Relit Result", interactive=False, height=400)
849
- with gr.Column():
850
- with gr.Row():
851
- base_color_output = gr.Image(label="Base Color", interactive=False, height=200)
852
- metallic_output = gr.Image(label="Metallic", interactive=False, height=200)
853
- with gr.Row():
854
- roughness_output = gr.Image(label="Roughness", interactive=False, height=200)
855
- shadow_output = gr.Image(label="Shadow", interactive=False, height=200)
856
-
857
- with gr.Tab("Videos", id=2):
858
- with gr.Accordion("Video Settings", open=False):
859
- with gr.Row():
860
- fps = gr.Slider(1, 60, value=24, step=1, label="FPS")
861
- num_views = gr.Slider(8, 120, value=40, step=1, label="Camera Frames")
862
- num_frames = gr.Slider(8, 120, value=40, step=1, label="HDRI Frames")
863
- with gr.Row():
864
- full_video = gr.Checkbox(label="Full composite video", value=True)
865
- shadow_video = gr.Checkbox(
866
- label="Include shadow in video",
867
- value=True,
868
- )
869
- with gr.Row():
870
- camera_video_button = gr.Button("Camera Path Video", variant="primary", min_width=100)
871
- hdri_video_button = gr.Button("HDRI Rotation Video", variant="primary", min_width=100)
872
- camera_video_output = gr.Video(
873
- label="Camera Path", autoplay=True, loop=True, height=340,
874
- )
875
- hdri_render_video_output = gr.Video(
876
- label="HDRI Rotation Render", autoplay=True, loop=True, height=300,
877
- )
878
- with gr.Accordion("HDRI Roll (environment panorama)", open=False):
879
- hdri_roll_video_output = gr.Video(
880
- label="HDRI Roll", autoplay=True, loop=True, height=180,
881
- )
882
-
883
- with gr.Column(scale=1, min_width=172):
884
- with gr.Column(visible=True, elem_classes=["sidebar-examples", "img-gallery"]) as col_img_examples:
885
- if _img_ex:
886
- gr.Examples(
887
- examples=_img_ex,
888
- inputs=[image_input],
889
- fn=preprocess_image_only,
890
- outputs=[image_input],
891
- run_on_click=True,
892
- examples_per_page=18,
893
- label="Examples",
894
- )
895
- else:
896
- gr.Markdown("*No PNG examples in `assets/example_image`*")
897
-
898
- with gr.Column(visible=False, elem_classes=["sidebar-examples"]) as col_slat_examples:
899
- if _slat_ex:
900
- gr.Examples(
901
- examples=_slat_ex,
902
- inputs=[slat_path_text],
903
- label="Example SLaTs",
904
- )
905
- else:
906
- gr.Markdown("*No `.npz` examples in `assets/example_slats`*")
907
-
908
- with gr.Column(visible=True, elem_classes=["sidebar-examples", "hdri-gallery"]) as col_hdri_examples:
909
- if _hdri_ex:
910
- gr.Examples(
911
- examples=_hdri_ex,
912
- inputs=[hdri_file],
913
- label="Example HDRIs",
914
- examples_per_page=8,
915
- )
916
- else:
917
- gr.Markdown("*No `.exr` examples in `assets/hdris`*")
918
 
919
  demo.load(start_session)
920
  demo.unload(end_session)
921
 
922
- source_mode.change(switch_asset_source, inputs=[source_mode], outputs=[source_tabs])
923
- source_mode.change(
924
- lambda m: (
925
- gr.update(visible=m == "From Image"),
926
- gr.update(visible=m == "From Existing SLaT"),
927
- ),
928
- inputs=[source_mode],
929
- outputs=[col_img_examples, col_slat_examples],
930
- )
931
-
932
- for _trigger in (hdri_file.upload, hdri_file.change):
933
- _trigger(
934
- preview_hdri,
935
- inputs=[hdri_file],
936
- outputs=[hdri_preview, status_md],
937
- )
938
-
939
- image_input.upload(
940
- preprocess_image_only,
941
- inputs=[image_input],
942
- outputs=[image_input],
943
- )
944
-
945
- mesh_button.click(
946
- generate_mesh,
947
- inputs=[image_input],
948
- outputs=[asset_state, mesh_viewer, status_md],
949
- )
950
 
951
- slat_button.click(
952
- prepare_slat,
953
- inputs=[source_mode, asset_state, image_input, seed, slat_upload, slat_path_text],
954
- outputs=[asset_state, status_md],
955
- )
956
-
957
- preview_button.click(
958
  render_preview,
959
- inputs=[asset_state, hdri_file, hdri_rot,
960
- yaw, pitch, fov, radius, resolution],
961
- outputs=[
962
- color_output,
963
- base_color_output,
964
- metallic_output,
965
- roughness_output,
966
- shadow_output,
967
- status_md,
968
- ],
969
  )
970
-
971
- camera_video_button.click(
972
- render_camera_video,
973
- inputs=[asset_state, hdri_file, hdri_rot,
974
- fps, num_views, fov, radius, full_video, shadow_video],
975
- outputs=[camera_video_output, status_md],
976
  )
977
-
978
- hdri_video_button.click(
979
- render_hdri_video,
980
- inputs=[asset_state, hdri_file,
981
- fps, num_frames, yaw, pitch, fov, radius, full_video, shadow_video],
982
- outputs=[hdri_roll_video_output, hdri_render_video_output, status_md],
983
  )
 
984
 
985
- export_glb_button.click(
986
- export_glb,
987
- inputs=[asset_state, hdri_file, hdri_rot, simplify, texture_size],
988
- outputs=[pbr_viewer, status_md],
989
- )
990
  return demo
991
 
992
 
993
- PIPELINE = NeARImageToRelightable3DPipeline.from_pretrained("luh0502/NeAR")
994
- GEOMETRY_PIPELINE = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained("tencent/Hunyuan3D-2.1")
 
995
 
996
- if spaces is not None:
997
- PIPELINE.to("cuda")
998
- GEOMETRY_PIPELINE.to("cuda")
999
 
1000
- demo = build_app()
1001
 
1002
  if __name__ == "__main__":
1003
- demo.launch(
1004
- mcp_server=True
1005
- )
 
1
+ """
2
+ NeAR Gradio Space — streamlined pipeline for ZeroGPU.
3
+
4
+ Session state is path-only (mesh + SLaT on disk). No in-memory SLaT cache.
5
+ Geometry can offload to CPU after mesh export to free VRAM for NeAR.
6
+
7
+ Legacy full UI: see app_legacy.py.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import gc
13
  import os
 
14
  import shutil
15
+ import sys
16
  import threading
17
  import time
18
  from pathlib import Path
19
  from typing import Any, Dict, Optional
20
 
21
  import gradio as gr
 
 
 
 
 
22
  import imageio
23
  import numpy as np
24
  import torch
 
26
  from PIL import Image
27
  from simple_ocio import ToneMapper # pyright: ignore[reportMissingImports]
28
 
29
+ if not os.environ.get("HF_TOKEN") and not os.environ.get("HUGGING_FACE_HUB_TOKEN"):
30
+ _hub_tok = (os.environ.get("near") or os.environ.get("NEAR") or "").strip()
31
+ if _hub_tok:
32
+ os.environ["HF_TOKEN"] = _hub_tok
33
+
34
+ try:
35
+ _raw_z = int(os.environ.get("NEAR_ZEROGPU_HF_CEILING_S", "90"))
36
+ except ValueError:
37
+ _raw_z = 90
38
+ _ZCAP = min(max(15, _raw_z), 120)
39
+ for _k in ("NEAR_ZEROGPU_MAX_SECONDS", "NEAR_ZEROGPU_DURATION_CAP"):
40
+ if _k in os.environ:
41
+ try:
42
+ if int(os.environ[_k]) > _ZCAP:
43
+ os.environ[_k] = str(_ZCAP)
44
+ except ValueError:
45
+ pass
46
+
47
+ try:
48
+ import spaces # pyright: ignore[reportMissingImports]
49
+ except ImportError:
50
+ spaces = None
51
+
52
  sys.path.insert(0, "./hy3dshape")
53
  os.environ.setdefault("ATTN_BACKEND", "xformers")
54
  os.environ.setdefault("SPCONV_ALGO", "native")
55
  os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "7.5;8.0;8.6;8.9;9.0")
56
 
 
 
57
  from hy3dshape.pipelines import Hunyuan3DDiTFlowMatchingPipeline # pyright: ignore[reportMissingImports]
58
+ from hy3dshape.rembg import BackgroundRemover # pyright: ignore[reportMissingImports]
59
+ from trellis.pipelines import NeARImageToRelightable3DPipeline
60
 
61
  GPU = spaces.GPU if spaces is not None else (lambda f: f)
62
 
 
64
  CACHE_DIR = APP_DIR / "tmp_gradio"
65
  CACHE_DIR.mkdir(exist_ok=True)
66
 
67
+ _MODEL_LOCK = threading.Lock()
68
+ PIPELINE: Optional[NeARImageToRelightable3DPipeline] = None
69
+ GEOMETRY_PIPELINE: Optional[Hunyuan3DDiTFlowMatchingPipeline] = None
70
+ _NEAR_ON_CUDA = False
71
+ _GEOMETRY_ON_CUDA = False
72
+
73
+ tone_mapper = ToneMapper()
74
+ AVAILABLE_TONE_MAPPERS = getattr(tone_mapper, "available_views", ["AgX"])
75
+ LIGHT_PREPROCESSOR = BackgroundRemover()
76
+
77
+ DEFAULT_IMAGE = APP_DIR / "assets/example_image/T.png"
78
+ DEFAULT_HDRI = APP_DIR / "assets/hdris/studio_small_03_1k.exr"
79
+ MAX_SEED = np.iinfo(np.int32).max
80
+
81
+
82
+ def _truthy_env(name: str, default: str) -> bool:
83
+ v = (os.environ.get(name) if name in os.environ else default).strip().lower()
84
+ return v in ("1", "true", "yes", "on")
85
+
86
+
87
+ _CPU_PRELOAD_AT_START = _truthy_env("NEAR_MODEL_CPU_PRELOAD_AT_START", "0")
88
+ _OFFLOAD_GEOMETRY_AFTER_MESH = _truthy_env(
89
+ "NEAR_GEOMETRY_OFFLOAD_AFTER_MESH", "1" if spaces is not None else "0"
90
+ )
91
+
92
 
93
  def _path_is_git_lfs_pointer(p: Path) -> bool:
94
  try:
95
+ if not p.is_file() or p.stat().st_size > 512:
96
  return False
97
+ return p.read_bytes()[:120].startswith(b"version https://git-lfs.github.com/spec/v1")
 
 
 
98
  except OSError:
99
  return False
100
 
 
102
  def _warn_example_assets() -> None:
103
  img_dir = APP_DIR / "assets/example_image"
104
  if not img_dir.is_dir():
105
+ print("[NeAR] WARNING: assets/example_image/ missing.", flush=True)
 
 
 
106
  return
107
  sample = img_dir / "T.png"
108
  if sample.is_file() and _path_is_git_lfs_pointer(sample):
109
+ print("[NeAR] WARNING: assets look like Git LFS pointers.", flush=True)
 
 
 
 
110
 
111
 
112
  _warn_example_assets()
113
 
 
 
 
114
 
115
+ def _ensure_near_loaded_on_cpu_locked() -> None:
116
+ global PIPELINE
117
+ if PIPELINE is not None:
118
+ return
119
+ print("[NeAR] Loading NeAR pipeline…", flush=True)
120
+ PIPELINE = NeARImageToRelightable3DPipeline.from_pretrained("luh0502/NeAR")
121
+ if _CPU_PRELOAD_AT_START:
122
+ PIPELINE.to("cpu")
123
+
124
+
125
+ def _ensure_geometry_loaded_on_cpu_locked() -> None:
126
+ global GEOMETRY_PIPELINE
127
+ if GEOMETRY_PIPELINE is not None:
128
+ return
129
+ print("[NeAR] Loading Hunyuan geometry pipeline…", flush=True)
130
+ GEOMETRY_PIPELINE = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained("tencent/Hunyuan3D-2.1")
131
+ if _CPU_PRELOAD_AT_START:
132
+ GEOMETRY_PIPELINE.to("cpu")
133
+
134
+
135
+ def run_model_cpu_preload_blocking() -> None:
136
+ """Load weights on CPU before Gradio binds a port (no GPU lease)."""
137
+ if not _CPU_PRELOAD_AT_START:
138
+ return
139
+ with _MODEL_LOCK:
140
+ _ensure_near_loaded_on_cpu_locked()
141
+ _ensure_geometry_loaded_on_cpu_locked()
142
+ print("[NeAR] CPU preload done.", flush=True)
143
+
144
+
145
+ def ensure_near_on_cuda() -> None:
146
+ global _NEAR_ON_CUDA
147
+ with _MODEL_LOCK:
148
+ _ensure_near_loaded_on_cpu_locked()
149
+ if torch.cuda.is_available() and not _NEAR_ON_CUDA:
150
+ assert PIPELINE is not None
151
+ PIPELINE.to("cuda")
152
+ _NEAR_ON_CUDA = True
153
+
154
+
155
+ def ensure_geometry_on_cuda() -> None:
156
+ global _GEOMETRY_ON_CUDA
157
+ with _MODEL_LOCK:
158
+ _ensure_geometry_loaded_on_cpu_locked()
159
+ if torch.cuda.is_available() and not _GEOMETRY_ON_CUDA:
160
+ assert GEOMETRY_PIPELINE is not None
161
+ GEOMETRY_PIPELINE.to("cuda")
162
+ _GEOMETRY_ON_CUDA = True
163
+
164
+
165
+ def _try_release_cuda_memory() -> None:
166
+ gc.collect()
167
+ if torch.cuda.is_available():
168
+ torch.cuda.empty_cache()
169
 
170
+
171
+ def _save_slat_npz(slat: Any, path: Path) -> None:
172
+ feats = slat.feats.detach().cpu().numpy()
173
+ coords = slat.coords.detach().cpu().numpy()
174
+ np.savez_compressed(path, feats=feats, coords=coords)
 
 
 
 
175
 
176
 
177
  def get_file_path(file_obj: Any) -> Optional[str]:
 
186
  return None
187
 
188
 
189
+ def start_session(req: gr.Request) -> None:
190
+ user_dir = CACHE_DIR / str(req.session_hash)
191
+ os.makedirs(user_dir, exist_ok=True)
192
+
193
+
194
+ def end_session(req: gr.Request) -> None:
195
+ user_dir = CACHE_DIR / str(req.session_hash)
196
+ shutil.rmtree(user_dir, ignore_errors=True)
197
 
 
 
198
 
199
+ def set_tone_mapper(view_name: str) -> None:
200
  if view_name and PIPELINE is not None:
201
  PIPELINE.setup_tone_mapper(view_name)
202
 
203
+
204
+ def preview_hdri(hdri_file_obj: Any) -> tuple[Optional[np.ndarray], str]:
205
+ hdri_path = get_file_path(hdri_file_obj)
206
+ if not hdri_path:
207
+ return None, "Upload an HDRI `.exr`."
208
+ import pyexr # pyright: ignore[reportMissingImports]
209
+
210
+ hdri_np = pyexr.read(hdri_path)[..., :3]
211
+ tm = ToneMapper(view="Khronos PBR Neutral")
212
+ prev = tm.hdr_to_ldr(hdri_np)
213
+ prev = (np.clip(prev, 0, 1) * 255).astype(np.uint8)
214
+ return prev, f"HDRI **{Path(hdri_path).name}**"
215
+
216
+
217
+ def _ensure_rgba(img: Image.Image) -> Image.Image:
218
+ return img if img.mode == "RGBA" else img.convert("RGBA")
219
 
220
 
221
  def _preprocess_image_rgba_light(input_image: Image.Image) -> Image.Image:
 
224
  if image.mode == "RGBA":
225
  alpha = np.array(image)[:, :, 3]
226
  has_alpha = not np.all(alpha == 255)
 
227
  if has_alpha:
228
  output = image
229
  else:
230
  rgb = image.convert("RGB")
231
  max_size = max(rgb.size)
232
+ scale = min(1, 1024 / max_size) if max_size else 1
233
  if scale < 1:
234
  rgb = rgb.resize(
235
  (int(rgb.width * scale), int(rgb.height * scale)),
236
  Image.Resampling.LANCZOS,
237
  )
238
  output = LIGHT_PREPROCESSOR(rgb)
 
239
  if output.mode != "RGBA":
240
  output = output.convert("RGBA")
241
  output_np = np.array(output)
 
261
  return output.crop(padded_bbox).resize((518, 518), Image.Resampling.LANCZOS).convert("RGBA")
262
 
263
 
264
+ @torch.inference_mode()
265
+ def preprocess_image_only(image_input: Optional[Image.Image]) -> Optional[Image.Image]:
266
+ if image_input is None:
267
+ return None
268
+ return _preprocess_image_rgba_light(image_input)
 
 
 
 
 
 
 
 
 
 
 
269
 
270
 
271
+ def _flatten_rgba_on_matte(image: Image.Image, matte_rgb: tuple[float, float, float]) -> Image.Image:
272
+ return NeARImageToRelightable3DPipeline.flatten_rgba_on_matte(image, matte_rgb)
273
 
274
 
275
+ def _require_slat_path(st: Dict[str, Any]) -> str:
276
+ p = st.get("slat_path")
277
+ if not p or not os.path.isfile(str(p)):
278
+ raise gr.Error("Generate or load a SLaT first.")
279
+ return str(p)
280
 
281
 
282
+ def _require_hdri_path(hdri_obj: Any) -> str:
283
+ p = get_file_path(hdri_obj)
284
+ if not p or not os.path.isfile(p):
285
+ raise gr.Error("Upload an HDRI `.exr`.")
286
+ return p
287
 
288
 
289
  @GPU
 
291
  def generate_mesh(
292
  image_input: Optional[Image.Image],
293
  req: gr.Request,
294
+ progress: gr.Progress = gr.Progress(track_tqdm=True),
295
+ ) -> tuple[Dict[str, Any], str, str]:
296
+ ensure_geometry_on_cuda()
 
297
  if image_input is None:
298
+ raise gr.Error("Upload an input image.")
299
+ session_dir = CACHE_DIR / str(req.session_hash)
300
+ session_dir.mkdir(parents=True, exist_ok=True)
301
 
302
  rgba = _ensure_rgba(image_input)
303
  if rgba.size != (518, 518):
304
  rgba = _preprocess_image_rgba_light(rgba)
 
305
  mesh_rgb = _flatten_rgba_on_matte(rgba, (1.0, 1.0, 1.0))
306
  rgba.save(session_dir / "input_preprocessed_rgba.png")
307
  mesh_rgb.save(session_dir / "input_processed.png")
308
 
309
+ progress(0.5, desc="Geometry (Hunyuan)")
310
+ assert GEOMETRY_PIPELINE is not None
311
  mesh = GEOMETRY_PIPELINE(image=mesh_rgb)[0]
312
  mesh_path = session_dir / "initial_3d_shape.glb"
313
  mesh.export(mesh_path)
314
+ del mesh
315
+ _try_release_cuda_memory()
316
+
317
+ global _GEOMETRY_ON_CUDA
318
+ if _OFFLOAD_GEOMETRY_AFTER_MESH and GEOMETRY_PIPELINE is not None and torch.cuda.is_available():
319
+ with _MODEL_LOCK:
320
+ GEOMETRY_PIPELINE.to("cpu")
321
+ _GEOMETRY_ON_CUDA = False
322
+ _try_release_cuda_memory()
323
 
324
+ state: Dict[str, Any] = {
 
 
325
  "mesh_path": str(mesh_path),
 
326
  "slat_path": None,
327
+ "processed_image_path": str(session_dir / "input_processed.png"),
328
  }
329
+ return state, str(mesh_path), "**① Mesh ready** — run **② SLaT** next."
 
 
 
 
330
 
331
 
332
  @GPU
333
  @torch.inference_mode()
334
+ def generate_slat(
335
  asset_state: Dict[str, Any],
336
  image_input: Optional[Image.Image],
337
  seed: int,
338
  req: gr.Request,
339
+ progress: gr.Progress = gr.Progress(track_tqdm=True),
340
+ ) -> tuple[Dict[str, Any], str]:
341
+ ensure_near_on_cuda()
342
+ if not asset_state.get("mesh_path"):
343
+ raise gr.Error("Run **Geometry** first.")
344
  mesh_path = asset_state["mesh_path"]
345
+ if not os.path.isfile(mesh_path):
346
+ raise gr.Error("Mesh missing — regenerate geometry.")
347
 
348
  if image_input is None:
349
+ raise gr.Error("Image required for SLaT.")
350
+ session_dir = CACHE_DIR / str(req.session_hash)
351
 
352
+ progress(0.15, desc="Load mesh")
353
  mesh = trimesh.load(mesh_path, force="mesh")
354
  rgba = _ensure_rgba(image_input)
355
  if rgba.size != (518, 518):
356
  rgba = _preprocess_image_rgba_light(rgba)
357
  slat_rgb = _flatten_rgba_on_matte(rgba, (0.0, 0.0, 0.0))
358
 
359
+ assert PIPELINE is not None
360
+ progress(0.35, desc="SLaT coords")
361
  coords = PIPELINE.shape_to_coords(mesh)
362
+ del mesh
363
+ _try_release_cuda_memory()
364
 
365
+ progress(0.55, desc="SLaT sample")
366
  slat = PIPELINE.run_with_coords([slat_rgb], coords, seed=int(seed), preprocess_image=False)
367
+ del coords
368
 
369
+ slat_path = session_dir / "session_slat.npz"
370
+ _save_slat_npz(slat, slat_path)
371
+ del slat
372
+ _try_release_cuda_memory()
373
 
374
+ new_state = {**asset_state, "slat_path": str(slat_path)}
375
+ return new_state, f"**② SLaT saved** `{slat_path.name}`"
 
 
 
 
 
 
 
 
 
 
 
 
 
376
 
377
 
378
+ def load_slat_npz(
 
 
 
 
 
 
379
  slat_upload: Any,
380
  slat_path_text: str,
381
  req: gr.Request,
382
+ ) -> tuple[Dict[str, Any], str]:
383
+ resolved = get_file_path(slat_upload) or (slat_path_text.strip() if slat_path_text else "")
384
+ if not resolved or not os.path.isfile(resolved):
385
+ raise gr.Error("Provide a valid `.npz` path or upload.")
386
+ session_dir = CACHE_DIR / str(req.session_hash)
387
+ session_dir.mkdir(parents=True, exist_ok=True)
388
+ state: Dict[str, Any] = {
389
+ "mesh_path": None,
390
+ "slat_path": resolved,
391
+ "processed_image_path": None,
392
+ }
393
+ return state, f"SLaT loaded: **{Path(resolved).name}**"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
394
 
395
 
396
  @GPU
 
404
  fov: float,
405
  radius: float,
406
  resolution: int,
407
+ progress: gr.Progress = gr.Progress(track_tqdm=True),
408
+ ) -> tuple[Any, Any, Any, Any, Any, str]:
409
+ ensure_near_on_cuda()
410
+ slat_path = _require_slat_path(asset_state)
411
+ hdri_path = _require_hdri_path(hdri_file_obj)
412
+ assert PIPELINE is not None
413
+ progress(0.2, desc="Load SLaT / HDRI")
414
+ slat = PIPELINE.load_slat(slat_path)
415
+ hdri_np = PIPELINE.load_hdri(hdri_path)
416
+ progress(0.6, desc="Render")
417
  views = PIPELINE.render_view(
418
+ slat,
419
+ hdri_np,
420
+ yaw_deg=yaw,
421
+ pitch_deg=pitch,
422
+ fov=fov,
423
+ radius=radius,
424
+ hdri_rot_deg=hdri_rot,
425
+ resolution=int(resolution),
 
 
 
 
426
  )
427
+ del slat, hdri_np
428
+ _try_release_cuda_memory()
429
+ msg = f"Preview · yaw {yaw:.0f}° pitch {pitch:.0f}°"
430
  return (
431
  views["color"],
432
  views["base_color"],
 
438
 
439
 
440
  @GPU
441
+ def export_pbr_glb(
 
442
  asset_state: Dict[str, Any],
443
  hdri_file_obj: Any,
444
  hdri_rot: float,
445
+ simplify: float,
446
+ texture_size: int,
 
 
 
 
447
  req: gr.Request,
448
+ progress: gr.Progress = gr.Progress(track_tqdm=True),
449
+ ) -> tuple[str, str]:
450
+ ensure_near_on_cuda()
451
+ slat_path = _require_slat_path(asset_state)
452
+ hdri_path = _require_hdri_path(hdri_file_obj)
453
  session_dir = CACHE_DIR / str(req.session_hash)
454
+ assert PIPELINE is not None
455
+ progress(0.15, desc="Load assets")
456
+ slat = PIPELINE.load_slat(slat_path)
457
+ hdri_np = PIPELINE.load_hdri(hdri_path)
458
+ progress(0.5, desc="Bake PBR GLB")
459
+ glb = PIPELINE.export_glb_from_slat(
460
+ slat,
461
+ hdri_np,
462
+ hdri_rot_deg=hdri_rot,
463
+ base_mesh=None,
464
+ simplify=float(simplify),
465
+ texture_size=int(texture_size),
466
+ fill_holes=True,
467
  )
468
+ del slat, hdri_np
469
+ _try_release_cuda_memory()
470
+ out = session_dir / "near_pbr.glb"
471
+ glb.export(out)
472
+ del glb
473
+ _try_release_cuda_memory()
474
+ return str(out), f"**③ PBR GLB** — `{out.name}`"
475
 
476
 
477
  @GPU
478
  @torch.inference_mode()
479
+ def render_dual_lighting_videos(
480
  asset_state: Dict[str, Any],
481
  hdri_file_obj: Any,
482
+ hdri_rot: float,
483
  fps: int,
484
+ num_cam_views: int,
485
+ num_hdri_frames: int,
486
  yaw: float,
487
  pitch: float,
488
  fov: float,
489
  radius: float,
 
 
490
  req: gr.Request,
491
+ progress: gr.Progress = gr.Progress(track_tqdm=True),
492
+ ) -> tuple[str, str, str, str]:
493
+ """One click: (1) camera orbit composite, (2) HDRI rotation composite, (3) env roll."""
494
+ ensure_near_on_cuda()
495
+ slat_path = _require_slat_path(asset_state)
496
+ hdri_path = _require_hdri_path(hdri_file_obj)
497
  session_dir = CACHE_DIR / str(req.session_hash)
498
+ assert PIPELINE is not None
499
+
500
+ progress(0.05, desc="Load SLaT / HDRI")
501
+ slat = PIPELINE.load_slat(slat_path)
502
+ hdri_np = PIPELINE.load_hdri(hdri_path)
503
+
504
+ progress(0.12, desc="Video A: camera orbit (color + PBR + shadow strip)")
505
+ cam_frames = PIPELINE.render_camera_path_video(
506
+ slat,
507
+ hdri_np,
508
+ num_views=int(num_cam_views),
509
+ fov=float(fov),
510
+ radius=float(radius),
511
+ hdri_rot_deg=float(hdri_rot),
512
+ full_video=True,
513
+ shadow_video=True,
514
+ bg_color=(1, 1, 1),
515
+ verbose=True,
516
+ )
517
+ p_cam = session_dir / "video_camera_orbit_full.mp4"
518
+ imageio.mimsave(p_cam, cam_frames, fps=int(fps))
519
+ del cam_frames
520
+ _try_release_cuda_memory()
521
+
522
+ progress(0.48, desc="Video B: HDRI rotation (same strip layout)")
523
+ roll_frames, hdri_render_frames = PIPELINE.render_hdri_rotation_video(
524
+ slat,
525
+ hdri_np,
526
+ num_frames=int(num_hdri_frames),
527
+ yaw_deg=float(yaw),
528
+ pitch_deg=float(pitch),
529
+ fov=float(fov),
530
+ radius=float(radius),
531
+ full_video=True,
532
+ shadow_video=True,
533
+ bg_color=(1, 1, 1),
534
+ verbose=True,
535
  )
536
+ p_lit = session_dir / "video_hdri_rotation_full.mp4"
537
+ p_roll = session_dir / "video_hdri_environment_roll.mp4"
538
+ imageio.mimsave(p_lit, hdri_render_frames, fps=int(fps))
539
+ imageio.mimsave(p_roll, roll_frames, fps=int(fps))
540
+ del roll_frames, hdri_render_frames
541
+ del slat, hdri_np
542
+ _try_release_cuda_memory()
543
 
544
+ msg = "**④ Videos** — camera orbit + HDRI lighting + env roll."
545
+ return str(p_cam), str(p_lit), str(p_roll), msg
546
 
547
+
548
+ def clear_session_cache(req: gr.Request) -> str:
 
 
 
 
 
 
 
 
 
549
  session_dir = CACHE_DIR / str(req.session_hash)
550
+ shutil.rmtree(session_dir, ignore_errors=True)
551
+ session_dir.mkdir(parents=True, exist_ok=True)
552
+ _try_release_cuda_memory()
553
+ return "Session cache cleared."
554
 
555
+
556
+ MIN_CSS = """
557
+ .gradio-container { max-width: 100% !important; }
558
+ footer { display: none !important; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
559
  """
560
 
561
+ THEME = gr.themes.Base(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.blue)
 
 
 
562
 
563
 
564
  def build_app() -> gr.Blocks:
565
+ with gr.Blocks(title="NeAR", theme=THEME, css=MIN_CSS, fill_width=True) as demo:
 
 
 
 
 
566
  asset_state = gr.State({})
567
 
568
  gr.Markdown(
569
+ "### NeAR — relightable 3D (ZeroGPU)\n"
570
+ "Linear steps: **① Geometry** **② SLaT** → preview / **③ PBR GLB** → **④ dual videos**. "
571
+ "SLaT is stored on disk only. Previous app UI: `app_legacy.py`."
 
 
 
 
572
  )
573
 
574
+ with gr.Row():
575
+ with gr.Column(scale=1):
576
+ gr.Markdown("**Input**")
577
+ image_input = gr.Image(
578
+ label="Image (RGBA)",
579
+ type="pil",
580
+ image_mode="RGBA",
581
+ value=str(DEFAULT_IMAGE) if DEFAULT_IMAGE.exists() else None,
582
+ height=320,
583
+ )
584
+ seed = gr.Slider(0, MAX_SEED, value=43, step=1, label="SLaT seed")
585
+ btn_mesh = gr.Button("① Geometry (mesh)", variant="primary")
586
+ btn_slat = gr.Button("② SLaT (from mesh + image)", variant="primary")
587
+ gr.Markdown("Or load `.npz`:")
588
+ slat_up = gr.File(label="SLaT .npz", file_types=[".npz"])
589
+ slat_txt = gr.Textbox(label="Or path", placeholder="/path/to/slat.npz")
590
+ btn_load_slat = gr.Button("Load SLaT file")
591
+
592
+ gr.Markdown("**HDRI**")
593
+ hdri_file = gr.File(
594
+ label="Environment .exr",
595
+ file_types=[".exr"],
596
+ value=str(DEFAULT_HDRI) if DEFAULT_HDRI.exists() else None,
597
+ )
598
+ hdri_preview = gr.Image(label="HDRI preview", interactive=False, height=120)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
599
 
600
+ tone_mapper_name = gr.Dropdown(
601
+ choices=AVAILABLE_TONE_MAPPERS,
602
+ value="AgX",
603
+ label="Tone mapper",
604
+ )
605
+ hdri_rot = gr.Slider(0, 360, value=0, step=1, label="HDRI rotation °")
606
+ with gr.Accordion("Preview camera", open=False):
607
+ yaw = gr.Slider(0, 360, value=0, step=0.5, label="Yaw °")
608
+ pitch = gr.Slider(-90, 90, value=0, step=0.5, label="Pitch °")
609
+ fov = gr.Slider(10, 70, value=40, step=1, label="FoV")
610
+ radius = gr.Slider(1.0, 4.0, value=2.0, step=0.05, label="Radius")
611
+ resolution = gr.Slider(256, 1024, value=512, step=256, label="Preview res")
612
+
613
+ with gr.Accordion("Export / video", open=True):
614
+ simplify = gr.Slider(0.8, 0.99, value=0.95, step=0.01, label="GLB simplify")
615
+ texture_size = gr.Slider(512, 4096, value=2048, step=512, label="Texture px")
616
+ fps = gr.Slider(8, 48, value=24, step=1, label="Video FPS")
617
+ num_cam = gr.Slider(8, 96, value=36, step=1, label="Camera path frames")
618
+ num_hdri = gr.Slider(8, 96, value=36, step=1, label="HDRI rotation frames")
619
+
620
+ btn_preview = gr.Button("Render preview (still)")
621
+ btn_glb = gr.Button("③ Export PBR GLB", variant="primary")
622
+ btn_videos = gr.Button("④ Dual lighting videos (one click)", variant="primary")
623
+ btn_clear = gr.Button("Clear session cache")
624
+
625
+ status = gr.Markdown("Ready.")
626
+
627
+ with gr.Column(scale=2):
628
+ mesh_view = gr.Model3D(label="Mesh", height=420)
629
+ pbr_view = gr.Model3D(label="PBR GLB", height=420)
630
  with gr.Row():
631
+ c0 = gr.Image(label="Relit", height=280)
632
+ bc = gr.Image(label="Base color", height=280)
633
+ with gr.Row():
634
+ mt = gr.Image(label="Metallic", height=280)
635
+ rg = gr.Image(label="Roughness", height=280)
636
+ sh = gr.Image(label="Shadow", height=280)
637
+ with gr.Row():
638
+ v_cam = gr.Video(label="A: Camera orbit (full strip)", height=260)
639
+ v_hdri = gr.Video(label="B: HDRI rotation (full strip)", height=260)
640
+ v_roll = gr.Video(label="HDRI env roll", height=180)
641
 
642
+ tone_mapper_name.change(set_tone_mapper, [tone_mapper_name], [])
 
 
 
643
 
644
+ for _t in (hdri_file.upload, hdri_file.change):
645
+ _t(preview_hdri, [hdri_file], [hdri_preview, status])
646
 
647
+ image_input.upload(preprocess_image_only, [image_input], [image_input])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
648
 
649
  demo.load(start_session)
650
  demo.unload(end_session)
651
 
652
+ btn_mesh.click(generate_mesh, [image_input], [asset_state, mesh_view, status])
653
+ btn_slat.click(generate_slat, [asset_state, image_input, seed], [asset_state, status])
654
+ btn_load_slat.click(load_slat_npz, [slat_up, slat_txt], [asset_state, status])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
655
 
656
+ btn_preview.click(
 
 
 
 
 
 
657
  render_preview,
658
+ [asset_state, hdri_file, hdri_rot, yaw, pitch, fov, radius, resolution],
659
+ [c0, bc, mt, rg, sh, status],
 
 
 
 
 
 
 
 
660
  )
661
+ btn_glb.click(
662
+ export_pbr_glb,
663
+ [asset_state, hdri_file, hdri_rot, simplify, texture_size],
664
+ [pbr_view, status],
 
 
665
  )
666
+ btn_videos.click(
667
+ render_dual_lighting_videos,
668
+ [asset_state, hdri_file, hdri_rot, fps, num_cam, num_hdri, yaw, pitch, fov, radius],
669
+ [v_cam, v_hdri, v_roll, status],
 
 
670
  )
671
+ btn_clear.click(clear_session_cache, [], [status])
672
 
 
 
 
 
 
673
  return demo
674
 
675
 
676
+ demo = build_app()
677
+ demo.queue(max_size=8)
678
+
679
 
680
+ def _near_launch() -> None:
681
+ run_model_cpu_preload_blocking()
682
+ demo.launch(mcp_server=True)
683
 
 
684
 
685
  if __name__ == "__main__":
686
+ _near_launch()
 
 
app_legacy.py ADDED
@@ -0,0 +1,1005 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import shutil
4
+ import threading
5
+ import time
6
+ from pathlib import Path
7
+ from typing import Any, Dict, Optional
8
+
9
+ import gradio as gr
10
+
11
+ try:
12
+ import spaces # pyright: ignore[reportMissingImports]
13
+ except ImportError:
14
+ spaces = None
15
+ import imageio
16
+ import numpy as np
17
+ import torch
18
+ import trimesh
19
+ from PIL import Image
20
+ from simple_ocio import ToneMapper # pyright: ignore[reportMissingImports]
21
+
22
+ sys.path.insert(0, "./hy3dshape")
23
+ os.environ.setdefault("ATTN_BACKEND", "xformers")
24
+ os.environ.setdefault("SPCONV_ALGO", "native")
25
+ os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "7.5;8.0;8.6;8.9;9.0")
26
+
27
+
28
+ from trellis.pipelines import NeARImageToRelightable3DPipeline
29
+ from hy3dshape.pipelines import Hunyuan3DDiTFlowMatchingPipeline # pyright: ignore[reportMissingImports]
30
+
31
+ GPU = spaces.GPU if spaces is not None else (lambda f: f)
32
+
33
+ APP_DIR = Path(__file__).resolve().parent
34
+ CACHE_DIR = APP_DIR / "tmp_gradio"
35
+ CACHE_DIR.mkdir(exist_ok=True)
36
+
37
+
38
+ def _path_is_git_lfs_pointer(p: Path) -> bool:
39
+ try:
40
+ if not p.is_file():
41
+ return False
42
+ if p.stat().st_size > 512:
43
+ return False
44
+ head = p.read_bytes()[:120]
45
+ return head.startswith(b"version https://git-lfs.github.com/spec/v1")
46
+ except OSError:
47
+ return False
48
+
49
+
50
+ def _warn_example_assets() -> None:
51
+ img_dir = APP_DIR / "assets/example_image"
52
+ if not img_dir.is_dir():
53
+ print(
54
+ "[NeAR] WARNING: assets/example_image/ is missing — commit and push the full assets/ tree.",
55
+ flush=True,
56
+ )
57
+ return
58
+ sample = img_dir / "T.png"
59
+ if sample.is_file() and _path_is_git_lfs_pointer(sample):
60
+ print(
61
+ "[NeAR] WARNING: assets look like Git LFS pointers (not real PNG/NPZ/EXR bytes). "
62
+ "Run: git lfs install && git lfs push --all origin (from a clone that has full files).",
63
+ flush=True,
64
+ )
65
+
66
+
67
+ _warn_example_assets()
68
+
69
+ DEFAULT_IMAGE = APP_DIR / "assets/example_image/T.png"
70
+ DEFAULT_HDRI = APP_DIR / "assets/hdris/studio_small_03_1k.exr"
71
+ MAX_SEED = np.iinfo(np.int32).max
72
+
73
+
74
+ def start_session(req: gr.Request):
75
+ user_dir = CACHE_DIR / str(req.session_hash)
76
+ os.makedirs(user_dir, exist_ok=True)
77
+
78
+
79
def end_session(req: gr.Request):
    """Remove the per-session scratch directory and drop any in-memory SLaT (``demo.unload``)."""
    user_dir = CACHE_DIR / str(req.session_hash)
    # ignore_errors: the directory may never have been created (load callback skipped,
    # worker restart) and an unload hook must not raise FileNotFoundError.
    shutil.rmtree(user_dir, ignore_errors=True)
    _SESSION_SLAT.pop(str(req.session_hash), None)
83
+
84
+
85
def get_file_path(file_obj: Any) -> Optional[str]:
    """Extract a filesystem path from a Gradio file value (str, file wrapper, or None)."""
    if file_obj is None:
        return None
    if isinstance(file_obj, str):
        return file_obj
    # Gradio versions disagree on the attribute name; probe the known candidates in order.
    candidates = (getattr(file_obj, attr, None) for attr in ("name", "path", "value"))
    return next((c for c in candidates if isinstance(c, str) and c), None)
95
+
96
+
97
# Heavy pipelines are populated at module import time (see bottom of this file).
PIPELINE: Optional[NeARImageToRelightable3DPipeline] = None
GEOMETRY_PIPELINE: Optional[Hunyuan3DDiTFlowMatchingPipeline] = None
tone_mapper = ToneMapper()
# Tone-mapping views offered in the UI dropdown; fall back to AgX when the lib
# does not expose `available_views`.
AVAILABLE_TONE_MAPPERS = getattr(tone_mapper, "available_views", ["AgX"])

# In-process SLaT for the image workflow (not serialized through Gradio State).
_SESSION_SLAT: Dict[str, Any] = {}
104
+
105
def set_tone_mapper(view_name: str):
    """Switch the pipeline's tone-mapping view; no-op before the pipeline is loaded."""
    if not view_name or PIPELINE is None:
        return
    PIPELINE.setup_tone_mapper(view_name)
108
+
109
from hy3dshape.rembg import BackgroundRemover  # pyright: ignore[reportMissingImports]
# Shared background remover, used when an input image has no usable alpha channel.
LIGHT_PREPROCESSOR = BackgroundRemover()
111
+
112
+
113
def _preprocess_image_rgba_light(input_image: Image.Image) -> Image.Image:
    """Normalize an input image to a 518x518 RGBA crop centered on the subject.

    If the image already carries a non-trivial alpha channel it is kept as-is;
    otherwise the background is removed with LIGHT_PREPROCESSOR. The subject is
    then cropped to its alpha bounding box (with ~20% padding) and resized.
    """
    image = _ensure_rgba(input_image)
    has_alpha = False
    if image.mode == "RGBA":  # always true after _ensure_rgba; kept as a guard
        alpha = np.array(image)[:, :, 3]
        # "Has alpha" means the channel actually varies (not uniformly opaque).
        has_alpha = not np.all(alpha == 255)

    if has_alpha:
        output = image
    else:
        # No informative alpha: downscale to <=1024px and run background removal.
        rgb = image.convert("RGB")
        max_size = max(rgb.size)
        scale = min(1, 1024 / max_size)
        if scale < 1:
            rgb = rgb.resize(
                (int(rgb.width * scale), int(rgb.height * scale)),
                Image.Resampling.LANCZOS,
            )
        output = LIGHT_PREPROCESSOR(rgb)

    if output.mode != "RGBA":
        output = output.convert("RGBA")
    output_np = np.array(output)
    alpha = output_np[:, :, 3]
    # Bounding box of confidently-opaque pixels (alpha > ~204 of 255).
    bbox = np.argwhere(alpha > 0.8 * 255)
    if bbox.size == 0:
        # Fully transparent image: nothing to crop, just resize.
        return output.resize((518, 518), Image.Resampling.LANCZOS).convert("RGBA")
    crop_bbox = (  # (left, top, right, bottom) in pixel coordinates
        int(np.min(bbox[:, 1])),
        int(np.min(bbox[:, 0])),
        int(np.max(bbox[:, 1])),
        int(np.max(bbox[:, 0])),
    )
    center = ((crop_bbox[0] + crop_bbox[2]) / 2, (crop_bbox[1] + crop_bbox[3]) / 2)
    size = max(crop_bbox[2] - crop_bbox[0], crop_bbox[3] - crop_bbox[1])
    size = int(size * 1.2)  # 20% padding around the square crop
    padded_bbox = (
        center[0] - size // 2,
        center[1] - size // 2,
        center[0] + size // 2,
        center[1] + size // 2,
    )
    return output.crop(padded_bbox).resize((518, 518), Image.Resampling.LANCZOS).convert("RGBA")
156
+
157
+
158
def _flatten_rgba_on_matte(image: Image.Image, matte_rgb: tuple[float, float, float]) -> Image.Image:
    """Composite an RGBA image over a solid matte color (thin pipeline wrapper)."""
    flatten = NeARImageToRelightable3DPipeline.flatten_rgba_on_matte
    return flatten(image, matte_rgb)
160
+
161
+
162
def preview_hdri(hdri_file_obj: Any):
    """Tone-map an uploaded HDRI to an 8-bit preview image plus a status message."""
    path = get_file_path(hdri_file_obj)
    if not path:
        return None, "Upload an HDRI `.exr` (left column)."
    import pyexr  # pyright: ignore[reportMissingImports]

    radiance = pyexr.read(path)[..., :3]
    ldr = ToneMapper(view="Khronos PBR Neutral").hdr_to_ldr(radiance)
    preview = (np.clip(ldr, 0, 1) * 255).astype(np.uint8)
    name = Path(path).name
    return preview, f"HDRI **{name}** — preview updated."
174
+
175
+
176
def switch_asset_source(mode: str):
    """Select the input tab matching the chosen asset source mode."""
    tab_id = 1 if mode == "From Existing SLaT" else 0
    return gr.Tabs(selected=tab_id)
178
+
179
+
180
def _ensure_rgba(img: Image.Image) -> Image.Image:
    """Return *img* unchanged when already RGBA, otherwise a converted copy."""
    return img if img.mode == "RGBA" else img.convert("RGBA")
184
+
185
+
186
@torch.inference_mode()
def preprocess_image_only(image_input: Optional[Image.Image]):
    """Run the lightweight RGBA preprocessing; passes None through untouched."""
    return None if image_input is None else _preprocess_image_rgba_light(image_input)
191
+
192
+
193
@GPU
@torch.inference_mode()
def generate_mesh(
    image_input: Optional[Image.Image],
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Step ①: run Hunyuan3D on the (preprocessed) image and save the base mesh.

    Returns the fresh asset state, the mesh path for the viewer, and a status line.
    """
    if image_input is None:
        raise gr.Error("Please upload an input image.")
    session_dir = CACHE_DIR / str(req.session_hash)

    rgba = _ensure_rgba(image_input)
    if rgba.size != (518, 518):
        rgba = _preprocess_image_rgba_light(rgba)
    # Hunyuan3D mesh: composite onto white. SLaT step uses black matte separately.
    mesh_rgb = _flatten_rgba_on_matte(rgba, (1.0, 1.0, 1.0))
    rgba.save(session_dir / "input_preprocessed_rgba.png")
    mesh_rgb.save(session_dir / "input_processed.png")

    progress(0.6, desc="Generating geometry")
    mesh = GEOMETRY_PIPELINE(image=mesh_rgb)[0]
    mesh_path = session_dir / "initial_3d_shape.glb"
    mesh.export(mesh_path)

    # A new mesh invalidates any previously generated SLaT for this session.
    _SESSION_SLAT.pop(str(req.session_hash), None)
    state = {
        "mode": "image",
        "mesh_path": str(mesh_path),
        "processed_image_path": str(session_dir / "input_processed.png"),
        "slat_path": None,
        "slat_in_memory": False,
    }
    return (
        state,
        str(mesh_path),
        "**Mesh ready** — Click **② Generate / Load SLaT** to continue.",
    )
231
+
232
+
233
@GPU
@torch.inference_mode()
def _generate_slat_inner(
    asset_state: Dict[str, Any],
    image_input: Optional[Image.Image],
    seed: int,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """GPU body for SLaT generation — must be called from within a @GPU context."""
    # NOTE(review): this function is decorated with @GPU *and* called from prepare_slat,
    # which is itself @GPU — confirm nested ZeroGPU acquisition is intended/harmless.
    if not asset_state or not asset_state.get("mesh_path"):
        raise gr.Error("Please run ① Generate Mesh first.")
    mesh_path = asset_state["mesh_path"]
    if not os.path.exists(mesh_path):
        raise gr.Error("Mesh file not found — please regenerate the mesh.")

    if image_input is None:
        raise gr.Error("Preprocessed image not found — please upload the image again.")

    progress(0.1, desc="Loading mesh")
    mesh = trimesh.load(mesh_path, force="mesh")
    rgba = _ensure_rgba(image_input)
    if rgba.size != (518, 518):
        rgba = _preprocess_image_rgba_light(rgba)
    # SLaT conditioning composites on a black matte (the mesh step used white).
    slat_rgb = _flatten_rgba_on_matte(rgba, (0.0, 0.0, 0.0))

    progress(0.3, desc="Computing SLaT coordinates")
    coords = PIPELINE.shape_to_coords(mesh)

    progress(0.6, desc="Generating SLaT")
    slat = PIPELINE.run_with_coords([slat_rgb], coords, seed=int(seed), preprocess_image=False)

    # Keep the SLaT object in process memory; it is not serializable through gr.State.
    _SESSION_SLAT[str(req.session_hash)] = slat
    new_state = {**asset_state, "slat_path": None, "slat_in_memory": True}
    return new_state, f"**Asset ready** — SLaT generated (seed `{seed}`)."
268
+
269
+
270
def _load_slat_file_inner(slat_upload: Any, slat_path_text: str, req: gr.Request):
    """Resolve a SLaT .npz from the upload widget or the text box and record it in state."""
    typed = slat_path_text.strip() if slat_path_text else ""
    resolved = get_file_path(slat_upload) or typed
    if not resolved:
        raise gr.Error("Please provide a SLaT `.npz` path or upload one.")
    if not os.path.exists(resolved):
        raise gr.Error(f"SLaT file not found: `{resolved}`")
    # A path-based SLaT replaces any in-memory SLaT from the image workflow.
    _SESSION_SLAT.pop(str(req.session_hash), None)
    state = {
        "mode": "slat",
        "slat_path": resolved,
        "mesh_path": None,
        "processed_image_path": None,
        "slat_in_memory": False,
    }
    return state, f"SLaT **{Path(resolved).name}** loaded."
285
+
286
+
287
@GPU
@torch.inference_mode()
def prepare_slat(
    source_mode: str,
    asset_state: Dict[str, Any],
    image_input: Optional[Image.Image],
    seed: int,
    slat_upload: Any,
    slat_path_text: str,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Step ②: generate a SLaT from the image, or load an existing .npz, per *source_mode*."""
    if source_mode != "From Image":
        return _load_slat_file_inner(slat_upload, slat_path_text, req)
    return _generate_slat_inner(asset_state, image_input, seed, req, progress)
302
+
303
+
304
def require_asset_state(asset_state: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    """Validate that a SLaT exists (in memory or on disk); raise gr.Error otherwise."""
    has_slat = bool(asset_state) and bool(
        asset_state.get("slat_in_memory") or asset_state.get("slat_path")
    )
    if not has_slat:
        raise gr.Error("Please generate or load a SLaT first.")
    return asset_state
310
+
311
+
312
def load_asset_and_hdri(asset_state: Dict[str, Any], hdri_file_obj: Any, req: gr.Request):
    """Fetch the session's SLaT (memory or disk) and the HDRI as loaded by the pipeline."""
    asset_state = require_asset_state(asset_state)
    hdri_path = get_file_path(hdri_file_obj)
    if not hdri_path:
        raise gr.Error("Please upload an HDRI `.exr` file.")

    if asset_state.get("slat_in_memory"):
        # Image workflow keeps the SLaT in this process; it dies with the worker.
        slat = _SESSION_SLAT.get(str(req.session_hash))
        if slat is None:
            raise gr.Error("SLaT session expired — run **② Generate / Load SLaT** again.")
        return slat, PIPELINE.load_hdri(hdri_path)

    slat_path = asset_state.get("slat_path")
    if not slat_path:
        raise gr.Error("Please generate or load a SLaT first.")
    return PIPELINE.load_slat(slat_path), PIPELINE.load_hdri(hdri_path)
328
+
329
+
330
@GPU
@torch.inference_mode()
def render_preview(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    hdri_rot: float,
    yaw: float,
    pitch: float,
    fov: float,
    radius: float,
    resolution: int,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Render one relit view plus the PBR channel maps (base color / metallic / roughness / shadow)."""
    started = time.time()
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.1, desc="Loading SLaT and HDRI")
    slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)

    progress(0.5, desc="Rendering")
    views = PIPELINE.render_view(
        slat, hdri_np,
        yaw_deg=yaw, pitch_deg=pitch, fov=fov, radius=radius,
        hdri_rot_deg=hdri_rot, resolution=int(resolution),
    )
    # Persist each channel so users can grab individual maps from the session dir.
    for channel, img in views.items():
        img.save(session_dir / f"preview_{channel}.png")
    print(f"[NeAR] render_preview {time.time() - started:.1f}s", flush=True)

    msg = (
        f"**Preview done** — "
        f"yaw `{yaw:.0f}°` pitch `{pitch:.0f}°` · "
        f"fov `{fov:.0f}` radius `{radius:.1f}` · HDRI rot `{hdri_rot:.0f}°`"
    )
    ordered = tuple(views[k] for k in ("color", "base_color", "metallic", "roughness", "shadow"))
    return (*ordered, msg)
372
+
373
+
374
@GPU
@torch.inference_mode()
def render_camera_video(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    hdri_rot: float,
    fps: int,
    num_views: int,
    fov: float,
    radius: float,
    full_video: bool,
    shadow_video: bool,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Render an orbiting-camera video under a fixed HDRI and save it as MP4.

    Returns the video path (for the gr.Video output) and a status message.
    """
    t0 = time.time()
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.1, desc="Loading SLaT and HDRI")
    slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)

    progress(0.4, desc="Rendering camera path")
    frames = PIPELINE.render_camera_path_video(
        slat, hdri_np,
        num_views=int(num_views), fov=fov, radius=radius,
        hdri_rot_deg=hdri_rot, full_video=full_video, shadow_video=shadow_video,
        bg_color=(1, 1, 1), verbose=True,
    )
    video_path = session_dir / ("camera_path_full.mp4" if full_video else "camera_path.mp4")
    imageio.mimsave(video_path, frames, fps=int(fps))
    print(f"[NeAR] render_camera_video {time.time() - t0:.1f}s", flush=True)
    # Plain literal (was an f-string with no placeholders — ruff F541).
    return str(video_path), "**Camera path video saved**"
405
+
406
+
407
@GPU
@torch.inference_mode()
def render_hdri_video(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    fps: int,
    num_frames: int,
    yaw: float,
    pitch: float,
    fov: float,
    radius: float,
    full_video: bool,
    shadow_video: bool,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Render an HDRI-rotation video (fixed camera) plus the rolling environment panorama."""
    started = time.time()
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.1, desc="Loading SLaT and HDRI")
    slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)

    progress(0.4, desc="Rendering HDRI rotation")
    roll_frames, relit_frames = PIPELINE.render_hdri_rotation_video(
        slat, hdri_np,
        num_frames=int(num_frames), yaw_deg=yaw, pitch_deg=pitch,
        fov=fov, radius=radius, full_video=full_video, shadow_video=shadow_video,
        bg_color=(1, 1, 1), verbose=True,
    )
    roll_path = session_dir / "hdri_roll.mp4"
    relit_name = "hdri_rotation_full.mp4" if full_video else "hdri_rotation.mp4"
    relit_path = session_dir / relit_name
    imageio.mimsave(roll_path, roll_frames, fps=int(fps))
    imageio.mimsave(relit_path, relit_frames, fps=int(fps))
    print(f"[NeAR] render_hdri_video {time.time() - started:.1f}s", flush=True)
    return str(roll_path), str(relit_path), "**HDRI rotation video saved**"
441
+
442
+
443
@GPU
# Consistency fix: every other GPU entry point runs under inference_mode; texture
# baking is pure inference too, so autograd bookkeeping is wasted memory here.
@torch.inference_mode()
def export_glb(
    asset_state: Dict[str, Any],
    hdri_file_obj: Any,
    hdri_rot: float,
    simplify: float,
    texture_size: int,
    req: gr.Request,
    progress=gr.Progress(track_tqdm=True),
):
    """Bake PBR textures from the SLaT under the chosen HDRI and export a GLB.

    Returns the GLB path (for the Model3D viewer) and a status message.
    """
    t0 = time.time()
    session_dir = CACHE_DIR / str(req.session_hash)
    progress(0.1, desc="Loading SLaT and HDRI")
    slat, hdri_np = load_asset_and_hdri(asset_state, hdri_file_obj, req)

    progress(0.6, desc="Baking PBR textures")
    glb = PIPELINE.export_glb_from_slat(
        slat, hdri_np,
        hdri_rot_deg=hdri_rot, base_mesh=None,
        simplify=simplify, texture_size=int(texture_size), fill_holes=True,
    )
    glb_path = session_dir / "near_pbr.glb"
    glb.export(glb_path)
    print(f"[NeAR] export_glb {time.time() - t0:.1f}s", flush=True)
    return str(glb_path), f"PBR GLB exported: **{glb_path.name}**"
468
+
469
+
470
# App-wide CSS overrides (full-width layout, light HDRI cards, flat image frames,
# single-accent status strip, grid example galleries, hidden Gradio footer).
CUSTOM_CSS = """
.gradio-container { max-width: 100% !important; width: 100% !important; }
main.gradio-container { max-width: 100% !important; }
.gradio-wrap { max-width: 100% !important; }

/* Top header: TRELLIS-style left-aligned title + bullets */
.near-app-header {
    text-align: left !important;
    padding: 0.35rem 0 1.1rem 0 !important;
    margin: 0 !important;
}
.near-app-header .prose,
.near-app-header p { margin: 0 !important; }
.near-app-header h2 {
    font-size: clamp(1.35rem, 2.4vw, 1.85rem) !important;
    font-weight: 700 !important;
    letter-spacing: -0.02em !important;
    margin: 0 0 0.45rem 0 !important;
    line-height: 1.25 !important;
}
.near-app-header h2 a {
    color: var(--link-text-color, var(--color-accent)) !important;
    text-decoration: none !important;
}
.near-app-header h2 a:hover { text-decoration: underline !important; }
.near-app-header ul {
    margin: 0 !important;
    padding-left: 1.2rem !important;
    font-size: 0.88rem !important;
    color: #4b5563 !important;
    line-height: 1.45 !important;
}
.near-app-header li { margin: 0.15rem 0 !important; }

/* Left column: compact section labels (no numbered circles) */
.section-kicker {
    font-size: 0.7rem !important;
    font-weight: 700 !important;
    color: #9ca3af !important;
    text-transform: uppercase !important;
    letter-spacing: 0.08em !important;
    margin: 0 0 0.45rem 0 !important;
    padding: 0 !important;
}

/* HDRI file picker: light card instead of default dark block */
.hdri-upload-zone,
.hdri-file-input,
.hdri-upload-zone .upload-container,
.hdri-upload-zone [data-testid="file-upload"],
.hdri-file-input [data-testid="file-upload"],
.hdri-upload-zone .file-preview,
.hdri-file-input .file-preview,
.hdri-upload-zone .wrap,
.hdri-file-input .wrap,
.hdri-upload-zone .panel,
.hdri-file-input .panel {
    background: #f9fafb !important;
    border-color: #e5e7eb !important;
    color: #374151 !important;
}
.hdri-upload-zone .file-preview,
.hdri-file-input .file-preview { border-radius: 8px !important; }
.hdri-upload-zone .label-wrap,
.hdri-file-input .label-wrap { color: #4b5563 !important; }

/* HDRI preview image: remove thick / black frame (Gradio panel border) */
.hdri-preview-image,
.hdri-preview-image.panel,
.hdri-preview-image .wrap,
.hdri-preview-image .image-container,
.hdri-preview-image .image-frame,
.hdri-preview-image .image-wrapper,
.hdri-preview-image [data-testid="image"],
.hdri-preview-image .icon-buttons,
.hdri-preview-image img {
    border: none !important;
    outline: none !important;
    box-shadow: none !important;
}
.hdri-preview-image img {
    border-radius: 8px !important;
}

/* Export accordion: remove heavy black box; keep a light separator on the header only */
.export-accordion,
.export-accordion.panel,
.export-accordion > div,
.export-accordion details,
.export-accordion .label-wrap,
.export-accordion .accordion-header {
    border: none !important;
    outline: none !important;
    box-shadow: none !important;
}
.export-accordion summary,
.export-accordion .label-wrap {
    border-bottom: 1px solid #e5e7eb !important;
    background: transparent !important;
}

/* Gradio 4+ block chrome sometimes forces --block-border-color */
.gradio-container .hdri-preview-image,
.gradio-container .export-accordion {
    --block-border-width: 0px !important;
    --panel-border-width: 0 !important;
}

/* Shadow map preview: same flat frame as HDRI preview */
.shadow-preview-image,
.shadow-preview-image.panel,
.shadow-preview-image .wrap,
.shadow-preview-image .image-container,
.shadow-preview-image .image-frame,
.shadow-preview-image .image-wrapper,
.shadow-preview-image [data-testid="image"],
.shadow-preview-image img {
    border: none !important;
    outline: none !important;
    box-shadow: none !important;
}
.shadow-preview-image img { border-radius: 8px !important; }
.gradio-container .shadow-preview-image {
    --block-border-width: 0px !important;
    --panel-border-width: 0 !important;
}

/* Main output tabs: larger, easier to spot */
.main-output-tabs > .tab-nav,
.main-output-tabs .tab-nav button {
    font-size: 0.95rem !important;
    font-weight: 600 !important;
}
.main-output-tabs .tab-nav button { padding: 0.45rem 0.9rem !important; }

/* Status strip: one left accent only (Gradio panel also draws accent — disable it here) */
.gradio-container .status-footer,
.status-footer.panel,
.status-footer.block {
    --block-border-width: 0px !important;
    --panel-border-width: 0px !important;
}
.status-footer {
    font-size: 0.8125rem !important;
    line-height: 1.45 !important;
    color: var(--body-text-color-subdued, #6b7280) !important;
    margin: 0 0 0.65rem 0 !important;
    padding: 0.5rem 0.65rem 0.5rem 0.7rem !important;
    background: var(--block-background-fill, #f9fafb) !important;
    /* Single box: one thick left edge (avoid stacking with Gradio .block border) */
    border-width: 1px 1px 1px 3px !important;
    border-style: solid !important;
    border-color: var(--border-color-primary, #e5e7eb) var(--border-color-primary, #e5e7eb)
        var(--border-color-primary, #e5e7eb) var(--color-accent, #2563eb) !important;
    border-radius: 8px !important;
    box-shadow: 0 1px 2px rgba(15, 23, 42, 0.05) !important;
}
.status-footer .form,
.status-footer .wrap,
.status-footer .prose,
.status-footer .prose > *:first-child {
    border: none !important;
    box-shadow: none !important;
}
.status-footer .prose blockquote {
    border-left: none !important;
    padding-left: 0 !important;
    margin-left: 0 !important;
}
.status-footer p,
.status-footer .prose p {
    margin: 0 !important;
    line-height: 1.05 !important;
}
.status-footer strong {
    color: var(--body-text-color, #374151) !important;
    font-weight: 600 !important;
}
.status-footer a {
    color: var(--link-text-color, var(--color-accent, #2563eb)) !important;
    text-decoration: none !important;
}
.status-footer a:hover { text-decoration: underline !important; }

.ctrl-strip {
    border:1px solid #e5e7eb; border-radius:8px;
    padding:0.55rem 0.8rem 0.4rem; margin-bottom:0.6rem; background:#fff;
}
.ctrl-strip-title {
    font-size:0.72rem; font-weight:600; color:#9ca3af;
    text-transform:uppercase; letter-spacing:0.06em; margin-bottom:0.4rem;
}

.mat-label {
    font-size:0.72rem; font-weight:700; color:#9ca3af;
    text-transform:uppercase; letter-spacing:0.07em; margin:0.7rem 0 0.2rem;
}

.divider { border:none; border-top:1px solid #e5e7eb; margin:0.5rem 0; }

.img-gallery table { display:grid !important; grid-template-columns:repeat(3,1fr) !important; gap:3px !important; }
.img-gallery table thead { display:none !important; }
.img-gallery table tr { display:contents !important; }
.img-gallery table td { padding:0 !important; }
.img-gallery table td img { width:100% !important; height:68px !important; object-fit:cover !important; border-radius:5px !important; }

.hdri-gallery table { display:grid !important; grid-template-columns:repeat(2,1fr) !important; gap:3px !important; }
.hdri-gallery table thead { display:none !important; }
.hdri-gallery table tr { display:contents !important; }
.hdri-gallery table td { padding:0 !important; font-size:0.76rem; text-align:center; word-break:break-all; }

/* Right sidebar: align with TRELLIS-style narrow examples column */
.sidebar-examples { min-width: 0 !important; }
.sidebar-examples .label-wrap { font-size: 0.85rem !important; }
.gradio-container .sidebar-examples table { width: 100% !important; }

footer { display:none !important; }
"""
688
+
689
# Minimal theme override: blue accents on Gradio's Base theme.
NEAR_GRADIO_THEME = gr.themes.Base(
    primary_hue=gr.themes.colors.blue,
    secondary_hue=gr.themes.colors.blue,
)
693
+
694
+
695
def build_app() -> gr.Blocks:
    """Assemble the Gradio UI.

    Layout: left column (asset source, HDRI, export settings), center column
    (status strip, camera/HDRI controls, Geometry/Preview/Videos tabs), right
    column (example galleries). Event wiring happens at the end of the block.
    """
    with gr.Blocks(
        title="NeAR",
        theme=NEAR_GRADIO_THEME,
        delete_cache=None,
        fill_width=True,
    ) as demo:
        # Per-session asset metadata (mesh path, SLaT location). The SLaT object
        # itself lives in _SESSION_SLAT keyed by session hash, not in gr.State.
        asset_state = gr.State({})

        gr.Markdown(
            """
            ## Single Image to Relightable 3DGS with [NeAR](https://near-project.github.io/)
            * Upload an RGBA image (or load an existing SLaT), run **Generate Mesh** then **Generate / Load SLaT**, pick an HDRI, and use **Camera & HDRI** to relight.
            * Use **Geometry** for mesh / PBR preview, **Preview** for still renders, **Videos** for camera or HDRI paths; **Export PBR GLB** when you are happy with the result.
            * Texture style transfer is possible when the reference images used for **mesh** and **SLaT** are different.
            """,
            elem_classes=["near-app-header"],
        )

        # Example galleries skip Git LFS pointer stubs so broken assets never render.
        _img_ex = [
            [str(p)]
            for p in sorted((APP_DIR / "assets/example_image").glob("*.png"))
            if not _path_is_git_lfs_pointer(p)
        ]
        _slat_ex = [
            [str(p)]
            for p in sorted((APP_DIR / "assets/example_slats").glob("*.npz"))
            if not _path_is_git_lfs_pointer(p)
        ]
        _hdri_ex = [
            [str(p)]
            for p in sorted((APP_DIR / "assets/hdris").glob("*.exr"))
            if not _path_is_git_lfs_pointer(p)
        ]
        if not _img_ex and (APP_DIR / "assets/example_image").is_dir():
            print(
                "[NeAR] WARNING: no usable PNG examples (empty dir or all Git LFS pointers).",
                flush=True,
            )

        with gr.Row(equal_height=False):

            # ----- Left column: asset source, HDRI, export settings -----
            with gr.Column(scale=1, min_width=360):

                with gr.Group():
                    gr.HTML('<p class="section-kicker">Asset</p>')
                    source_mode = gr.Radio(
                        ["From Image", "From Existing SLaT"],
                        value="From Image",
                        label="",
                        show_label=False,
                    )
                    with gr.Tabs(selected=0) as source_tabs:

                        with gr.Tab("Image", id=0):
                            image_input = gr.Image(
                                label="Input Image", type="pil", image_mode="RGBA",
                                value=str(DEFAULT_IMAGE) if DEFAULT_IMAGE.exists() else None,
                                height=400,
                            )
                            seed = gr.Slider(0, MAX_SEED, value=43, step=1, label="Seed (SLaT)")
                            mesh_button = gr.Button("① Generate Mesh", variant="primary", min_width=100)

                        with gr.Tab("SLaT", id=1):
                            slat_upload = gr.File(label="Upload SLaT (.npz)", file_types=[".npz"])
                            slat_path_text = gr.Textbox(
                                label="Or enter local path",
                                placeholder="/path/to/sample_slat.npz",
                            )

                    slat_button = gr.Button(
                        "② Generate / Load SLaT", variant="primary", min_width=100,
                    )

                with gr.Group():
                    gr.HTML('<p class="section-kicker">HDRI</p>')
                    with gr.Column(elem_classes=["hdri-upload-zone"]):
                        hdri_file = gr.File(
                            label="Environment (.exr)", file_types=[".exr"],
                            value=str(DEFAULT_HDRI) if DEFAULT_HDRI.exists() else None,
                            elem_classes=["hdri-file-input"],
                        )
                        hdri_preview = gr.Image(
                            label="Preview",
                            interactive=False,
                            height=130,
                            container=False,
                            elem_classes=["hdri-preview-image"],
                        )

                with gr.Group():
                    gr.HTML('<p class="section-kicker">Export</p>')
                    with gr.Accordion(
                        "Export Settings",
                        open=False,
                        elem_classes=["export-accordion"],
                    ):
                        with gr.Row():
                            simplify = gr.Slider(0.8, 0.99, value=0.95, step=0.01, label="Mesh Simplify")
                            texture_size = gr.Slider(512, 4096, value=2048, step=512, label="Texture Size")

                with gr.Row():
                    # NOTE(review): clear_button has no click handler wired below —
                    # confirm whether cache clearing was dropped intentionally.
                    clear_button = gr.Button("Clear Cache", variant="secondary", min_width=100)

            # ----- Center column: status, camera controls, output tabs -----
            with gr.Column(scale=10, min_width=560):

                status_md = gr.Markdown(
                    "Ready — use **Asset** (left) and **HDRI** to begin.",
                    elem_classes=["status-footer"],
                )


                with gr.Group(elem_classes=["ctrl-strip"]):
                    gr.HTML("<div class='ctrl-strip-title'>Camera &amp; HDRI</div>")
                    with gr.Row():
                        tone_mapper_name = gr.Dropdown(
                            choices=AVAILABLE_TONE_MAPPERS,
                            value="AgX",
                            label="Tone Mapper",
                            min_width=120,
                        )
                        hdri_rot = gr.Slider(0, 360, value=0, step=1, label="HDRI Rotation °")
                        resolution = gr.Slider(256, 1024, value=512, step=256, label="Preview Res")
                    with gr.Row():
                        yaw = gr.Slider(0, 360, value=0, step=0.5, label="Yaw °")
                        pitch = gr.Slider(-90, 90, value=0, step=0.5, label="Pitch °")
                        fov = gr.Slider(10, 70, value=40, step=1, label="FoV")
                        radius = gr.Slider(1.0, 4.0, value=2.0, step=0.05, label="Radius")

                tone_mapper_name.change(
                    set_tone_mapper,
                    inputs=[tone_mapper_name],
                    outputs=[],
                )

                with gr.Tabs(elem_classes=["main-output-tabs"]):

                    with gr.Tab("Geometry", id=0):
                        with gr.Row():
                            mesh_viewer = gr.Model3D(
                                label="3D Mesh", interactive=False, height=520,
                            )
                            pbr_viewer = gr.Model3D(
                                label="PBR GLB", interactive=False, height=520,
                            )
                        gr.HTML("<hr class='divider'>")
                        with gr.Row():
                            export_glb_button = gr.Button("Export PBR GLB", variant="primary", min_width=140)

                    with gr.Tab("Preview", id=1):
                        preview_button = gr.Button("Render Preview", variant="primary", min_width=100)
                        gr.HTML("<hr class='divider'>")
                        with gr.Row():
                            color_output = gr.Image(label="Relit Result", interactive=False, height=400)
                            with gr.Column():
                                with gr.Row():
                                    base_color_output = gr.Image(label="Base Color", interactive=False, height=200)
                                    metallic_output = gr.Image(label="Metallic", interactive=False, height=200)
                                with gr.Row():
                                    roughness_output = gr.Image(label="Roughness", interactive=False, height=200)
                                    shadow_output = gr.Image(label="Shadow", interactive=False, height=200)

                    with gr.Tab("Videos", id=2):
                        with gr.Accordion("Video Settings", open=False):
                            with gr.Row():
                                fps = gr.Slider(1, 60, value=24, step=1, label="FPS")
                                num_views = gr.Slider(8, 120, value=40, step=1, label="Camera Frames")
                                num_frames = gr.Slider(8, 120, value=40, step=1, label="HDRI Frames")
                            with gr.Row():
                                full_video = gr.Checkbox(label="Full composite video", value=True)
                                shadow_video = gr.Checkbox(
                                    label="Include shadow in video",
                                    value=True,
                                )
                        with gr.Row():
                            camera_video_button = gr.Button("Camera Path Video", variant="primary", min_width=100)
                            hdri_video_button = gr.Button("HDRI Rotation Video", variant="primary", min_width=100)
                        camera_video_output = gr.Video(
                            label="Camera Path", autoplay=True, loop=True, height=340,
                        )
                        hdri_render_video_output = gr.Video(
                            label="HDRI Rotation Render", autoplay=True, loop=True, height=300,
                        )
                        with gr.Accordion("HDRI Roll (environment panorama)", open=False):
                            hdri_roll_video_output = gr.Video(
                                label="HDRI Roll", autoplay=True, loop=True, height=180,
                            )

            # ----- Right column: example galleries -----
            with gr.Column(scale=1, min_width=172):
                with gr.Column(visible=True, elem_classes=["sidebar-examples", "img-gallery"]) as col_img_examples:
                    if _img_ex:
                        gr.Examples(
                            examples=_img_ex,
                            inputs=[image_input],
                            fn=preprocess_image_only,
                            outputs=[image_input],
                            run_on_click=True,
                            examples_per_page=18,
                            label="Examples",
                        )
                    else:
                        gr.Markdown("*No PNG examples in `assets/example_image`*")

                with gr.Column(visible=False, elem_classes=["sidebar-examples"]) as col_slat_examples:
                    if _slat_ex:
                        gr.Examples(
                            examples=_slat_ex,
                            inputs=[slat_path_text],
                            label="Example SLaTs",
                        )
                    else:
                        gr.Markdown("*No `.npz` examples in `assets/example_slats`*")

                with gr.Column(visible=True, elem_classes=["sidebar-examples", "hdri-gallery"]) as col_hdri_examples:
                    if _hdri_ex:
                        gr.Examples(
                            examples=_hdri_ex,
                            inputs=[hdri_file],
                            label="Example HDRIs",
                            examples_per_page=8,
                        )
                    else:
                        gr.Markdown("*No `.exr` examples in `assets/hdris`*")

        # ----- Event wiring -----
        demo.load(start_session)
        demo.unload(end_session)

        source_mode.change(switch_asset_source, inputs=[source_mode], outputs=[source_tabs])
        source_mode.change(
            lambda m: (
                gr.update(visible=m == "From Image"),
                gr.update(visible=m == "From Existing SLaT"),
            ),
            inputs=[source_mode],
            outputs=[col_img_examples, col_slat_examples],
        )

        # NOTE(review): `change` also fires after `upload`, so preview_hdri may run
        # twice per upload — confirm the double invocation is acceptable.
        for _trigger in (hdri_file.upload, hdri_file.change):
            _trigger(
                preview_hdri,
                inputs=[hdri_file],
                outputs=[hdri_preview, status_md],
            )

        image_input.upload(
            preprocess_image_only,
            inputs=[image_input],
            outputs=[image_input],
        )

        mesh_button.click(
            generate_mesh,
            inputs=[image_input],
            outputs=[asset_state, mesh_viewer, status_md],
        )

        slat_button.click(
            prepare_slat,
            inputs=[source_mode, asset_state, image_input, seed, slat_upload, slat_path_text],
            outputs=[asset_state, status_md],
        )

        preview_button.click(
            render_preview,
            inputs=[asset_state, hdri_file, hdri_rot,
                    yaw, pitch, fov, radius, resolution],
            outputs=[
                color_output,
                base_color_output,
                metallic_output,
                roughness_output,
                shadow_output,
                status_md,
            ],
        )

        camera_video_button.click(
            render_camera_video,
            inputs=[asset_state, hdri_file, hdri_rot,
                    fps, num_views, fov, radius, full_video, shadow_video],
            outputs=[camera_video_output, status_md],
        )

        hdri_video_button.click(
            render_hdri_video,
            inputs=[asset_state, hdri_file,
                    fps, num_frames, yaw, pitch, fov, radius, full_video, shadow_video],
            outputs=[hdri_roll_video_output, hdri_render_video_output, status_md],
        )

        export_glb_button.click(
            export_glb,
            inputs=[asset_state, hdri_file, hdri_rot, simplify, texture_size],
            outputs=[pbr_viewer, status_md],
        )
    return demo
991
+
992
+
993
# Load both pipelines at import time so Spaces workers start with weights resident.
PIPELINE = NeARImageToRelightable3DPipeline.from_pretrained("luh0502/NeAR")
GEOMETRY_PIPELINE = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained("tencent/Hunyuan3D-2.1")

# NOTE(review): on ZeroGPU, `.to("cuda")` outside a @GPU context relies on the
# `spaces` package's torch patching — confirm this preload path is intended
# rather than moving the models to CUDA inside the @GPU functions.
if spaces is not None:
    PIPELINE.to("cuda")
    GEOMETRY_PIPELINE.to("cuda")

demo = build_app()

if __name__ == "__main__":
    # mcp_server=True additionally exposes the app's endpoints over MCP.
    demo.launch(
        mcp_server=True
    )