Deploy Space app files
Browse files
app.py
CHANGED
|
@@ -23,13 +23,9 @@ from PIL import Image
|
|
| 23 |
from utils import (
|
| 24 |
BOOL_FILTER_CHOICES,
|
| 25 |
FILTER_ALL,
|
| 26 |
-
aggregate_by_model,
|
| 27 |
filter_dataframe_advanced,
|
| 28 |
-
format_instance_choice,
|
| 29 |
-
format_model_choice,
|
| 30 |
get_distinct_text_choices,
|
| 31 |
logger,
|
| 32 |
-
parse_choice_index,
|
| 33 |
require_bool_columns,
|
| 34 |
require_columns,
|
| 35 |
require_text_columns,
|
|
@@ -85,6 +81,13 @@ def load_preview_dataframe() -> pd.DataFrame:
|
|
| 85 |
return df
|
| 86 |
|
| 87 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
def build_preview_instance_dataframe(preview_df: pd.DataFrame) -> pd.DataFrame:
|
| 89 |
"""Derive one row per preview instance from preview frame rows."""
|
| 90 |
instance_cols = [
|
|
@@ -119,34 +122,49 @@ def build_preview_instance_dataframe(preview_df: pd.DataFrame) -> pd.DataFrame:
|
|
| 119 |
return df
|
| 120 |
|
| 121 |
|
| 122 |
-
def
|
| 123 |
-
preview_df: pd.DataFrame, instance_id: str, split: str = "train", max_frames: int = 50,
|
| 124 |
-
) -> list[dict[str, Any]]:
|
| 125 |
-
"""Load preview image payloads for one instance from preview Parquet."""
|
| 126 |
selected = preview_df[
|
| 127 |
(preview_df["instance_id"].astype(str) == str(instance_id))
|
| 128 |
-
& (preview_df["split"].astype(str) ==
|
| 129 |
].copy()
|
| 130 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
|
| 132 |
-
rows = selected.to_dict(orient="records")
|
| 133 |
-
if not rows and split == "train":
|
| 134 |
-
selected = preview_df[
|
| 135 |
-
preview_df["instance_id"].astype(str) == str(instance_id)
|
| 136 |
-
].copy()
|
| 137 |
-
selected = selected.sort_values(["split", "frame_id"]).head(max_frames)
|
| 138 |
-
rows = selected.to_dict(orient="records")
|
| 139 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
frames: list[dict[str, Any]] = []
|
| 141 |
for example in rows:
|
| 142 |
frame_id = int(example["frame_id"])
|
| 143 |
frame_item: dict[str, Any] = {"frame_id": frame_id}
|
| 144 |
for key in ("rgb", "mask", "depth_preview", "normal_preview"):
|
| 145 |
-
|
| 146 |
-
if not isinstance(img_bytes, (bytes, bytearray)) or not img_bytes:
|
| 147 |
-
raise TypeError(f"Expected non-empty image bytes for {key} frame {frame_id}.")
|
| 148 |
-
with Image.open(io.BytesIO(img_bytes)) as img:
|
| 149 |
-
frame_item[key] = img.copy()
|
| 150 |
frames.append(frame_item)
|
| 151 |
return frames
|
| 152 |
|
|
@@ -220,42 +238,9 @@ def build_app(instance_df: pd.DataFrame, preview_df: pd.DataFrame) -> gr.Blocks:
|
|
| 220 |
model_name_choices = get_distinct_text_choices(instance_df, "model_name")
|
| 221 |
material_name_choices = get_distinct_text_choices(instance_df, "material_name")
|
| 222 |
env_name_choices = get_distinct_text_choices(instance_df, "env_name")
|
| 223 |
-
|
| 224 |
-
model_display_cols = [
|
| 225 |
-
"instance_id",
|
| 226 |
-
"model_name",
|
| 227 |
-
"material_name",
|
| 228 |
-
"env_name",
|
| 229 |
-
"hasGlass",
|
| 230 |
-
"isGenerated",
|
| 231 |
-
"transparent",
|
| 232 |
-
"near_light",
|
| 233 |
-
]
|
| 234 |
-
instance_display_cols = [
|
| 235 |
-
"instance_id",
|
| 236 |
-
"model_name",
|
| 237 |
-
"material_name",
|
| 238 |
-
"env_name",
|
| 239 |
-
"hasGlass",
|
| 240 |
-
"isGenerated",
|
| 241 |
-
"transparent",
|
| 242 |
-
"near_light",
|
| 243 |
-
]
|
| 244 |
-
model_extra_cols = [
|
| 245 |
-
"material_name",
|
| 246 |
-
"env_name",
|
| 247 |
-
"hasGlass",
|
| 248 |
-
"isGenerated",
|
| 249 |
-
"transparent",
|
| 250 |
-
"near_light",
|
| 251 |
-
"glb_path",
|
| 252 |
-
]
|
| 253 |
-
|
| 254 |
stats_md = build_stats_markdown(instance_df, preview_df)
|
| 255 |
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
def search_models(
|
| 259 |
model_name: str,
|
| 260 |
material_name: str,
|
| 261 |
env_name: str,
|
|
@@ -263,7 +248,7 @@ def build_app(instance_df: pd.DataFrame, preview_df: pd.DataFrame) -> gr.Blocks:
|
|
| 263 |
is_generated: str,
|
| 264 |
transparent: str,
|
| 265 |
near_light: str,
|
| 266 |
-
):
|
| 267 |
filtered = filter_dataframe_advanced(
|
| 268 |
instance_df,
|
| 269 |
model_name=model_name,
|
|
@@ -274,42 +259,11 @@ def build_app(instance_df: pd.DataFrame, preview_df: pd.DataFrame) -> gr.Blocks:
|
|
| 274 |
transparent=transparent,
|
| 275 |
near_light=near_light,
|
| 276 |
)
|
| 277 |
-
|
| 278 |
-
shown = aggregated.head(MAX_RESULTS).copy()
|
| 279 |
rows = shown.to_dict(orient="records")
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
f"Matched **{len(aggregated)}** models, showing **{len(rows)}**. "
|
| 284 |
-
f"Preview instances: **{len(filtered)}**."
|
| 285 |
-
)
|
| 286 |
-
table = shown[model_display_cols] if not shown.empty else pd.DataFrame(columns=model_display_cols)
|
| 287 |
-
meta = rows[0] if rows else {}
|
| 288 |
-
return summary, table, gr.update(choices=choices, value=selected), rows, meta
|
| 289 |
-
|
| 290 |
-
def on_model_select(choice: str, rows: list[dict[str, Any]]):
|
| 291 |
-
if not choice or not rows:
|
| 292 |
-
return {}
|
| 293 |
-
idx = parse_choice_index(choice, len(rows))
|
| 294 |
-
if idx is None:
|
| 295 |
-
return {}
|
| 296 |
-
return rows[idx]
|
| 297 |
-
|
| 298 |
-
def on_load_3d(rows: list[dict[str, Any]], choice: str):
|
| 299 |
-
if not choice or not rows:
|
| 300 |
-
return None
|
| 301 |
-
idx = parse_choice_index(choice, len(rows))
|
| 302 |
-
if idx is None:
|
| 303 |
-
return None
|
| 304 |
-
glb = rows[idx]["glb_path"]
|
| 305 |
-
logger.info("on_load_3d: glb_path=%r", glb)
|
| 306 |
-
if not isinstance(glb, str) or not glb.strip():
|
| 307 |
-
raise ValueError(f"Selected model row does not contain a GLB path: {rows[idx]!r}")
|
| 308 |
-
return download_glb(glb)
|
| 309 |
-
|
| 310 |
-
# ---- Image Viewer callbacks ----
|
| 311 |
-
|
| 312 |
-
def search_instances(
|
| 313 |
model_name: str,
|
| 314 |
material_name: str,
|
| 315 |
env_name: str,
|
|
@@ -318,8 +272,7 @@ def build_app(instance_df: pd.DataFrame, preview_df: pd.DataFrame) -> gr.Blocks:
|
|
| 318 |
transparent: str,
|
| 319 |
near_light: str,
|
| 320 |
):
|
| 321 |
-
filtered =
|
| 322 |
-
instance_df,
|
| 323 |
model_name=model_name,
|
| 324 |
material_name=material_name,
|
| 325 |
env_name=env_name,
|
|
@@ -328,28 +281,24 @@ def build_app(instance_df: pd.DataFrame, preview_df: pd.DataFrame) -> gr.Blocks:
|
|
| 328 |
transparent=transparent,
|
| 329 |
near_light=near_light,
|
| 330 |
)
|
| 331 |
-
shown = filtered.head(MAX_RESULTS).copy()
|
| 332 |
-
rows = shown[instance_display_cols].to_dict(orient="records")
|
| 333 |
-
choices = [format_instance_choice(i, r) for i, r in enumerate(rows)]
|
| 334 |
-
selected = choices[0] if choices else None
|
| 335 |
summary = f"Matched **{len(filtered)}** preview instances, showing **{len(rows)}**."
|
| 336 |
-
|
| 337 |
-
return summary, table, gr.update(choices=choices, value=selected), rows
|
| 338 |
-
|
| 339 |
-
def on_load_images(rows: list[dict[str, Any]], choice: str):
|
| 340 |
slider_empty = gr.update(minimum=1, maximum=1, step=1, value=1, interactive=False)
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
if
|
| 345 |
-
return [],
|
| 346 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 347 |
if not isinstance(instance_id, str) or not instance_id.strip():
|
| 348 |
raise ValueError(f"Selected instance row has invalid instance_id: {rows[idx]!r}")
|
| 349 |
logger.info("Loading images for instance: %s", instance_id)
|
| 350 |
-
frame_items = load_instance_frames(preview_df, instance_id,
|
| 351 |
-
if not frame_items:
|
| 352 |
-
return [], slider_empty, []
|
| 353 |
slider_ready = gr.update(
|
| 354 |
minimum=1,
|
| 355 |
maximum=len(frame_items),
|
|
@@ -357,126 +306,95 @@ def build_app(instance_df: pd.DataFrame, preview_df: pd.DataFrame) -> gr.Blocks:
|
|
| 357 |
value=1,
|
| 358 |
interactive=True,
|
| 359 |
)
|
| 360 |
-
return render_frame_gallery(frame_items, 1), slider_ready, frame_items
|
| 361 |
|
| 362 |
def on_frame_change(frame_idx: float, frame_items: list[dict[str, Any]]):
|
| 363 |
return render_frame_gallery(frame_items, frame_idx)
|
| 364 |
|
| 365 |
-
|
|
|
|
|
|
|
| 366 |
|
| 367 |
with gr.Blocks(title="3DReflecNet Dataset Explorer") as demo:
|
| 368 |
gr.Markdown("# 3DReflecNet Dataset Explorer")
|
| 369 |
gr.Markdown(
|
| 370 |
-
"
|
| 371 |
)
|
| 372 |
gr.Markdown(stats_md)
|
| 373 |
|
| 374 |
-
with gr.
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
|
| 393 |
-
|
| 394 |
-
|
| 395 |
-
|
| 396 |
-
|
| 397 |
-
|
| 398 |
-
)
|
| 399 |
-
|
| 400 |
-
|
| 401 |
-
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
|
| 417 |
-
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
| 432 |
-
|
| 433 |
-
|
| 434 |
-
|
| 435 |
-
|
| 436 |
-
|
| 437 |
-
|
| 438 |
-
|
| 439 |
-
|
| 440 |
-
|
| 441 |
-
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
|
| 445 |
-
|
| 446 |
-
|
| 447 |
-
minimum=1,
|
| 448 |
-
maximum=1,
|
| 449 |
-
step=1,
|
| 450 |
-
value=1,
|
| 451 |
-
interactive=False,
|
| 452 |
-
)
|
| 453 |
-
|
| 454 |
-
img_state = gr.State([])
|
| 455 |
-
img_frame_state = gr.State([])
|
| 456 |
-
|
| 457 |
-
img_btn.click(
|
| 458 |
-
fn=search_instances,
|
| 459 |
-
inputs=[
|
| 460 |
-
img_model_name,
|
| 461 |
-
img_material_name,
|
| 462 |
-
img_env_name,
|
| 463 |
-
img_has_glass,
|
| 464 |
-
img_is_generated,
|
| 465 |
-
img_transparent,
|
| 466 |
-
img_near_light,
|
| 467 |
-
],
|
| 468 |
-
outputs=[img_summary, img_table, img_select, img_state],
|
| 469 |
-
)
|
| 470 |
-
img_load_btn.click(
|
| 471 |
-
fn=on_load_images,
|
| 472 |
-
inputs=[img_state, img_select],
|
| 473 |
-
outputs=[img_gallery, img_frame_slider, img_frame_state],
|
| 474 |
-
)
|
| 475 |
-
img_frame_slider.change(
|
| 476 |
-
fn=on_frame_change,
|
| 477 |
-
inputs=[img_frame_slider, img_frame_state],
|
| 478 |
-
outputs=[img_gallery],
|
| 479 |
-
)
|
| 480 |
|
| 481 |
return demo
|
| 482 |
|
|
|
|
| 23 |
from utils import (
|
| 24 |
BOOL_FILTER_CHOICES,
|
| 25 |
FILTER_ALL,
|
|
|
|
| 26 |
filter_dataframe_advanced,
|
|
|
|
|
|
|
| 27 |
get_distinct_text_choices,
|
| 28 |
logger,
|
|
|
|
| 29 |
require_bool_columns,
|
| 30 |
require_columns,
|
| 31 |
require_text_columns,
|
|
|
|
| 81 |
return df
|
| 82 |
|
| 83 |
|
| 84 |
+
def decode_image_bytes(img_bytes: bytes | bytearray, context: str) -> Image.Image:
    """Decode a raw image payload into a detached PIL image.

    Args:
        img_bytes: Encoded image bytes pulled from a preview Parquet row.
        context: Human-readable label used in the error message.

    Raises:
        TypeError: If the payload is not a non-empty bytes-like object.
    """
    payload_ok = isinstance(img_bytes, (bytes, bytearray)) and bool(img_bytes)
    if not payload_ok:
        raise TypeError(f"Expected non-empty image bytes for {context}.")
    buffer = io.BytesIO(img_bytes)
    # Copy so the returned image outlives the closed file handle.
    with Image.open(buffer) as decoded:
        return decoded.copy()
|
| 89 |
+
|
| 90 |
+
|
| 91 |
def build_preview_instance_dataframe(preview_df: pd.DataFrame) -> pd.DataFrame:
|
| 92 |
"""Derive one row per preview instance from preview frame rows."""
|
| 93 |
instance_cols = [
|
|
|
|
| 122 |
return df
|
| 123 |
|
| 124 |
|
| 125 |
+
def train_frame_rows(preview_df: pd.DataFrame, instance_id: str, max_frames: int | None = None) -> list[dict[str, Any]]:
|
|
|
|
|
|
|
|
|
|
| 126 |
selected = preview_df[
|
| 127 |
(preview_df["instance_id"].astype(str) == str(instance_id))
|
| 128 |
+
& (preview_df["split"].astype(str) == "train")
|
| 129 |
].copy()
|
| 130 |
+
if selected.empty:
|
| 131 |
+
raise ValueError(f"Preview instance {instance_id!r} has no train split rows.")
|
| 132 |
+
selected = selected.sort_values("frame_id")
|
| 133 |
+
if max_frames is not None:
|
| 134 |
+
selected = selected.head(max_frames)
|
| 135 |
+
return selected.to_dict(orient="records")
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def get_instance_thumbnail(preview_df: pd.DataFrame, instance_id: str) -> Image.Image:
    """Return the RGB image of the first train frame for *instance_id*."""
    context = f"{instance_id} thumbnail RGB"
    first_row = train_frame_rows(preview_df, instance_id, max_frames=1)[0]
    return decode_image_bytes(first_row["rgb"], context)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def instance_caption(row: dict[str, Any]) -> str:
    """Build the gallery caption ``model | material | env`` for one instance row."""
    fields = (row["model_name"], row["material_name"], row["env_name"])
    return " | ".join(format(value) for value in fields)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def build_instance_gallery_items(
    rows: list[dict[str, Any]],
    preview_df: pd.DataFrame,
) -> list[tuple[Image.Image, str]]:
    """Pair each instance row's train-frame thumbnail with its caption.

    Returns (image, caption) tuples in the shape ``gr.Gallery`` expects.
    """
    items: list[tuple[Image.Image, str]] = []
    for row in rows:
        thumbnail = get_instance_thumbnail(preview_df, row["instance_id"])
        items.append((thumbnail, instance_caption(row)))
    return items
|
| 155 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 156 |
|
| 157 |
+
def load_instance_frames(
    preview_df: pd.DataFrame, instance_id: str, max_frames: int = 50,
) -> list[dict[str, Any]]:
    """Load train preview image payloads for one instance from preview Parquet.

    Each returned item maps ``frame_id`` to an int and each preview channel
    (rgb/mask/depth/normal) to a decoded PIL image.
    """
    image_keys = ("rgb", "mask", "depth_preview", "normal_preview")
    frames: list[dict[str, Any]] = []
    for record in train_frame_rows(preview_df, instance_id, max_frames=max_frames):
        fid = int(record["frame_id"])
        item: dict[str, Any] = {"frame_id": fid}
        for key in image_keys:
            item[key] = decode_image_bytes(record[key], f"{key} frame {fid}")
        frames.append(item)
    return frames
|
| 170 |
|
|
|
|
| 238 |
model_name_choices = get_distinct_text_choices(instance_df, "model_name")
|
| 239 |
material_name_choices = get_distinct_text_choices(instance_df, "material_name")
|
| 240 |
env_name_choices = get_distinct_text_choices(instance_df, "env_name")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 241 |
stats_md = build_stats_markdown(instance_df, preview_df)
|
| 242 |
|
| 243 |
+
def filtered_instance_rows(
|
|
|
|
|
|
|
| 244 |
model_name: str,
|
| 245 |
material_name: str,
|
| 246 |
env_name: str,
|
|
|
|
| 248 |
is_generated: str,
|
| 249 |
transparent: str,
|
| 250 |
near_light: str,
|
| 251 |
+
) -> tuple[pd.DataFrame, list[dict[str, Any]]]:
|
| 252 |
filtered = filter_dataframe_advanced(
|
| 253 |
instance_df,
|
| 254 |
model_name=model_name,
|
|
|
|
| 259 |
transparent=transparent,
|
| 260 |
near_light=near_light,
|
| 261 |
)
|
| 262 |
+
shown = filtered.head(MAX_RESULTS).copy()
|
|
|
|
| 263 |
rows = shown.to_dict(orient="records")
|
| 264 |
+
return filtered, rows
|
| 265 |
+
|
| 266 |
+
def filter_gallery(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 267 |
model_name: str,
|
| 268 |
material_name: str,
|
| 269 |
env_name: str,
|
|
|
|
| 272 |
transparent: str,
|
| 273 |
near_light: str,
|
| 274 |
):
|
| 275 |
+
filtered, rows = filtered_instance_rows(
|
|
|
|
| 276 |
model_name=model_name,
|
| 277 |
material_name=material_name,
|
| 278 |
env_name=env_name,
|
|
|
|
| 281 |
transparent=transparent,
|
| 282 |
near_light=near_light,
|
| 283 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 284 |
summary = f"Matched **{len(filtered)}** preview instances, showing **{len(rows)}**."
|
| 285 |
+
gallery_items = build_instance_gallery_items(rows, preview_df)
|
|
|
|
|
|
|
|
|
|
| 286 |
slider_empty = gr.update(minimum=1, maximum=1, step=1, value=1, interactive=False)
|
| 287 |
+
return summary, gallery_items, rows, {}, None, [], slider_empty, []
|
| 288 |
+
|
| 289 |
+
def on_instance_select(rows: list[dict[str, Any]], evt: gr.SelectData):
|
| 290 |
+
if not rows:
|
| 291 |
+
return {}, None, [], gr.update(minimum=1, maximum=1, step=1, value=1, interactive=False), []
|
| 292 |
+
idx = evt.index[0] if isinstance(evt.index, tuple) else evt.index
|
| 293 |
+
if not isinstance(idx, int) or idx < 0 or idx >= len(rows):
|
| 294 |
+
raise IndexError(f"Selected gallery index is out of range: {evt.index!r}")
|
| 295 |
+
|
| 296 |
+
row = rows[idx]
|
| 297 |
+
instance_id = row["instance_id"]
|
| 298 |
if not isinstance(instance_id, str) or not instance_id.strip():
|
| 299 |
raise ValueError(f"Selected instance row has invalid instance_id: {rows[idx]!r}")
|
| 300 |
logger.info("Loading images for instance: %s", instance_id)
|
| 301 |
+
frame_items = load_instance_frames(preview_df, instance_id, max_frames=50)
|
|
|
|
|
|
|
| 302 |
slider_ready = gr.update(
|
| 303 |
minimum=1,
|
| 304 |
maximum=len(frame_items),
|
|
|
|
| 306 |
value=1,
|
| 307 |
interactive=True,
|
| 308 |
)
|
| 309 |
+
return row, download_glb(row["glb_path"]), render_frame_gallery(frame_items, 1), slider_ready, frame_items
|
| 310 |
|
| 311 |
def on_frame_change(frame_idx: float, frame_items: list[dict[str, Any]]):
    """Slider callback: re-render the per-frame gallery at the chosen frame."""
    rendered = render_frame_gallery(frame_items, frame_idx)
    return rendered
|
| 313 |
|
| 314 |
+
initial_rows = instance_df.head(MAX_RESULTS).to_dict(orient="records")
|
| 315 |
+
initial_gallery = build_instance_gallery_items(initial_rows, preview_df)
|
| 316 |
+
initial_summary = f"Matched **{len(instance_df)}** preview instances, showing **{len(initial_rows)}**."
|
| 317 |
|
| 318 |
with gr.Blocks(title="3DReflecNet Dataset Explorer") as demo:
|
| 319 |
gr.Markdown("# 3DReflecNet Dataset Explorer")
|
| 320 |
gr.Markdown(
|
| 321 |
+
"Browse the configured preview subset. Select an RGB thumbnail to inspect the instance."
|
| 322 |
)
|
| 323 |
gr.Markdown(stats_md)
|
| 324 |
|
| 325 |
+
with gr.Row():
|
| 326 |
+
model_name = gr.Dropdown(label="model_name", choices=model_name_choices, value=FILTER_ALL)
|
| 327 |
+
material_name = gr.Dropdown(label="material_name", choices=material_name_choices, value=FILTER_ALL)
|
| 328 |
+
env_name = gr.Dropdown(label="env_name", choices=env_name_choices, value=FILTER_ALL)
|
| 329 |
+
with gr.Row():
|
| 330 |
+
has_glass = gr.Dropdown(label="hasGlass", choices=BOOL_FILTER_CHOICES, value=FILTER_ALL)
|
| 331 |
+
is_generated = gr.Dropdown(label="isGenerated", choices=BOOL_FILTER_CHOICES, value=FILTER_ALL)
|
| 332 |
+
transparent = gr.Dropdown(label="transparent", choices=BOOL_FILTER_CHOICES, value=FILTER_ALL)
|
| 333 |
+
near_light = gr.Dropdown(label="near_light", choices=BOOL_FILTER_CHOICES, value=FILTER_ALL)
|
| 334 |
+
filter_btn = gr.Button("Apply Filters", variant="primary")
|
| 335 |
+
|
| 336 |
+
summary = gr.Markdown(initial_summary)
|
| 337 |
+
instance_gallery = gr.Gallery(
|
| 338 |
+
label="Preview Instances",
|
| 339 |
+
value=initial_gallery,
|
| 340 |
+
columns=5,
|
| 341 |
+
object_fit="contain",
|
| 342 |
+
height="auto",
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
with gr.Row():
|
| 346 |
+
instance_meta = gr.JSON(label="Instance Metadata")
|
| 347 |
+
model_viewer = gr.Model3D(
|
| 348 |
+
label="3D Preview (GLB)",
|
| 349 |
+
clear_color=(0.35, 0.35, 0.38, 1.0),
|
| 350 |
+
camera_position=(35, 70, 3.5),
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
frame_gallery = gr.Gallery(label="RGB / Mask / Depth / Normal", columns=4, rows=1, object_fit="contain", height="auto")
|
| 354 |
+
frame_slider = gr.Slider(
|
| 355 |
+
label="Frame",
|
| 356 |
+
minimum=1,
|
| 357 |
+
maximum=1,
|
| 358 |
+
step=1,
|
| 359 |
+
value=1,
|
| 360 |
+
interactive=False,
|
| 361 |
+
)
|
| 362 |
+
|
| 363 |
+
instance_state = gr.State(initial_rows)
|
| 364 |
+
frame_state = gr.State([])
|
| 365 |
+
|
| 366 |
+
filter_btn.click(
|
| 367 |
+
fn=filter_gallery,
|
| 368 |
+
inputs=[
|
| 369 |
+
model_name,
|
| 370 |
+
material_name,
|
| 371 |
+
env_name,
|
| 372 |
+
has_glass,
|
| 373 |
+
is_generated,
|
| 374 |
+
transparent,
|
| 375 |
+
near_light,
|
| 376 |
+
],
|
| 377 |
+
outputs=[
|
| 378 |
+
summary,
|
| 379 |
+
instance_gallery,
|
| 380 |
+
instance_state,
|
| 381 |
+
instance_meta,
|
| 382 |
+
model_viewer,
|
| 383 |
+
frame_gallery,
|
| 384 |
+
frame_slider,
|
| 385 |
+
frame_state,
|
| 386 |
+
],
|
| 387 |
+
)
|
| 388 |
+
instance_gallery.select(
|
| 389 |
+
fn=on_instance_select,
|
| 390 |
+
inputs=[instance_state],
|
| 391 |
+
outputs=[instance_meta, model_viewer, frame_gallery, frame_slider, frame_state],
|
| 392 |
+
)
|
| 393 |
+
frame_slider.change(
|
| 394 |
+
fn=on_frame_change,
|
| 395 |
+
inputs=[frame_slider, frame_state],
|
| 396 |
+
outputs=[frame_gallery],
|
| 397 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 398 |
|
| 399 |
return demo
|
| 400 |
|