SynLayers committed on
Commit
78cf8f7
·
verified ·
1 Parent(s): b7c3df9

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +50 -0
app.py CHANGED
@@ -5,6 +5,7 @@ import sys
5
  from pathlib import Path
6
 
7
  import gradio as gr
 
8
 
9
  CURRENT_FILE = Path(__file__).resolve()
10
  PROJECT_ROOT = CURRENT_FILE.parents[1]
@@ -53,6 +54,45 @@ def build_gallery(result: dict) -> list[tuple[str, str]]:
53
  return gallery
54
 
55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  def run_demo(
57
  image_path: str,
58
  sample_name: str,
@@ -101,8 +141,13 @@ with gr.Blocks(title="SynLayers Real-World Demo") as demo:
101
 
102
  This Space is intended to run on GPU hardware. The first request may take time
103
  while model assets are loaded from Hugging Face.
 
 
 
104
  """
105
  )
 
 
106
 
107
  with gr.Row():
108
  with gr.Column(scale=1):
@@ -142,6 +187,11 @@ with gr.Blocks(title="SynLayers Real-World Demo") as demo:
142
  if examples:
143
  gr.Examples(examples=examples, inputs=[image_input], label="Example Images")
144
 
 
 
 
 
 
145
  run_button.click(
146
  fn=run_demo,
147
  inputs=[
 
5
  from pathlib import Path
6
 
7
  import gradio as gr
8
+ import torch
9
 
10
  CURRENT_FILE = Path(__file__).resolve()
11
  PROJECT_ROOT = CURRENT_FILE.parents[1]
 
54
  return gallery
55
 
56
 
57
def get_gpu_name() -> str:
    """Return the current CUDA device's name, "None" when CUDA is absent, or an error note."""
    if torch.cuda.is_available():
        try:
            return torch.cuda.get_device_name(torch.cuda.current_device())
        except Exception as exc:  # pragma: no cover - defensive runtime reporting
            return f"Unavailable ({exc})"
    return "None"
64
+
65
+
66
def get_runtime_status_markdown() -> str:
    """Build a Markdown report describing this Space's runtime/GPU configuration.

    Reads `SPACE_ID`, `ACCELERATOR`, and `SYNLAYERS_MODEL_REPO` from the
    environment, probes CUDA availability via torch, and appends a short
    explanation of whether the full pipeline can run here.
    """
    space_id = os.environ.get("SPACE_ID", "local")
    accelerator = os.environ.get("ACCELERATOR", "unknown")
    model_repo = os.environ.get("SYNLAYERS_MODEL_REPO", "(unset)")
    has_cuda = torch.cuda.is_available()

    report = [
        "## Runtime Status",
        f"- `SPACE_ID`: `{space_id}`",
        f"- `ACCELERATOR`: `{accelerator}`",
        f"- `CUDA available`: `{has_cuda}`",
        f"- `GPU device`: `{get_gpu_name()}`",
        f"- `SYNLAYERS_MODEL_REPO`: `{model_repo}`",
        "",
    ]

    # GPU selection happens in Space settings; surface that when CUDA is unusable.
    if has_cuda and accelerator != "none":
        report.append("The CUDA runtime is available and the full SynLayers pipeline can run here.")
    else:
        report.extend(
            [
                "This Space is not currently running with a usable CUDA GPU.",
                "The GPU type must be chosen by the Space owner in Hugging Face `Settings -> Hardware`.",
                "Visitors cannot switch GPUs from inside the Gradio app.",
            ]
        )

    return "\n".join(report)
94
+
95
+
96
  def run_demo(
97
  image_path: str,
98
  sample_name: str,
 
141
 
142
  This Space is intended to run on GPU hardware. The first request may take time
143
  while model assets are loaded from Hugging Face.
144
+
145
+ GPU hardware is selected in the Hugging Face Space settings by the owner,
146
+ not from inside this app.
147
  """
148
  )
149
+ runtime_status = gr.Markdown(get_runtime_status_markdown())
150
+ refresh_status_button = gr.Button("Refresh Runtime Status")
151
 
152
  with gr.Row():
153
  with gr.Column(scale=1):
 
187
  if examples:
188
  gr.Examples(examples=examples, inputs=[image_input], label="Example Images")
189
 
190
+ refresh_status_button.click(
191
+ fn=get_runtime_status_markdown,
192
+ outputs=runtime_status,
193
+ )
194
+
195
  run_button.click(
196
  fn=run_demo,
197
  inputs=[