Sen Fang committed on
Commit
1199e9d
·
1 Parent(s): 017f697

Improve orchestration robustness, monitoring, and DWPose optimization

Browse files
debug/benchmark_dwpose_cpu_vs_gpu.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import json
6
+ import os
7
+ import subprocess
8
+ import sys
9
+ import time
10
+ from pathlib import Path
11
+
12
+
13
+ DEFAULT_RAW_VIDEO_DIR = Path("/home/sf895/Sign-DWPose-2M-runtime/raw_video")
14
+ VIDEO_EXTENSIONS = (".mp4", ".mkv", ".webm", ".mov")
15
+
16
+
17
def parse_args() -> argparse.Namespace:
    """Parse the CLI options shared by the controller and worker modes."""
    parser = argparse.ArgumentParser(
        description="Benchmark DWpose CPU vs GPU on the same video and estimate CPU threads per 1 GPU."
    )
    add = parser.add_argument
    add("--video-id", type=str, required=True)
    add("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
    add("--fps", type=int, default=24)
    add("--max-frames", type=int, default=None)
    add(
        "--cpu-threads",
        type=str,
        default="1,2,4,8,16,32",
        help="Comma-separated CPU thread counts to benchmark.",
    )
    add(
        "--device",
        choices=("cpu", "gpu"),
        default=None,
        help="Internal worker mode. Omit to run the controller.",
    )
    add("--json", action="store_true", help="Emit final summary as JSON.")
    return parser.parse_args()
39
+
40
+
41
def resolve_video_path(video_id: str, raw_video_dir: Path) -> Path:
    """Locate the raw video file for *video_id*, trying each known extension in order.

    Raises FileNotFoundError when no candidate file exists.
    """
    candidates = (raw_video_dir / f"{video_id}{ext}" for ext in VIDEO_EXTENSIONS)
    found = next((path for path in candidates if path.is_file()), None)
    if found is None:
        raise FileNotFoundError(f"Video not found for {video_id} under {raw_video_dir}")
    return found
47
+
48
+
49
def run_ffprobe_dims(video_path: Path) -> tuple[int, int]:
    """Return the (width, height) of the first video stream via ffprobe.

    Raises CalledProcessError if ffprobe exits non-zero, and RuntimeError
    when its output cannot be parsed as "WIDTHxHEIGHT".
    """
    cmd = [
        "ffprobe",
        "-v", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=width,height",
        "-of", "csv=p=0:s=x",
        str(video_path),
    ]
    completed = subprocess.run(cmd, check=True, capture_output=True, text=True)
    dims = (completed.stdout or "").strip()
    if "x" not in dims:
        raise RuntimeError(f"Unable to parse ffprobe dimensions: {dims!r}")
    width_s, height_s = dims.split("x", 1)
    return int(width_s), int(height_s)
72
+
73
+
74
def stream_frames(video_path: Path, fps: int, max_frames: int | None):
    """Yield (1-based frame index, PIL RGB image) decoded from *video_path*.

    ffmpeg resamples the video to *fps* and writes raw rgb24 frames to a pipe;
    each frame is sliced out of the byte stream by its exact byte size.
    Stops after *max_frames* frames when given, terminating ffmpeg early.
    """
    # Imported lazily so importing this module does not require numpy/PIL.
    import numpy as np
    from PIL import Image

    width, height = run_ffprobe_dims(video_path)
    frame_bytes = width * height * 3  # rgb24: 3 bytes per pixel
    command = [
        "ffmpeg",
        "-hide_banner",
        "-loglevel",
        "error",
        "-i",
        str(video_path),
        "-vf",
        f"fps={fps}",
        "-f",
        "rawvideo",
        "-pix_fmt",
        "rgb24",
        "pipe:1",
    ]
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert proc.stdout is not None
    frame_index = 0
    stopped_early = False
    try:
        while True:
            if max_frames is not None and frame_index >= max_frames:
                stopped_early = True
                break
            chunk = proc.stdout.read(frame_bytes)
            if not chunk:
                # EOF: ffmpeg finished producing frames.
                break
            if len(chunk) != frame_bytes:
                # A partial frame means the stream was truncated mid-frame.
                raise RuntimeError(
                    f"Short raw frame read: expected {frame_bytes} bytes, got {len(chunk)}"
                )
            frame_index += 1
            frame_array = np.frombuffer(chunk, dtype=np.uint8).reshape((height, width, 3))
            yield frame_index, Image.fromarray(frame_array, mode="RGB")
    finally:
        # When we stopped on purpose, kill ffmpeg; its non-zero exit is then expected.
        if stopped_early and proc.poll() is None:
            proc.terminate()
        if proc.stdout:
            proc.stdout.close()
        stderr = proc.stderr.read().decode("utf-8", errors="replace") if proc.stderr else ""
        if proc.stderr:
            proc.stderr.close()
        returncode = proc.wait()
        # NOTE(review): if the consumer abandons this generator before EOF
        # without hitting max_frames, stopped_early stays False and a non-zero
        # ffmpeg exit raises here during generator close — confirm intended.
        if returncode != 0 and not stopped_early:
            raise RuntimeError(f"ffmpeg raw frame stream failed: {stderr.strip()}")
125
+
126
+
127
def worker_main(args: argparse.Namespace) -> int:
    """Benchmark DWpose on a single device and print a one-line JSON result.

    Runs in a dedicated subprocess (spawned by run_worker) so that thread-cap
    environment variables and device state are isolated per measurement.
    Returns 0 on success.
    """
    video_path = resolve_video_path(args.video_id, args.raw_video_dir)

    if args.device == "cpu":
        # Cap every common threading backend to the requested thread count.
        # These are set BEFORE the easy_dwpose import below — presumably the
        # underlying runtimes read them at import/session creation; confirm.
        cpu_threads = int(os.environ.get("DWPOSE_CPU_THREADS", "1"))
        os.environ["OMP_NUM_THREADS"] = str(cpu_threads)
        os.environ["MKL_NUM_THREADS"] = str(cpu_threads)
        os.environ["OPENBLAS_NUM_THREADS"] = str(cpu_threads)
        os.environ["NUMEXPR_NUM_THREADS"] = str(cpu_threads)
        os.environ["ORT_NUM_THREADS"] = str(cpu_threads)

    # Deferred import: heavy, and must come after the env caps above.
    from easy_dwpose import DWposeDetector

    device = "cpu" if args.device == "cpu" else "cuda:0"
    detector = DWposeDetector(device=device)

    # Timing covers decode + inference for every sampled frame.
    start = time.perf_counter()
    frames = 0
    for frame_index, frame in stream_frames(video_path, args.fps, args.max_frames):
        detector(frame, draw_pose=False, include_hands=True, include_face=True)
        frames = frame_index
    elapsed = time.perf_counter() - start
    result = {
        "video_id": args.video_id,
        "video_path": str(video_path),
        "device": args.device,
        "fps": args.fps,
        "max_frames": args.max_frames,
        "frames_processed": frames,
        "elapsed_seconds": elapsed,
        "frames_per_second": (frames / elapsed) if elapsed > 0 else 0.0,
        "cpu_threads": int(os.environ.get("DWPOSE_CPU_THREADS", "0")) if args.device == "cpu" else 0,
        "hostname": os.uname().nodename,
    }
    # One JSON line on stdout: the controller parses the last non-empty line.
    print(json.dumps(result, sort_keys=True))
    return 0
163
+
164
+
165
+ def run_worker(script_path: Path, args: argparse.Namespace, device: str, cpu_threads: int | None = None) -> dict:
166
+ env = os.environ.copy()
167
+ if cpu_threads is not None:
168
+ env["DWPOSE_CPU_THREADS"] = str(cpu_threads)
169
+ cmd = [
170
+ sys.executable,
171
+ str(script_path),
172
+ "--video-id",
173
+ args.video_id,
174
+ "--raw-video-dir",
175
+ str(args.raw_video_dir),
176
+ "--fps",
177
+ str(args.fps),
178
+ "--device",
179
+ device,
180
+ ]
181
+ if args.max_frames is not None:
182
+ cmd.extend(["--max-frames", str(args.max_frames)])
183
+ proc = subprocess.run(cmd, check=True, capture_output=True, text=True, env=env)
184
+ lines = [line.strip() for line in proc.stdout.splitlines() if line.strip()]
185
+ if not lines:
186
+ raise RuntimeError(f"No benchmark output returned for device={device}")
187
+ return json.loads(lines[-1])
188
+
189
+
190
def controller_main(args: argparse.Namespace) -> int:
    """Run the GPU benchmark once, then each requested CPU thread count, and report.

    Each measurement happens in a fresh subprocess (run_worker) so thread caps
    and CUDA state never leak between runs. Returns 0 on success.
    """
    script_path = Path(__file__).resolve()
    video_path = resolve_video_path(args.video_id, args.raw_video_dir)
    cpu_threads_list = [int(x) for x in args.cpu_threads.split(",") if x.strip()]

    gpu_result = run_worker(script_path, args, "gpu")
    cpu_results = [run_worker(script_path, args, "cpu", cpu_threads=t) for t in cpu_threads_list]

    summary = {
        "video_id": args.video_id,
        "video_path": str(video_path),
        "fps": args.fps,
        "max_frames": args.max_frames,
        "gpu_result": gpu_result,
        "cpu_results": [],
    }
    gpu_elapsed = gpu_result["elapsed_seconds"]
    for cpu_result in cpu_results:
        cpu_elapsed = cpu_result["elapsed_seconds"]
        cpu_threads = cpu_result["cpu_threads"]
        # Estimated CPU threads needed to match one GPU's throughput.
        cpu_equivalent_threads = (cpu_threads * cpu_elapsed / gpu_elapsed) if gpu_elapsed > 0 else None
        merged = dict(cpu_result)
        merged["speedup_gpu_over_cpu"] = (cpu_elapsed / gpu_elapsed) if gpu_elapsed > 0 else None
        merged["approx_cpu_threads_for_one_gpu"] = cpu_equivalent_threads
        summary["cpu_results"].append(merged)

    if args.json:
        print(json.dumps(summary, indent=2, sort_keys=True))
        return 0

    def _fmt(value, spec: str) -> str:
        # BUG FIX: the ratio fields are None when gpu elapsed time is zero;
        # the old code applied ":.3f"/":.2f" directly and raised TypeError.
        return "none" if value is None else format(value, spec)

    print(f"video_id={summary['video_id']}")
    print(f"video_path={summary['video_path']}")
    print(f"fps={summary['fps']}")
    print(f"max_frames={summary['max_frames']}")
    print(
        "gpu_result "
        f"elapsed_seconds={gpu_result['elapsed_seconds']:.3f} "
        f"frames_processed={gpu_result['frames_processed']} "
        f"frames_per_second={gpu_result['frames_per_second']:.3f}"
    )
    for row in summary["cpu_results"]:
        print(
            "cpu_result "
            f"threads={row['cpu_threads']} "
            f"elapsed_seconds={row['elapsed_seconds']:.3f} "
            f"frames_processed={row['frames_processed']} "
            f"frames_per_second={row['frames_per_second']:.3f} "
            f"speedup_gpu_over_cpu={_fmt(row['speedup_gpu_over_cpu'], '.3f')} "
            f"approx_cpu_threads_for_one_gpu={_fmt(row['approx_cpu_threads_for_one_gpu'], '.2f')}"
        )
    return 0
241
+
242
+
243
def main() -> int:
    """Entry point: dispatch to the worker or the controller based on --device."""
    args = parse_args()
    return worker_main(args) if args.device is not None else controller_main(args)
248
+
249
+
250
if __name__ == "__main__":
    # Script entry point: exit with main()'s status code.
    sys.exit(main())
debug/benchmark_dwpose_cpu_vs_gpu.slurm ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
#SBATCH --job-name=dwpose-cpu-gpu-bench
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:1
#SBATCH --mem=32G
#SBATCH --time=02:00:00
#SBATCH --output=/home/sf895/Sign-DWPose-2M-runtime/slurm/logs/dwpose_cpu_gpu_bench_%j.out
#SBATCH --error=/home/sf895/Sign-DWPose-2M-runtime/slurm/logs/dwpose_cpu_gpu_bench_%j.err

# Run the DWPose CPU-vs-GPU benchmark on one node with one GPU and 32 CPU
# cores (enough for the largest default --cpu-threads value).
set -euo pipefail

ROOT_DIR="/cache/home/sf895/Sign-DWPose-2M"
CONDA_SH="/home/sf895/miniconda3/etc/profile.d/conda.sh"
CONDA_ENV="signx2"
SCRIPT="$ROOT_DIR/debug/benchmark_dwpose_cpu_vs_gpu.py"
# First positional argument: the video id to benchmark (required).
VIDEO_ID="${1:?video id required}"
# Tunables, overridable from the submitting environment.
FPS="${FPS:-24}"
MAX_FRAMES="${MAX_FRAMES:-240}"
CPU_THREADS="${CPU_THREADS:-1,2,4,8,16,32}"
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-/home/sf895/Sign-DWPose-2M-runtime/raw_video}"

source "$CONDA_SH"

# Record the effective configuration in the job log for later diagnosis.
printf 'video_id=%s\n' "$VIDEO_ID"
printf 'hostname=%s\n' "$(hostname)"
printf 'cuda_visible_devices=%s\n' "${CUDA_VISIBLE_DEVICES:-unset}"
printf 'fps=%s\n' "$FPS"
printf 'max_frames=%s\n' "$MAX_FRAMES"
printf 'cpu_threads=%s\n' "$CPU_THREADS"

conda run -n "$CONDA_ENV" python "$SCRIPT" \
    --video-id "$VIDEO_ID" \
    --raw-video-dir "$RAW_VIDEO_DIR" \
    --fps "$FPS" \
    --max-frames "$MAX_FRAMES" \
    --cpu-threads "$CPU_THREADS"
debug/cluster_capacity_snapshot.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import re
5
+ import subprocess
6
+ import sys
7
+ from collections import Counter
8
+
9
+
10
def run(cmd: list[str]) -> str:
    """Execute *cmd*; return its stdout, raising RuntimeError (with stderr) on failure."""
    completed = subprocess.run(cmd, check=False, capture_output=True, text=True)
    if completed.returncode != 0:
        raise RuntimeError(f"command failed: {' '.join(cmd)}\n{completed.stderr.strip()}")
    return completed.stdout
15
+
16
+
17
+ def count_free_gpus(partition: str) -> tuple[int, int]:
18
+ nodes_out = run(["sinfo", "-h", "-N", "-p", partition, "-o", "%N"])
19
+ nodes = [line.strip() for line in nodes_out.splitlines() if line.strip()]
20
+ total = 0
21
+ free = 0
22
+ for node in nodes:
23
+ node_out = run(["scontrol", "show", "node", node, "-o"]).strip()
24
+ if not node_out:
25
+ continue
26
+ state_m = re.search(r"\bState=([^ ]+)", node_out)
27
+ state = state_m.group(1).lower() if state_m else ""
28
+ if any(flag in state for flag in ("drain", "drained", "down", "fail", "inval")):
29
+ continue
30
+ cfg_m = re.search(r"\bCfgTRES=.*?(?:,|^)gres/gpu=(\d+)", node_out)
31
+ alloc_m = re.search(r"\bAllocTRES=.*?(?:,|^)gres/gpu=(\d+)", node_out)
32
+ node_total = int(cfg_m.group(1)) if cfg_m else 0
33
+ node_used = int(alloc_m.group(1)) if alloc_m else 0
34
+ total += node_total
35
+ free += max(0, node_total - node_used)
36
+ return free, total
37
+
38
+
39
+ def count_cpu_capacity(partition: str) -> tuple[int, int, int]:
40
+ out = run(["sinfo", "-h", "-N", "-p", partition, "-o", "%N|%C|%t"])
41
+ free_cpu = 0
42
+ total_cpu = 0
43
+ nodes_with_free_cpu = 0
44
+ for line in out.splitlines():
45
+ if not line.strip() or "|" not in line:
46
+ continue
47
+ _node, cpu_field, state = line.split("|", 2)
48
+ state = state.strip().lower()
49
+ if any(flag in state for flag in ("drain", "drained", "down", "fail", "inval")):
50
+ continue
51
+ parts = cpu_field.split("/")
52
+ if len(parts) != 4:
53
+ continue
54
+ _alloc, idle, _other, total = (int(x) for x in parts)
55
+ free_cpu += idle
56
+ total_cpu += total
57
+ if idle > 0:
58
+ nodes_with_free_cpu += 1
59
+ return free_cpu, total_cpu, nodes_with_free_cpu
60
+
61
+
62
def user_job_breakdown(user: str) -> Counter[tuple[str, str]]:
    """Count *user*'s queued SLURM jobs grouped by (job name, job state)."""
    counter: Counter[tuple[str, str]] = Counter()
    for line in run(["squeue", "-u", user, "-h", "-o", "%j|%T"]).splitlines():
        if not line.strip() or "|" not in line:
            continue
        name, _, state = line.partition("|")
        counter[(name.strip(), state.strip())] += 1
    return counter
71
+
72
+
73
def main() -> int:
    """Print a one-shot capacity snapshot: CPU pool, GPU partitions, user's queue."""
    user = sys.argv[1] if len(sys.argv) > 1 else "sf895"
    cpu_free, cpu_total, cpu_nodes = count_cpu_capacity("main")
    print(f"user={user}")
    print(f"cpu_partition=main free_cpu={cpu_free} total_cpu={cpu_total} nodes_with_free_cpu={cpu_nodes}")
    for part in ("gpu", "gpu-redhat", "cgpu"):
        free, total = count_free_gpus(part)
        print(f"gpu_partition={part} free_gpu={free} total_gpu={total}")
    for (job, state), count in sorted(user_job_breakdown(user).items()):
        print(f"user_jobs job={job} state={state} count={count}")
    return 0
86
+
87
+
88
if __name__ == "__main__":
    # Script entry point: exit with main()'s status code.
    sys.exit(main())
reproduce_independently_slurm.sh CHANGED
@@ -23,6 +23,7 @@ PROGRESS_JSON="${PROGRESS_JSON:-$RUNTIME_ROOT/archive_upload_progress.json}"
23
  PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
24
  PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
25
  PIPELINE03="${PIPELINE03:-$ROOT_DIR/scripts/pipeline03_upload_to_huggingface.py}"
 
26
 
27
  STAGE="${STAGE:-all}"
28
  LIMIT="${LIMIT:-}"
@@ -32,10 +33,11 @@ WORKERS="${WORKERS:-}"
32
  TARGET_BYTES="${TARGET_BYTES:-$((10 * 1024 * 1024 * 1024))}"
33
  TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
34
  DOWNLOAD_BATCH_SIZE="${DOWNLOAD_BATCH_SIZE:-1}"
35
- DOWNLOAD_WORKERS="${DOWNLOAD_WORKERS:-4}"
36
  USE_SLURM_DOWNLOAD="${USE_SLURM_DOWNLOAD:-1}"
37
  PROCESS_BATCH_SIZE="${PROCESS_BATCH_SIZE:-}"
38
  MIN_PROCESS_START_BACKLOG="${MIN_PROCESS_START_BACKLOG:-4}"
 
39
  RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-180}"
40
  MAX_RAW_VIDEO_BYTES="${MAX_RAW_VIDEO_BYTES:-0}"
41
  MAX_ITERATIONS="${MAX_ITERATIONS:-0}"
@@ -58,9 +60,11 @@ DOWNLOAD_TIME="${DOWNLOAD_TIME:-04:00:00}"
58
  DOWNLOAD_CPUS_PER_TASK="${DOWNLOAD_CPUS_PER_TASK:-1}"
59
  DOWNLOAD_MEM="${DOWNLOAD_MEM:-4G}"
60
  DOWNLOAD_ARRAY_PARALLEL="${DOWNLOAD_ARRAY_PARALLEL:-32}"
61
- DOWNLOAD_MAX_ACTIVE="${DOWNLOAD_MAX_ACTIVE:-30}"
62
  DOWNLOAD_START_STAGGER_MIN="${DOWNLOAD_START_STAGGER_MIN:-1}"
63
  DOWNLOAD_START_STAGGER_MAX="${DOWNLOAD_START_STAGGER_MAX:-3}"
 
 
64
  ORCHESTRATOR_PARTITION="${ORCHESTRATOR_PARTITION:-main}"
65
  ORCHESTRATOR_ACCOUNT="${ORCHESTRATOR_ACCOUNT:-}"
66
  ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-24:00:00}"
@@ -539,6 +543,18 @@ run_process_stage() {
539
  return "$status"
540
  }
541
 
 
 
 
 
 
 
 
 
 
 
 
 
542
  run_upload_stage() {
543
  local require_target="${1:-0}"
544
  local cmd=(python "$PIPELINE03"
@@ -614,19 +630,126 @@ print(pending)
614
  PY
615
  }
616
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
617
  cleanup_stale_download_claims() {
618
  python - <<PY
619
  import os
620
  import subprocess
 
621
  from pathlib import Path
622
  claim_dir = Path("$DOWNLOAD_CLAIM_DIR")
623
  claim_dir.mkdir(parents=True, exist_ok=True)
624
  removed = 0
 
 
 
625
  try:
626
- result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%A"], check=True, capture_output=True, text=True)
627
- active_jobs = {line.split("_", 1)[0].strip() for line in result.stdout.splitlines() if line.strip()}
 
 
 
 
 
 
 
628
  except Exception:
629
- active_jobs = set()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
630
  for claim_path in claim_dir.glob("*.claim"):
631
  try:
632
  lines = claim_path.read_text(encoding="utf-8").splitlines()
@@ -634,6 +757,9 @@ for claim_path in claim_dir.glob("*.claim"):
634
  continue
635
  pid = None
636
  job_id = ""
 
 
 
637
  for line in lines:
638
  if line.startswith("pid="):
639
  try:
@@ -642,10 +768,27 @@ for claim_path in claim_dir.glob("*.claim"):
642
  pid = None
643
  elif line.startswith("job_id="):
644
  job_id = line.split("=", 1)[1].strip()
 
 
 
 
 
 
645
  alive = False
646
- if job_id:
647
- alive = job_id in active_jobs
648
- elif pid is not None:
 
 
 
 
 
 
 
 
 
 
 
649
  try:
650
  os.kill(pid, 0)
651
  alive = True
@@ -658,6 +801,51 @@ print(removed)
658
  PY
659
  }
660
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
661
  dir_size_bytes() {
662
  python - <<PY
663
  from pathlib import Path
@@ -677,33 +865,213 @@ PY
677
  count_active_process_claims() {
678
  python - <<PY
679
  import subprocess
 
680
  from pathlib import Path
 
681
  claim_dir = Path("$STATE_ROOT/slurm/state/claims")
682
  claim_dir.mkdir(parents=True, exist_ok=True)
 
 
 
 
683
  try:
684
- result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%A"], check=True, capture_output=True, text=True)
685
- active_jobs = {line.split("_", 1)[0].strip() for line in result.stdout.splitlines() if line.strip()}
 
 
 
 
 
 
 
686
  except Exception:
687
- active_jobs = set()
 
688
  count = 0
689
  for claim_path in claim_dir.glob("*.claim"):
690
  try:
691
  lines = claim_path.read_text(encoding="utf-8").splitlines()
692
  except OSError:
693
  continue
694
- job_id = ""
695
  for line in lines:
696
- if line.startswith("job_id="):
697
- job_id = line.split("=", 1)[1].strip()
698
- break
699
- if job_id and job_id in active_jobs:
700
- count += 1
 
 
 
 
 
 
 
 
 
701
  else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
702
  claim_path.unlink(missing_ok=True)
 
 
703
  print(count)
704
  PY
705
  }
706
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
707
  count_complete_pending_upload() {
708
  python - <<PY
709
  import json
@@ -758,22 +1126,52 @@ download_loop() {
758
  local iteration=0
759
  while true; do
760
  iteration=$((iteration + 1))
761
- local pruned stale_download_claims
762
- pruned="$(prune_processed_raw_videos)"
763
- stale_download_claims="$(cleanup_stale_download_claims)"
 
 
 
 
 
 
 
 
 
 
764
  local pending_download pending_process raw_video_bytes
765
- pending_download="$(count_pending_downloads)"
766
- pending_process="$(count_pending_process)"
767
- raw_video_bytes="$(dir_size_bytes "$RAW_VIDEO_DIR")"
768
- echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned stale_download_claims=$stale_download_claims"
 
 
 
769
 
770
  if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
771
  echo "[download] reached max iterations: $MAX_ITERATIONS"
772
  break
773
  fi
774
  if [[ "$pending_download" -eq 0 ]]; then
775
- echo "[download] nothing left to download"
776
- break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
777
  fi
778
  if [[ "$pending_process" -ge "$RAW_BACKLOG_LIMIT" ]]; then
779
  echo "[download] backpressure: raw backlog $pending_process >= limit $RAW_BACKLOG_LIMIT"
@@ -789,6 +1187,21 @@ download_loop() {
789
  if ! run_download_stage "$DOWNLOAD_BATCH_SIZE"; then
790
  echo "[download] pipeline01 failed; retry after sleep"
791
  sleep "$IDLE_SLEEP_SECONDS"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
792
  fi
793
  done
794
  }
@@ -796,15 +1209,26 @@ download_loop() {
796
  process_loop() {
797
  local iteration=0
798
  local submitted_total=0
 
799
  while true; do
800
  iteration=$((iteration + 1))
801
- local pruned
802
- pruned="$(prune_processed_raw_videos)"
 
 
 
 
 
 
 
803
  local pending_download pending_process active_process_claims remaining_limit cycle_limit
804
- pending_download="$(count_pending_downloads)"
805
- pending_process="$(count_pending_process)"
806
- active_process_claims="$(count_active_process_claims)"
807
- echo "[process] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process slurm_backlog=$active_process_claims submitted_total=$submitted_total pruned_raw_videos=$pruned"
 
 
 
808
 
809
  if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
810
  echo "[process] reached max iterations: $MAX_ITERATIONS"
@@ -853,6 +1277,7 @@ process_loop() {
853
  sleep "$IDLE_SLEEP_SECONDS"
854
  else
855
  submitted_total=$((submitted_total + RUN_PROCESS_STAGE_SUBMITTED_COUNT))
 
856
  fi
857
  sleep "$IDLE_SLEEP_SECONDS"
858
  done
@@ -896,6 +1321,8 @@ upload_loop() {
896
  if ! run_upload_stage "$require_target"; then
897
  echo "[upload] pipeline03 failed; retry after sleep"
898
  sleep "$IDLE_SLEEP_SECONDS"
 
 
899
  fi
900
  done
901
  }
 
23
  PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
24
  PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
25
  PIPELINE03="${PIPELINE03:-$ROOT_DIR/scripts/pipeline03_upload_to_huggingface.py}"
26
+ PIPELINE_SYNC="${PIPELINE_SYNC:-$ROOT_DIR/scripts/sync_processed_csv_from_runtime.py}"
27
 
28
  STAGE="${STAGE:-all}"
29
  LIMIT="${LIMIT:-}"
 
33
  TARGET_BYTES="${TARGET_BYTES:-$((10 * 1024 * 1024 * 1024))}"
34
  TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
35
  DOWNLOAD_BATCH_SIZE="${DOWNLOAD_BATCH_SIZE:-1}"
36
+ DOWNLOAD_WORKERS="${DOWNLOAD_WORKERS:-60}"
37
  USE_SLURM_DOWNLOAD="${USE_SLURM_DOWNLOAD:-1}"
38
  PROCESS_BATCH_SIZE="${PROCESS_BATCH_SIZE:-}"
39
  MIN_PROCESS_START_BACKLOG="${MIN_PROCESS_START_BACKLOG:-4}"
40
+ PROCESS_PENDING_TIMEOUT_SECONDS="${PROCESS_PENDING_TIMEOUT_SECONDS:-1800}"
41
  RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-180}"
42
  MAX_RAW_VIDEO_BYTES="${MAX_RAW_VIDEO_BYTES:-0}"
43
  MAX_ITERATIONS="${MAX_ITERATIONS:-0}"
 
60
  DOWNLOAD_CPUS_PER_TASK="${DOWNLOAD_CPUS_PER_TASK:-1}"
61
  DOWNLOAD_MEM="${DOWNLOAD_MEM:-4G}"
62
  DOWNLOAD_ARRAY_PARALLEL="${DOWNLOAD_ARRAY_PARALLEL:-32}"
63
+ DOWNLOAD_MAX_ACTIVE="${DOWNLOAD_MAX_ACTIVE:-60}"
64
  DOWNLOAD_START_STAGGER_MIN="${DOWNLOAD_START_STAGGER_MIN:-1}"
65
  DOWNLOAD_START_STAGGER_MAX="${DOWNLOAD_START_STAGGER_MAX:-3}"
66
+ DOWNLOAD_CLAIM_GRACE_SECONDS="${DOWNLOAD_CLAIM_GRACE_SECONDS:-600}"
67
+ DOWNLOAD_PARTIAL_TIMEOUT_SECONDS="${DOWNLOAD_PARTIAL_TIMEOUT_SECONDS:-1800}"
68
  ORCHESTRATOR_PARTITION="${ORCHESTRATOR_PARTITION:-main}"
69
  ORCHESTRATOR_ACCOUNT="${ORCHESTRATOR_ACCOUNT:-}"
70
  ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-24:00:00}"
 
543
  return "$status"
544
  }
545
 
546
# Invoke the CSV sync helper so the processed-metadata CSV reflects what is
# actually present in the runtime directories, the upload progress file, and
# the upload status journal.
run_sync_csv_stage() {
    python "$PIPELINE_SYNC" \
        --source-metadata-csv "$SOURCE_METADATA_CSV" \
        --output-metadata-csv "$OUTPUT_METADATA_CSV" \
        --raw-video-dir "$RAW_VIDEO_DIR" \
        --raw-caption-dir "$RAW_CAPTION_DIR" \
        --raw-metadata-dir "$RAW_METADATA_DIR" \
        --dataset-dir "$DATASET_DIR" \
        --progress-path "$PROGRESS_JSON" \
        --status-journal-path "$RUNTIME_ROOT/upload_status_journal.jsonl"
}
557
+
558
  run_upload_stage() {
559
  local require_target="${1:-0}"
560
  local cmd=(python "$PIPELINE03"
 
630
  PY
631
  }
632
 
633
# Gather the numbers the download loop needs to decide whether it may stop.
# Prints a single line: "source_rows|terminal_rows|uploaded_rows|live_download_jobs".
count_download_stop_guard() {
    python - <<PY
import csv
import json
import re
import subprocess
from pathlib import Path
source_csv = Path("$SOURCE_METADATA_CSV")
processed_csv = Path("$OUTPUT_METADATA_CSV")
progress_json = Path("$PROGRESS_JSON")
source_rows = 0
terminal_rows = 0
uploaded_rows = 0
# Total candidate rows: every CSV line whose first column is non-empty.
if source_csv.exists():
    with source_csv.open("r", encoding="utf-8-sig", newline="") as handle:
        for row in csv.reader(handle):
            if row and (row[0] or "").strip():
                source_rows += 1
# Rows whose download reached a terminal state (ok or intentionally skipped).
if processed_csv.exists():
    with processed_csv.open("r", encoding="utf-8-sig", newline="") as handle:
        reader = csv.DictReader(handle)
        for row in reader:
            if (row.get("download_status") or "").strip() in {"ok", "skipped"}:
                terminal_rows += 1
if progress_json.exists():
    try:
        uploaded_rows = len(json.loads(progress_json.read_text()).get("uploaded_folders", {}))
    except Exception:
        # Best effort: a corrupt progress file just reports zero uploads.
        uploaded_rows = 0
line_re = re.compile(r"^(?P<jobid>[^|]+)\|(?P<job>[^|]+)\|(?P<state>[^|]+)$")
array_re = re.compile(r"^(\d+)_\[(.+)\]$")
def expand_count(jobid_token: str) -> int:
    # Expand a squeue array-job token like "123_[0-4,7%2]" into a task count.
    # Plain job ids count as 1; a "%N" throttle suffix is ignored.
    m = array_re.match(jobid_token)
    if not m:
        return 1
    body = m.group(2)
    if "%" in body:
        body = body.split("%", 1)[0]
    total = 0
    for part in body.split(","):
        part = part.strip()
        if not part:
            continue
        if "-" in part:
            a, b = part.split("-", 1)
            try:
                total += int(b) - int(a) + 1
            except ValueError:
                # Unparseable range: count it as a single task.
                total += 1
        else:
            total += 1
    return max(total, 1)
live_download_jobs = 0
# Count live SLURM jobs named exactly "download" that are running or queued.
try:
    proc = subprocess.run(["squeue", "-u", "$USER", "-h", "-o", "%i|%j|%T"], check=False, capture_output=True, text=True)
    for line in (proc.stdout or "").splitlines():
        m = line_re.match(line.strip())
        if not m:
            continue
        if m.group("job") != "download":
            continue
        if m.group("state") not in {"RUNNING", "PENDING", "CONFIGURING"}:
            continue
        live_download_jobs += expand_count(m.group("jobid"))
except Exception:
    # squeue unavailable: report zero live jobs rather than failing the guard.
    live_download_jobs = 0
print(f"{source_rows}|{terminal_rows}|{uploaded_rows}|{live_download_jobs}")
PY
}
702
+
703
  cleanup_stale_download_claims() {
704
  python - <<PY
705
  import os
706
  import subprocess
707
+ from datetime import datetime, timedelta
708
  from pathlib import Path
709
  claim_dir = Path("$DOWNLOAD_CLAIM_DIR")
710
  claim_dir.mkdir(parents=True, exist_ok=True)
711
  removed = 0
712
+ now = datetime.now()
713
+ job_states = {}
714
+ base_states = {}
715
  try:
716
+ result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%i|%T"], check=True, capture_output=True, text=True)
717
+ for line in result.stdout.splitlines():
718
+ if not line.strip() or "|" not in line:
719
+ continue
720
+ job_key, state = line.split("|", 1)
721
+ job_key = job_key.strip()
722
+ state = state.strip().upper()
723
+ job_states[job_key] = state
724
+ base_states[job_key.split("_", 1)[0]] = state
725
  except Exception:
726
+ job_states = {}
727
+ base_states = {}
728
+ base_activity_cache = {}
729
+ def base_job_alive(job_id: str) -> bool:
730
+ if not job_id:
731
+ return False
732
+ if job_id in base_activity_cache:
733
+ return base_activity_cache[job_id]
734
+ alive = False
735
+ try:
736
+ proc = subprocess.run(["sacct", "-n", "-X", "-j", job_id, "--format=JobIDRaw,State", "-P"], check=False, capture_output=True, text=True)
737
+ terminal_tokens = ("COMPLETED", "FAILED", "CANCELLED", "TIMEOUT", "OUT_OF_MEMORY", "NODE_FAIL", "PREEMPTED", "BOOT_FAIL", "DEADLINE", "REVOKED")
738
+ for row in proc.stdout.splitlines():
739
+ if not row.strip() or "|" not in row:
740
+ continue
741
+ jid, state = row.split("|", 1)
742
+ jid = jid.strip()
743
+ state = state.strip().upper()
744
+ if not jid.startswith(job_id):
745
+ continue
746
+ if state and not any(tok in state for tok in terminal_tokens):
747
+ alive = True
748
+ break
749
+ except Exception:
750
+ alive = job_id in base_states
751
+ base_activity_cache[job_id] = alive
752
+ return alive
753
  for claim_path in claim_dir.glob("*.claim"):
754
  try:
755
  lines = claim_path.read_text(encoding="utf-8").splitlines()
 
757
  continue
758
  pid = None
759
  job_id = ""
760
+ task_id = ""
761
+ job_key = ""
762
+ submitted_at = ""
763
  for line in lines:
764
  if line.startswith("pid="):
765
  try:
 
768
  pid = None
769
  elif line.startswith("job_id="):
770
  job_id = line.split("=", 1)[1].strip()
771
+ elif line.startswith("task_id="):
772
+ task_id = line.split("=", 1)[1].strip()
773
+ elif line.startswith("job_key="):
774
+ job_key = line.split("=", 1)[1].strip()
775
+ elif line.startswith("submitted_at="):
776
+ submitted_at = line.split("=", 1)[1].strip()
777
  alive = False
778
+ if submitted_at:
779
+ try:
780
+ submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S")
781
+ if (now - submitted_dt) <= timedelta(seconds=int("$DOWNLOAD_CLAIM_GRACE_SECONDS")):
782
+ alive = True
783
+ except Exception:
784
+ pass
785
+ if (not alive) and job_key:
786
+ alive = job_key in job_states
787
+ elif (not alive) and job_id and task_id:
788
+ alive = f"{job_id}_{task_id}" in job_states
789
+ elif (not alive) and job_id:
790
+ alive = base_job_alive(job_id)
791
+ elif (not alive) and pid is not None:
792
  try:
793
  os.kill(pid, 0)
794
  alive = True
 
801
  PY
802
  }
803
 
804
# Delete orphaned yt-dlp partial artifacts (*.part / *.ytdl) in the raw video
# directory that have no live download claim and have not been modified for
# at least the partial timeout. Prints the number of files removed.
cleanup_stale_download_partials() {
    python - <<PY
import time
from pathlib import Path
claim_dir = Path("$DOWNLOAD_CLAIM_DIR")
raw_dir = Path("$RAW_VIDEO_DIR")
timeout = int("$DOWNLOAD_PARTIAL_TIMEOUT_SECONDS")
now = time.time()
active_ids = set()
# Any existing claim file marks its video id as still in flight.
try:
    claim_dir.mkdir(parents=True, exist_ok=True)
    for claim_path in claim_dir.glob("*.claim"):
        active_ids.add(claim_path.stem)
except Exception:
    pass
removed = 0
if raw_dir.exists():
    for path in raw_dir.iterdir():
        if not path.is_file():
            continue
        name = path.name
        # Only touch downloader temp artifacts, never finished videos.
        if not (name.endswith(".part") or name.endswith(".ytdl") or ".mp4.part" in name or ".mp4.ytdl" in name or ".webm.part" in name or ".webm.ytdl" in name or ".mkv.part" in name or ".mkv.ytdl" in name or ".mov.part" in name or ".mov.ytdl" in name):
            continue
        # Strip the temp suffix (most specific first) to recover the video id.
        video_id = name
        for suffix in (".mp4.part", ".webm.part", ".mkv.part", ".mov.part", ".mp4.ytdl", ".webm.ytdl", ".mkv.ytdl", ".mov.ytdl", ".part", ".ytdl"):
            if video_id.endswith(suffix):
                video_id = video_id[:-len(suffix)]
                break
        if video_id in active_ids:
            continue
        try:
            age = now - path.stat().st_mtime
        except OSError:
            # File vanished between listing and stat; nothing to do.
            continue
        if age < timeout:
            continue
        try:
            path.unlink()
            removed += 1
        except OSError:
            pass
print(removed)
PY
}
+ }
848
+
849
  dir_size_bytes() {
850
  python - <<PY
851
  from pathlib import Path
 
865
# Count claim files whose SLURM processing job is still live, pruning claims
# for jobs that are gone. A claim whose job has sat PENDING longer than
# PROCESS_PENDING_TIMEOUT_SECONDS is cancelled (scancel) and its claim removed.
# Prints the number of live claims.
count_active_process_claims() {
    python - <<PY
import subprocess
# BUG FIX: this import appeared twice in the heredoc; the duplicate is removed.
from datetime import datetime, timedelta
from pathlib import Path
claim_dir = Path("$STATE_ROOT/slurm/state/claims")
claim_dir.mkdir(parents=True, exist_ok=True)
timeout_seconds = int("$PROCESS_PENDING_TIMEOUT_SECONDS")
now = datetime.now()
job_states = {}
base_states = {}
try:
    result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%i|%T"], check=True, capture_output=True, text=True)
    for line in result.stdout.splitlines():
        if not line.strip() or "|" not in line:
            continue
        job_key, state = line.split("|", 1)
        job_key = job_key.strip()
        state = state.strip().upper()
        job_states[job_key] = state
        # Array tasks report ids like "123_7"; index the base job id too.
        base_states[job_key.split("_", 1)[0]] = state
except Exception:
    # squeue unavailable: every claim looks dead below and gets pruned.
    job_states = {}
    base_states = {}
count = 0
for claim_path in claim_dir.glob("*.claim"):
    try:
        lines = claim_path.read_text(encoding="utf-8").splitlines()
    except OSError:
        continue
    # Claims are simple key=value lines.
    meta = {}
    for line in lines:
        if "=" in line:
            k, v = line.split("=", 1)
            meta[k.strip()] = v.strip()
    job_id = meta.get("job_id", "")
    task_id = meta.get("task_id", "")
    job_key = meta.get("job_key", "")
    if not job_id:
        claim_path.unlink(missing_ok=True)
        continue
    # Look up the job state by the most specific identifier available.
    state = None
    if job_key:
        state = job_states.get(job_key)
    elif task_id:
        state = job_states.get(f"{job_id}_{task_id}")
    else:
        state = base_states.get(job_id)
    if not state:
        claim_path.unlink(missing_ok=True)
        continue
    submitted_at = meta.get("submitted_at", "")
    stale_pending = False
    if state == "PENDING" and submitted_at:
        try:
            submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S")
            stale_pending = (now - submitted_dt) > timedelta(seconds=timeout_seconds)
        except Exception:
            # Unparseable timestamp: keep the claim rather than cancel the job.
            stale_pending = False
    if stale_pending:
        subprocess.run(["scancel", job_id], check=False)
        claim_path.unlink(missing_ok=True)
        continue
    count += 1
print(count)
PY
}
933
 
934
+ cleanup_stale_process_jobs() {
935
+ python - <<PY
936
+ import subprocess
937
+ from datetime import datetime, timedelta
938
+ from pathlib import Path
939
+
940
+ claim_dir = Path("$STATE_ROOT/slurm/state/claims")
941
+ claim_dir.mkdir(parents=True, exist_ok=True)
942
+ timeout_seconds = int("$PROCESS_PENDING_TIMEOUT_SECONDS")
943
+ now = datetime.now()
944
+
945
+ bad_reason_tokens = (
946
+ "ReqNodeNotAvail",
947
+ "UnavailableNodes",
948
+ "NodeDown",
949
+ "PartitionDown",
950
+ "PartitionInactive",
951
+ "ReservationDeleted",
952
+ )
953
+ timed_pending_reasons = ("Priority", "Resources", "QOS")
954
+
955
+ squeue_rows = {}
956
+ try:
957
+ proc = subprocess.run(
958
+ ["squeue", "-h", "-u", "$USER", "-n", "dwpose", "-o", "%i|%T|%R"],
959
+ check=False,
960
+ capture_output=True,
961
+ text=True,
962
+ )
963
+ for line in proc.stdout.splitlines():
964
+ if not line.strip() or "|" not in line:
965
+ continue
966
+ job_key, state, reason = line.split("|", 2)
967
+ squeue_rows[job_key.strip()] = (state.strip().upper(), reason.strip())
968
+ except Exception:
969
+ squeue_rows = {}
970
+
971
+ cancelled_jobs = set()
972
+ removed_claims = 0
973
+
974
+ for claim_path in claim_dir.glob("*.claim"):
975
+ try:
976
+ lines = claim_path.read_text(encoding="utf-8").splitlines()
977
+ except OSError:
978
+ continue
979
+ meta = {}
980
+ for line in lines:
981
+ if "=" in line:
982
+ k, v = line.split("=", 1)
983
+ meta[k.strip()] = v.strip()
984
+ job_id = meta.get("job_id", "")
985
+ task_id = meta.get("task_id", "")
986
+ job_key = meta.get("job_key", "") or (f"{job_id}_{task_id}" if job_id and task_id else "")
987
+ submitted_at = meta.get("submitted_at", "")
988
+ if not job_key:
989
+ claim_path.unlink(missing_ok=True)
990
+ removed_claims += 1
991
+ continue
992
+ row = squeue_rows.get(job_key)
993
+ if not row:
994
+ claim_path.unlink(missing_ok=True)
995
+ removed_claims += 1
996
+ continue
997
+ state, reason = row
998
+ should_cancel = False
999
+ if state == "PENDING":
1000
+ if any(tok in reason for tok in bad_reason_tokens):
1001
+ should_cancel = True
1002
+ elif submitted_at:
1003
+ try:
1004
+ submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S")
1005
+ if (now - submitted_dt) > timedelta(seconds=timeout_seconds):
1006
+ if any(tok in reason for tok in timed_pending_reasons) or not reason:
1007
+ should_cancel = True
1008
+ except Exception:
1009
+ pass
1010
+ if should_cancel:
1011
+ subprocess.run(["scancel", job_id], check=False)
1012
+ cancelled_jobs.add(job_id)
1013
+ claim_path.unlink(missing_ok=True)
1014
+ removed_claims += 1
1015
+
1016
+ print(f"{len(cancelled_jobs)}|{removed_claims}")
1017
+ PY
1018
+ }
1019
+
1020
+
1021
+ cleanup_orphan_pending_process_jobs() {
1022
+ python - <<PY
1023
+ import subprocess
1024
+ from datetime import datetime, timedelta
1025
+
1026
+ bad_reason_tokens = (
1027
+ "ReqNodeNotAvail",
1028
+ "UnavailableNodes",
1029
+ "NodeDown",
1030
+ "PartitionDown",
1031
+ "PartitionInactive",
1032
+ "ReservationDeleted",
1033
+ )
1034
+ timed_pending_reasons = ("Priority", "Resources", "QOS")
1035
+ timeout_seconds = int("$PROCESS_PENDING_TIMEOUT_SECONDS")
1036
+ now = datetime.now()
1037
+
1038
+ cancelled = set()
1039
+ try:
1040
+ proc = subprocess.run(
1041
+ ["squeue", "-h", "-u", "$USER", "-n", "dwpose", "-o", "%A|%T|%R|%V"],
1042
+ check=False,
1043
+ capture_output=True,
1044
+ text=True,
1045
+ )
1046
+ for line in proc.stdout.splitlines():
1047
+ if not line.strip() or "|" not in line:
1048
+ continue
1049
+ job_id, state, reason, submit_time = line.split("|", 3)
1050
+ state = state.strip().upper()
1051
+ reason = reason.strip()
1052
+ if state != "PENDING":
1053
+ continue
1054
+ should_cancel = False
1055
+ if any(tok in reason for tok in bad_reason_tokens):
1056
+ should_cancel = True
1057
+ elif any(tok in reason for tok in timed_pending_reasons):
1058
+ submit_time = submit_time.strip()
1059
+ if submit_time and submit_time != "N/A":
1060
+ try:
1061
+ submitted_dt = datetime.strptime(submit_time, "%Y-%m-%dT%H:%M:%S")
1062
+ if (now - submitted_dt) > timedelta(seconds=timeout_seconds):
1063
+ should_cancel = True
1064
+ except Exception:
1065
+ pass
1066
+ if should_cancel:
1067
+ subprocess.run(["scancel", job_id.strip()], check=False)
1068
+ cancelled.add(job_id.strip())
1069
+ except Exception:
1070
+ pass
1071
+ print(len(cancelled))
1072
+ PY
1073
+ }
1074
+
1075
  count_complete_pending_upload() {
1076
  python - <<PY
1077
  import json
 
1126
  local iteration=0
1127
  while true; do
1128
  iteration=$((iteration + 1))
1129
+ local pruned stale_download_claims stale_download_partials stale_process_result stale_process_jobs stale_process_claims stale_orphan_process_jobs
1130
+ pruned="$(prune_processed_raw_videos 2>/dev/null || true)"
1131
+ [[ "$pruned" =~ ^[0-9]+$ ]] || pruned=0
1132
+ stale_download_claims="$(cleanup_stale_download_claims 2>/dev/null || true)"
1133
+ [[ "$stale_download_claims" =~ ^[0-9]+$ ]] || stale_download_claims=0
1134
+ stale_download_partials="$(cleanup_stale_download_partials 2>/dev/null || true)"
1135
+ [[ "$stale_download_partials" =~ ^[0-9]+$ ]] || stale_download_partials=0
1136
+ stale_process_result="$(cleanup_stale_process_jobs 2>/dev/null || true)"
1137
+ IFS="|" read -r stale_process_jobs stale_process_claims <<< "$stale_process_result"
1138
+ [[ "$stale_process_jobs" =~ ^[0-9]+$ ]] || stale_process_jobs=0
1139
+ [[ "$stale_process_claims" =~ ^[0-9]+$ ]] || stale_process_claims=0
1140
+ stale_orphan_process_jobs="$(cleanup_orphan_pending_process_jobs 2>/dev/null || true)"
1141
+ [[ "$stale_orphan_process_jobs" =~ ^[0-9]+$ ]] || stale_orphan_process_jobs=0
1142
  local pending_download pending_process raw_video_bytes
1143
+ pending_download="$(count_pending_downloads 2>/dev/null || true)"
1144
+ [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
1145
+ pending_process="$(count_pending_process 2>/dev/null || true)"
1146
+ [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
1147
+ raw_video_bytes="$(dir_size_bytes "$RAW_VIDEO_DIR" 2>/dev/null || true)"
1148
+ [[ "$raw_video_bytes" =~ ^[0-9]+$ ]] || raw_video_bytes=0
1149
+ echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned stale_download_claims=$stale_download_claims stale_download_partials=$stale_download_partials stale_process_jobs=$stale_process_jobs stale_process_claims=$stale_process_claims stale_orphan_process_jobs=$stale_orphan_process_jobs"
1150
 
1151
  if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
1152
  echo "[download] reached max iterations: $MAX_ITERATIONS"
1153
  break
1154
  fi
1155
  if [[ "$pending_download" -eq 0 ]]; then
1156
+ run_sync_csv_stage >/dev/null 2>&1 || true
1157
+ pending_download="$(count_pending_downloads 2>/dev/null || true)"
1158
+ [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
1159
+ if [[ "$pending_download" -eq 0 ]]; then
1160
+ local stop_guard source_rows terminal_rows uploaded_rows live_download_jobs
1161
+ stop_guard="$(count_download_stop_guard 2>/dev/null || true)"
1162
+ IFS="|" read -r source_rows terminal_rows uploaded_rows live_download_jobs <<< "$stop_guard"
1163
+ [[ "$source_rows" =~ ^[0-9]+$ ]] || source_rows=0
1164
+ [[ "$terminal_rows" =~ ^[0-9]+$ ]] || terminal_rows=0
1165
+ [[ "$uploaded_rows" =~ ^[0-9]+$ ]] || uploaded_rows=0
1166
+ [[ "$live_download_jobs" =~ ^[0-9]+$ ]] || live_download_jobs=0
1167
+ if [[ "$live_download_jobs" -gt 0 || "$terminal_rows" -lt "$source_rows" ]]; then
1168
+ echo "[download] stop guard blocked exit: pending_download=$pending_download source_rows=$source_rows terminal_rows=$terminal_rows uploaded_rows=$uploaded_rows live_download_jobs=$live_download_jobs"
1169
+ sleep "$IDLE_SLEEP_SECONDS"
1170
+ continue
1171
+ fi
1172
+ echo "[download] nothing left to download"
1173
+ break
1174
+ fi
1175
  fi
1176
  if [[ "$pending_process" -ge "$RAW_BACKLOG_LIMIT" ]]; then
1177
  echo "[download] backpressure: raw backlog $pending_process >= limit $RAW_BACKLOG_LIMIT"
 
1187
  if ! run_download_stage "$DOWNLOAD_BATCH_SIZE"; then
1188
  echo "[download] pipeline01 failed; retry after sleep"
1189
  sleep "$IDLE_SLEEP_SECONDS"
1190
+ else
1191
+ run_sync_csv_stage >/dev/null 2>&1 || true
1192
+ fi
1193
+
1194
+ pending_process="$(count_pending_process 2>/dev/null || true)"
1195
+ [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
1196
+ local active_process_claims
1197
+ active_process_claims="$(count_active_process_claims 2>/dev/null || true)"
1198
+ [[ "$active_process_claims" =~ ^[0-9]+$ ]] || active_process_claims=0
1199
+ if [[ "$pending_process" -ge "$MIN_PROCESS_START_BACKLOG" && "$active_process_claims" -eq 0 ]]; then
1200
+ if ! run_process_stage "$PROCESS_BATCH_SIZE"; then
1201
+ echo "[download] fallback process submit failed"
1202
+ else
1203
+ echo "[download] fallback process submit succeeded count=$RUN_PROCESS_STAGE_SUBMITTED_COUNT"
1204
+ fi
1205
  fi
1206
  done
1207
  }
 
1209
  process_loop() {
1210
  local iteration=0
1211
  local submitted_total=0
1212
+ echo "[process] loop started"
1213
  while true; do
1214
  iteration=$((iteration + 1))
1215
+ local pruned stale_process_result stale_process_jobs stale_process_claims stale_orphan_process_jobs
1216
+ pruned="$(prune_processed_raw_videos 2>/dev/null || true)"
1217
+ [[ "$pruned" =~ ^[0-9]+$ ]] || pruned=0
1218
+ stale_process_result="$(cleanup_stale_process_jobs 2>/dev/null || true)"
1219
+ IFS="|" read -r stale_process_jobs stale_process_claims <<< "$stale_process_result"
1220
+ [[ "$stale_process_jobs" =~ ^[0-9]+$ ]] || stale_process_jobs=0
1221
+ [[ "$stale_process_claims" =~ ^[0-9]+$ ]] || stale_process_claims=0
1222
+ stale_orphan_process_jobs="$(cleanup_orphan_pending_process_jobs 2>/dev/null || true)"
1223
+ [[ "$stale_orphan_process_jobs" =~ ^[0-9]+$ ]] || stale_orphan_process_jobs=0
1224
  local pending_download pending_process active_process_claims remaining_limit cycle_limit
1225
+ pending_download="$(count_pending_downloads 2>/dev/null || true)"
1226
+ [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
1227
+ pending_process="$(count_pending_process 2>/dev/null || true)"
1228
+ [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
1229
+ active_process_claims="$(count_active_process_claims 2>/dev/null || true)"
1230
+ [[ "$active_process_claims" =~ ^[0-9]+$ ]] || active_process_claims=0
1231
+ echo "[process] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process slurm_backlog=$active_process_claims submitted_total=$submitted_total pruned_raw_videos=$pruned stale_process_jobs=$stale_process_jobs stale_process_claims=$stale_process_claims stale_orphan_process_jobs=$stale_orphan_process_jobs"
1232
 
1233
  if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
1234
  echo "[process] reached max iterations: $MAX_ITERATIONS"
 
1277
  sleep "$IDLE_SLEEP_SECONDS"
1278
  else
1279
  submitted_total=$((submitted_total + RUN_PROCESS_STAGE_SUBMITTED_COUNT))
1280
+ run_sync_csv_stage >/dev/null 2>&1 || true
1281
  fi
1282
  sleep "$IDLE_SLEEP_SECONDS"
1283
  done
 
1321
  if ! run_upload_stage "$require_target"; then
1322
  echo "[upload] pipeline03 failed; retry after sleep"
1323
  sleep "$IDLE_SLEEP_SECONDS"
1324
+ else
1325
+ run_sync_csv_stage >/dev/null 2>&1 || true
1326
  fi
1327
  done
1328
  }
scripts/benchmark_pipeline02_stream_vs_jpg.sh ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ #SBATCH --job-name=dwpose-bench
3
+ #SBATCH --nodes=1
4
+ #SBATCH --ntasks=1
5
+ #SBATCH --cpus-per-task=8
6
+ #SBATCH --gres=gpu:1
7
+ #SBATCH --mem=32G
8
+ #SBATCH --time=02:00:00
9
+ #SBATCH --output=/home/sf895/Sign-DWPose-2M-runtime/slurm/logs/dwpose_bench_%j.out
10
+ #SBATCH --error=/home/sf895/Sign-DWPose-2M-runtime/slurm/logs/dwpose_bench_%j.err
11
+
12
+ set -euo pipefail
13
+
14
+ ROOT_DIR="/cache/home/sf895/Sign-DWPose-2M"
15
+ RUNTIME_ROOT="/home/sf895/Sign-DWPose-2M-runtime"
16
+ CONDA_SH="/home/sf895/miniconda3/etc/profile.d/conda.sh"
17
+ CONDA_ENV="dwpose"
18
+ VIDEO_ID="${1:?video id required}"
19
+ FPS="${FPS:-24}"
20
+ RAW_VIDEO_DIR="$RUNTIME_ROOT/raw_video"
21
+ BENCH_ROOT="$RUNTIME_ROOT/bench_pipeline02/$VIDEO_ID"
22
+ OLD_DATASET_DIR="$BENCH_ROOT/jpg_spill"
23
+ NEW_DATASET_DIR="$BENCH_ROOT/stream"
24
+ OLD_STATS="$BENCH_ROOT/stats_old.npz"
25
+ NEW_STATS="$BENCH_ROOT/stats_new.npz"
26
+ TMP_ROOT="${SLURM_TMPDIR:-/tmp}"
27
+ VIDEO_PATH=""
28
+ for ext in mp4 mkv webm mov; do
29
+ candidate="$RAW_VIDEO_DIR/$VIDEO_ID.$ext"
30
+ if [[ -f "$candidate" ]]; then
31
+ VIDEO_PATH="$candidate"
32
+ break
33
+ fi
34
+ done
35
+ if [[ -z "$VIDEO_PATH" ]]; then
36
+ echo "Video not found for $VIDEO_ID" >&2
37
+ exit 1
38
+ fi
39
+
40
+ mkdir -p "$BENCH_ROOT"
41
+ rm -rf "$OLD_DATASET_DIR" "$NEW_DATASET_DIR"
42
+ rm -f "$OLD_STATS" "$NEW_STATS"
43
+
44
+ echo "video_id=$VIDEO_ID"
45
+ echo "video_path=$VIDEO_PATH"
46
+ echo "hostname=$(hostname)"
47
+ echo "cuda_visible_devices=${CUDA_VISIBLE_DEVICES:-unset}"
48
+ echo "fps=$FPS"
49
+ echo "tmp_root=$TMP_ROOT"
50
+
51
+ source "$CONDA_SH"
52
+
53
+ run_case() {
54
+ local mode="$1"
55
+ local dataset_dir="$2"
56
+ local stats_path="$3"
57
+ shift 3
58
+ local start end elapsed
59
+ start=$(python3 - <<'PY'
60
+ import time
61
+ print(time.perf_counter())
62
+ PY
63
+ )
64
+ conda run -n "$CONDA_ENV" python -u "$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py" \
65
+ --raw-video-dir "$RAW_VIDEO_DIR" \
66
+ --dataset-dir "$dataset_dir" \
67
+ --stats-npz "$stats_path" \
68
+ --fps "$FPS" \
69
+ --workers 1 \
70
+ --video-ids="$VIDEO_ID" \
71
+ --force \
72
+ --tmp-root "$TMP_ROOT" \
73
+ "$@"
74
+ end=$(python3 - <<'PY'
75
+ import time
76
+ print(time.perf_counter())
77
+ PY
78
+ )
79
+ elapsed=$(python3 - <<PY
80
+ start = float("$start")
81
+ end = float("$end")
82
+ print(f"{end-start:.3f}")
83
+ PY
84
+ )
85
+ local poses_npz="$dataset_dir/$VIDEO_ID/npz/poses.npz"
86
+ local complete_marker="$dataset_dir/$VIDEO_ID/npz/.complete"
87
+ local size_bytes=0
88
+ if [[ -f "$poses_npz" ]]; then
89
+ size_bytes=$(stat -c %s "$poses_npz")
90
+ fi
91
+ echo "benchmark_result mode=$mode elapsed_seconds=$elapsed poses_npz_bytes=$size_bytes complete=$([[ -f "$complete_marker" ]] && echo yes || echo no)"
92
+ }
93
+
94
+ run_case jpg_spill "$OLD_DATASET_DIR" "$OLD_STATS" --spill-jpg-frames
95
+ run_case stream "$NEW_DATASET_DIR" "$NEW_STATS"
scripts/pipeline02_extract_dwpose_from_video.py CHANGED
@@ -8,11 +8,20 @@ import sys
8
  import tempfile
9
  import time
10
  from pathlib import Path
11
- from typing import Dict, List, Sequence
12
 
13
  import numpy as np
 
14
  import torch
 
 
15
  from easy_dwpose import DWposeDetector
 
 
 
 
 
 
16
  from PIL import Image
17
 
18
 
@@ -30,6 +39,55 @@ VIDEO_EXTENSIONS = {".mp4", ".mkv", ".webm", ".mov"}
30
  COMPLETE_MARKER_NAME = ".complete"
31
 
32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  def parse_args() -> argparse.Namespace:
34
  parser = argparse.ArgumentParser(
35
  description="Extract DWpose NPZ files from raw videos."
@@ -57,6 +115,71 @@ def parse_args() -> argparse.Namespace:
57
  action="store_false",
58
  help="Save one NPZ file per frame under the npz directory.",
59
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
  return parser.parse_args()
61
 
62
 
@@ -88,7 +211,7 @@ def select_video_paths(args: argparse.Namespace) -> List[Path]:
88
  return selected
89
 
90
 
91
- def extract_frames(video_path: Path, frame_dir: Path, fps: int) -> None:
92
  command = [
93
  "ffmpeg",
94
  "-hide_banner",
@@ -104,6 +227,71 @@ def extract_frames(video_path: Path, frame_dir: Path, fps: int) -> None:
104
  subprocess.run(command, check=True)
105
 
106
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  def build_npz_payload(pose_data: Dict[str, np.ndarray], width: int, height: int) -> Dict[str, np.ndarray]:
108
  num_persons = int(pose_data["faces"].shape[0]) if "faces" in pose_data else 0
109
  payload: Dict[str, np.ndarray] = {
@@ -114,12 +302,12 @@ def build_npz_payload(pose_data: Dict[str, np.ndarray], width: int, height: int)
114
  if num_persons == 0:
115
  return payload
116
 
117
- bodies = pose_data["bodies"].reshape(num_persons, 18, 2).astype(np.float32)
118
- body_scores = pose_data["body_scores"].astype(np.float32)
119
- faces = pose_data["faces"].astype(np.float32)
120
- face_scores = pose_data["faces_scores"].astype(np.float32)
121
- hands = pose_data["hands"].astype(np.float32)
122
- hand_scores = pose_data["hands_scores"].astype(np.float32)
123
 
124
  for person_idx in range(num_persons):
125
  prefix = f"person_{person_idx:03d}"
@@ -138,6 +326,243 @@ def build_npz_payload(pose_data: Dict[str, np.ndarray], width: int, height: int)
138
  return payload
139
 
140
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
141
  def process_video(
142
  video_path: Path,
143
  dataset_dir: Path,
@@ -146,6 +571,11 @@ def process_video(
146
  tmp_root: Path,
147
  force: bool,
148
  single_poses_npz: bool,
 
 
 
 
 
149
  ) -> None:
150
  video_id = video_path.stem
151
  output_npz_dir = dataset_dir / video_id / "npz"
@@ -159,24 +589,42 @@ def process_video(
159
  shutil.rmtree(output_npz_dir)
160
  output_npz_dir.mkdir(parents=True, exist_ok=True)
161
 
162
- tmp_root.mkdir(parents=True, exist_ok=True)
163
- frame_dir = Path(tempfile.mkdtemp(prefix=f"sign_dwpose_{video_id}_", dir=str(tmp_root)))
164
-
165
- try:
166
- extract_frames(video_path, frame_dir, fps)
167
- frame_paths = sorted(frame_dir.glob("*.jpg"))
168
- total_frames = len(frame_paths)
169
- print(f"{video_id}: extracted {total_frames} frames at {fps} fps")
170
-
171
- aggregated_payloads = []
172
- frame_widths = []
173
- frame_heights = []
174
-
175
- for frame_index, frame_path in enumerate(frame_paths, start=1):
176
- with Image.open(frame_path) as image:
177
- frame = image.convert("RGB")
178
- width, height = frame.size
179
- pose_data = detector(frame, draw_pose=False, include_hands=True, include_face=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
180
  payload = build_npz_payload(pose_data, width, height)
181
  if single_poses_npz:
182
  aggregated_payloads.append(payload)
@@ -184,27 +632,77 @@ def process_video(
184
  frame_heights.append(height)
185
  else:
186
  np.savez(output_npz_dir / f"{frame_index:08d}.npz", **payload)
187
-
188
- if frame_index == 1 or frame_index % 100 == 0 or frame_index == total_frames:
189
- print(f"{video_id}: processed {frame_index}/{total_frames} frames")
190
-
191
- if single_poses_npz:
192
- np.savez(
193
- poses_npz_path,
194
- video_id=np.asarray(video_id),
195
- fps=np.asarray(fps, dtype=np.int32),
196
- total_frames=np.asarray(total_frames, dtype=np.int32),
197
- frame_widths=np.asarray(frame_widths, dtype=np.int32),
198
- frame_heights=np.asarray(frame_heights, dtype=np.int32),
199
- frame_payloads=np.asarray(aggregated_payloads, dtype=object),
200
- )
201
-
202
- complete_marker.write_text(
203
- f"video_id={video_id}\nfps={fps}\nframes={total_frames}\noutput_mode={'single_poses_npy' if single_poses_npz else 'per_frame_npz'}\ncompleted_at={time.strftime('%Y-%m-%d %H:%M:%S')}\n",
204
- encoding="utf-8",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
  )
206
- finally:
207
- shutil.rmtree(frame_dir, ignore_errors=True)
 
 
 
208
 
209
 
210
  def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argparse.Namespace) -> None:
@@ -216,7 +714,18 @@ def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argp
216
  f"CUDA device rank {rank} is unavailable; visible device_count={device_count}"
217
  )
218
  device = f"cuda:{rank}"
219
- detector = DWposeDetector(device=device)
 
 
 
 
 
 
 
 
 
 
 
220
  print(f"Worker {rank}: device={device}, cuda_device_count={device_count}", flush=True)
221
 
222
  for index, video_path in enumerate(video_paths):
@@ -238,6 +747,11 @@ def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argp
238
  tmp_root=args.tmp_root,
239
  force=args.force,
240
  single_poses_npz=args.single_poses_npz,
 
 
 
 
 
241
  )
242
  update_video_stats(
243
  args.stats_npz,
@@ -284,7 +798,7 @@ def main() -> None:
284
  f"Requested workers={worker_count}, but only {visible_gpu_count} CUDA device(s) are visible"
285
  )
286
  worker_count = min(worker_count, len(video_paths))
287
- print(f"DWpose main: visible_cuda_devices={visible_gpu_count}, worker_count={worker_count}", flush=True)
288
 
289
  if worker_count == 1:
290
  worker(0, 1, video_paths, args)
 
8
  import tempfile
9
  import time
10
  from pathlib import Path
11
+ from typing import Dict, Iterator, List, Sequence, Tuple
12
 
13
  import numpy as np
14
+ import onnxruntime as ort
15
  import torch
16
+ import torch.nn.functional as F
17
+ import torchvision.ops as tv_ops
18
  from easy_dwpose import DWposeDetector
19
+ from easy_dwpose.body_estimation import resize_image
20
+ from easy_dwpose.body_estimation.detector import inference_detector, preprocess as detector_preprocess, demo_postprocess as detector_demo_postprocess
21
+ from easy_dwpose.body_estimation.pose import (
22
+ postprocess as pose_postprocess,
23
+ preprocess as pose_preprocess,
24
+ )
25
  from PIL import Image
26
 
27
 
 
39
  COMPLETE_MARKER_NAME = ".complete"
40
 
41
 
42
+ def build_optimized_providers(device: str, optimized_provider: str, cache_dir: Path):
43
+ device = str(device)
44
+ gpu_id = 0
45
+ if ":" in device:
46
+ gpu_id = int(device.split(":", 1)[1])
47
+ cache_dir.mkdir(parents=True, exist_ok=True)
48
+ if optimized_provider == "tensorrt":
49
+ providers = ["TensorrtExecutionProvider", "CUDAExecutionProvider", "CPUExecutionProvider"]
50
+ provider_options = [
51
+ {
52
+ "device_id": str(gpu_id),
53
+ "trt_engine_cache_enable": "1",
54
+ "trt_engine_cache_path": str(cache_dir),
55
+ "trt_timing_cache_enable": "1",
56
+ "trt_fp16_enable": "1",
57
+ },
58
+ {"device_id": str(gpu_id)},
59
+ {},
60
+ ]
61
+ elif optimized_provider == "cuda":
62
+ providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
63
+ provider_options = [
64
+ {"device_id": str(gpu_id)},
65
+ {},
66
+ ]
67
+ else:
68
+ providers = ["CPUExecutionProvider"]
69
+ provider_options = [{}]
70
+ return providers, provider_options
71
+
72
+
73
+ def create_detector(device: str, optimized_mode: bool, optimized_provider: str, tmp_root: Path) -> DWposeDetector:
74
+ detector = DWposeDetector(device=device)
75
+ if not optimized_mode:
76
+ return detector
77
+ providers, provider_options = build_optimized_providers(device, optimized_provider, tmp_root / "ort_trt_cache")
78
+ detector.pose_estimation.session_det = ort.InferenceSession(
79
+ "checkpoints/yolox_l.onnx",
80
+ providers=providers,
81
+ provider_options=provider_options,
82
+ )
83
+ detector.pose_estimation.session_pose = ort.InferenceSession(
84
+ "checkpoints/dw-ll_ucoco_384.onnx",
85
+ providers=providers,
86
+ provider_options=provider_options,
87
+ )
88
+ return detector
89
+
90
+
91
  def parse_args() -> argparse.Namespace:
92
  parser = argparse.ArgumentParser(
93
  description="Extract DWpose NPZ files from raw videos."
 
115
  action="store_false",
116
  help="Save one NPZ file per frame under the npz directory.",
117
  )
118
+ parser.add_argument(
119
+ "--stream-frames",
120
+ dest="stream_frames",
121
+ action="store_true",
122
+ default=True,
123
+ help="Decode frames directly from ffmpeg stdout without JPG spill (default).",
124
+ )
125
+ parser.add_argument(
126
+ "--spill-jpg-frames",
127
+ dest="stream_frames",
128
+ action="store_false",
129
+ help="Use legacy ffmpeg-to-JPG spill path for comparison/debugging.",
130
+ )
131
+ parser.add_argument(
132
+ "--optimized-mode",
133
+ dest="optimized_mode",
134
+ action="store_true",
135
+ default=True,
136
+ help="Enable optimized ndarray + batched pose inference path (default).",
137
+ )
138
+ parser.add_argument(
139
+ "--legacy-mode",
140
+ dest="optimized_mode",
141
+ action="store_false",
142
+ help="Disable optimized path and use legacy per-frame single-image inference.",
143
+ )
144
+ parser.add_argument(
145
+ "--optimized-frame-batch-size",
146
+ type=int,
147
+ default=8,
148
+ help="Frame micro-batch size for optimized pose inference.",
149
+ )
150
+ parser.add_argument(
151
+ "--optimized-detect-resolution",
152
+ type=int,
153
+ default=512,
154
+ help="Detect resolution used only in optimized mode.",
155
+ )
156
+ parser.add_argument(
157
+ "--optimized-frame-stride",
158
+ type=int,
159
+ default=1,
160
+ help="Process every Nth decoded frame in optimized mode.",
161
+ )
162
+ parser.add_argument(
163
+ "--optimized-provider",
164
+ choices=("tensorrt", "cuda", "cpu"),
165
+ default="cuda",
166
+ help="Execution provider used only in optimized mode.",
167
+ )
168
+ parser.add_argument(
169
+ "--optimized-gpu-pose-preprocess",
170
+ action="store_true",
171
+ help="Experimental: move pose crop affine/normalize to GPU in optimized mode.",
172
+ )
173
+ parser.add_argument(
174
+ "--optimized-gpu-detector-postprocess",
175
+ action="store_true",
176
+ help="Experimental: run detector postprocess and NMS on GPU in optimized mode.",
177
+ )
178
+ parser.add_argument(
179
+ "--optimized-io-binding",
180
+ action="store_true",
181
+ help="Experimental: use ONNX Runtime IO binding in optimized mode.",
182
+ )
183
  return parser.parse_args()
184
 
185
 
 
211
  return selected
212
 
213
 
214
+ def extract_frames_to_jpg(video_path: Path, frame_dir: Path, fps: int) -> None:
215
  command = [
216
  "ffmpeg",
217
  "-hide_banner",
 
227
  subprocess.run(command, check=True)
228
 
229
 
230
+ def probe_video_dimensions(video_path: Path) -> Tuple[int, int]:
231
+ command = [
232
+ "ffprobe",
233
+ "-v",
234
+ "error",
235
+ "-select_streams",
236
+ "v:0",
237
+ "-show_entries",
238
+ "stream=width,height",
239
+ "-of",
240
+ "csv=p=0:s=x",
241
+ str(video_path),
242
+ ]
243
+ proc = subprocess.run(command, check=True, capture_output=True, text=True)
244
+ dims = (proc.stdout or "").strip()
245
+ if "x" not in dims:
246
+ raise RuntimeError(f"Unable to parse ffprobe dimensions for {video_path.name}: {dims!r}")
247
+ width_s, height_s = dims.split("x", 1)
248
+ return int(width_s), int(height_s)
249
+
250
+
251
+ def iter_streamed_frames(video_path: Path, fps: int) -> Iterator[Tuple[int, np.ndarray, int, int]]:
252
+ width, height = probe_video_dimensions(video_path)
253
+ frame_bytes = width * height * 3
254
+ command = [
255
+ "ffmpeg",
256
+ "-hide_banner",
257
+ "-loglevel",
258
+ "error",
259
+ "-i",
260
+ str(video_path),
261
+ "-vf",
262
+ f"fps={fps}",
263
+ "-f",
264
+ "rawvideo",
265
+ "-pix_fmt",
266
+ "rgb24",
267
+ "pipe:1",
268
+ ]
269
+ proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
270
+ assert proc.stdout is not None
271
+ try:
272
+ frame_index = 0
273
+ while True:
274
+ chunk = proc.stdout.read(frame_bytes)
275
+ if not chunk:
276
+ break
277
+ if len(chunk) != frame_bytes:
278
+ raise RuntimeError(
279
+ f"Short raw frame read for {video_path.name}: expected {frame_bytes} bytes, got {len(chunk)}"
280
+ )
281
+ frame_index += 1
282
+ frame_array = np.frombuffer(chunk, dtype=np.uint8).reshape((height, width, 3))
283
+ yield frame_index, frame_array, width, height
284
+ finally:
285
+ if proc.stdout:
286
+ proc.stdout.close()
287
+ stderr = proc.stderr.read().decode("utf-8", errors="replace") if proc.stderr else ""
288
+ if proc.stderr:
289
+ proc.stderr.close()
290
+ returncode = proc.wait()
291
+ if returncode != 0:
292
+ raise RuntimeError(f"ffmpeg raw frame stream failed for {video_path.name}: {stderr.strip()}")
293
+
294
+
295
  def build_npz_payload(pose_data: Dict[str, np.ndarray], width: int, height: int) -> Dict[str, np.ndarray]:
296
  num_persons = int(pose_data["faces"].shape[0]) if "faces" in pose_data else 0
297
  payload: Dict[str, np.ndarray] = {
 
302
  if num_persons == 0:
303
  return payload
304
 
305
+ bodies = pose_data["bodies"].reshape(num_persons, 18, 2).astype(np.float32, copy=False)
306
+ body_scores = pose_data["body_scores"].astype(np.float32, copy=False)
307
+ faces = pose_data["faces"].astype(np.float32, copy=False)
308
+ face_scores = pose_data["faces_scores"].astype(np.float32, copy=False)
309
+ hands = pose_data["hands"].astype(np.float32, copy=False)
310
+ hand_scores = pose_data["hands_scores"].astype(np.float32, copy=False)
311
 
312
  for person_idx in range(num_persons):
313
  prefix = f"person_{person_idx:03d}"
 
326
  return payload
327
 
328
 
329
+ def run_session_outputs(
330
+ session: ort.InferenceSession,
331
+ input_array: np.ndarray,
332
+ use_io_binding: bool,
333
+ device_id: int,
334
+ ):
335
+ input_name = session.get_inputs()[0].name
336
+ output_names = [out.name for out in session.get_outputs()]
337
+ if not use_io_binding:
338
+ return session.run(output_names, {input_name: input_array})
339
+ io_binding = session.io_binding()
340
+ io_binding.bind_cpu_input(input_name, input_array)
341
+ for output_name in output_names:
342
+ io_binding.bind_output(output_name, device_type="cuda", device_id=device_id)
343
+ session.run_with_iobinding(io_binding)
344
+ return io_binding.copy_outputs_to_cpu()
345
+
346
+
347
def inference_detector_gpu_postprocess(
    session: ort.InferenceSession,
    ori_img: np.ndarray,
    device: torch.device,
    use_io_binding: bool,
    device_id: int,
) -> np.ndarray:
    """Run the person detector and post-process its raw output on *device*.

    Mirrors the CPU `inference_detector` helper but moves score filtering and
    NMS onto the torch device. Returns an (N, 4) float32 array of xyxy boxes in
    the original image's coordinate frame, or an empty array when no detection
    survives the thresholds.
    """
    input_shape = (640, 640)
    img, ratio = detector_preprocess(ori_img, input_shape)
    outputs = run_session_outputs(session, img[None, :, :, :], use_io_binding, device_id)
    predictions = detector_demo_postprocess(outputs[0], input_shape)[0]
    pred = torch.from_numpy(np.ascontiguousarray(predictions)).to(device=device, dtype=torch.float32)
    # Row layout: cx, cy, w, h, objectness, then per-class scores
    # (YOLOX-style output — presumably; confirm against detector_demo_postprocess).
    boxes = pred[:, :4]
    score_obj = pred[:, 4]
    cls_scores = pred[:, 5:]
    if cls_scores.ndim == 1:
        cls_scores = cls_scores.unsqueeze(1)
    # Only class 0 ("person") matters for this pipeline.
    cls0 = score_obj * cls_scores[:, 0]
    mask = cls0 > 0.1
    if not torch.any(mask):
        return np.array([])
    boxes = boxes[mask]
    cls0 = cls0[mask]
    # Convert center/size boxes to corner (xyxy) form.
    boxes_xyxy = torch.empty_like(boxes)
    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
    # Undo the resize ratio applied by detector_preprocess.
    boxes_xyxy /= ratio
    keep = tv_ops.nms(boxes_xyxy, cls0, 0.45)
    if keep.numel() == 0:
        return np.array([])
    final_boxes = boxes_xyxy[keep]
    final_scores = cls0[keep]
    # Final confidence cut (0.3) applied after NMS.
    final_boxes = final_boxes[final_scores > 0.3]
    if final_boxes.numel() == 0:
        return np.array([])
    return final_boxes.detach().cpu().numpy()
385
+
386
+
387
def optimized_detector_call(
    detector: DWposeDetector,
    frame: np.ndarray,
    detect_resolution: int,
    include_hands: bool = True,
    include_face: bool = True,
) -> Dict[str, np.ndarray]:
    """Single-frame wrapper around the batched optimized pipeline.

    The hands/face flags exist only for signature compatibility with the stock
    detector call; the optimized path always extracts both.
    """
    del include_hands, include_face
    batch_results = optimized_process_frame_batch(
        detector, [(1, frame, 0, 0)], detect_resolution
    )
    _, pose_data, _, _ = batch_results[0]
    return pose_data
396
+
397
+
398
def empty_pose_payload() -> Dict[str, np.ndarray]:
    """Return a zero-person pose payload with correctly-shaped empty arrays."""
    shapes = {
        "bodies": (0, 2),
        "body_scores": (0, 18),
        "hands": (0, 21, 2),
        "hands_scores": (0, 21),
        "faces": (0, 68, 2),
        "faces_scores": (0, 68),
    }
    return {key: np.zeros(shape, dtype=np.float32) for key, shape in shapes.items()}
408
+
409
+
410
def format_pose_output(
    detector: DWposeDetector,
    keypoints: np.ndarray,
    scores: np.ndarray,
    width: int,
    height: int,
) -> Dict[str, np.ndarray]:
    """Convert raw keypoints to OpenPose ordering and build the payload dict.

    *keypoints* is expected to be (num_persons, K, 2) COCO-style keypoints with
    matching *scores* (K >= 17 — TODO confirm against pose_postprocess). A
    synthetic neck joint (shoulder midpoint) is inserted at index 17 before
    remapping to the OpenPose body-18 layout; the detector's own `_format_pose`
    then assembles the final payload.
    """
    keypoints_info = np.concatenate((keypoints, scores[..., None]), axis=-1)
    # Neck = mean of left/right shoulders (COCO indices 5 and 6); valid only
    # when BOTH shoulder scores exceed 0.3.
    neck = np.mean(keypoints_info[:, [5, 6]], axis=1)
    neck[:, 2:4] = np.logical_and(
        keypoints_info[:, 5, 2:4] > 0.3,
        keypoints_info[:, 6, 2:4] > 0.3,
    ).astype(int)
    new_keypoints_info = np.insert(keypoints_info, 17, neck, axis=1)
    # Permute joints from MMPose/COCO indexing into OpenPose body indexing.
    mmpose_idx = [17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3]
    openpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]
    new_keypoints_info[:, openpose_idx] = new_keypoints_info[:, mmpose_idx]
    keypoints_info = new_keypoints_info

    # Split coordinates from confidences and hand off to the detector formatter.
    keypoints = keypoints_info[..., :2]
    scores = keypoints_info[..., 2]
    return detector._format_pose(keypoints, scores, width, height)
432
+
433
+
434
def prepare_optimized_frame(
    detector: DWposeDetector,
    frame: np.ndarray,
    detect_resolution: int,
) -> Tuple[np.ndarray, np.ndarray, int, int]:
    """Resize one frame and run person detection on it.

    Returns (resized RGB image, detection result, resized width, resized height).
    """
    if isinstance(frame, np.ndarray):
        rgb = frame
    else:
        # Non-ndarray input (e.g. a PIL image): convert to an RGB array first.
        rgb = np.asarray(frame.convert("RGB"))
    resized = resize_image(np.ascontiguousarray(rgb), target_resolution=detect_resolution)
    resized_height, resized_width = resized.shape[:2]
    det_session = detector.pose_estimation.session_det
    if getattr(detector, "_optimized_gpu_detector_postprocess", False):
        # GPU path: score filtering and NMS happen on the torch device.
        detections = inference_detector_gpu_postprocess(
            det_session,
            resized,
            detector._optimized_torch_device,
            getattr(detector, "_optimized_io_binding", False),
            getattr(detector, "_optimized_device_id", 0),
        )
    else:
        detections = inference_detector(det_session, resized)
    return resized, detections, resized_width, resized_height
455
+
456
+
457
def gpu_pose_preprocess(
    image: np.ndarray,
    out_bbox: np.ndarray,
    input_size: Tuple[int, int],
    device: torch.device,
) -> Tuple[np.ndarray, List[np.ndarray], List[np.ndarray]]:
    """Crop person boxes from *image* on the GPU and normalize them for the pose model.

    Returns (crops, centers, scales): crops is an (N, 3, H, W) float32 array
    ready for ONNX inference; centers/scales describe each crop's mapping back
    to the source image (consumed later by pose_postprocess).
    """
    if len(out_bbox) == 0:
        return np.empty((0, 3, input_size[1], input_size[0]), dtype=np.float32), [], []
    img_t = torch.from_numpy(np.ascontiguousarray(image)).to(device=device, dtype=torch.float32)
    # HWC -> 1x3xHxW for grid_sample.
    img_t = img_t.permute(2, 0, 1).unsqueeze(0)
    H, W = image.shape[:2]
    out_w, out_h = input_size
    boxes = np.asarray(out_bbox, dtype=np.float32)
    x0 = boxes[:, 0]
    y0 = boxes[:, 1]
    x1 = boxes[:, 2]
    y1 = boxes[:, 3]
    centers = np.stack([(x0 + x1) * 0.5, (y0 + y1) * 0.5], axis=1).astype(np.float32)
    # 1.25x padding around each detection box.
    scales = np.stack([(x1 - x0) * 1.25, (y1 - y0) * 1.25], axis=1).astype(np.float32)
    aspect = out_w / out_h
    w = scales[:, 0:1]
    h = scales[:, 1:2]
    # Expand each box to the model's aspect ratio without shrinking either side.
    scales = np.where(w > h * aspect, np.concatenate([w, w / aspect], axis=1), np.concatenate([h * aspect, h], axis=1)).astype(np.float32)
    centers_t = torch.from_numpy(centers).to(device=device, dtype=torch.float32)
    scales_t = torch.from_numpy(scales).to(device=device, dtype=torch.float32)
    # Build per-box affine transforms in the normalized [-1, 1] coordinate
    # system expected by affine_grid (align_corners=True convention).
    theta = torch.zeros((len(boxes), 2, 3), device=device, dtype=torch.float32)
    theta[:, 0, 0] = scales_t[:, 0] / max(W - 1, 1)
    theta[:, 1, 1] = scales_t[:, 1] / max(H - 1, 1)
    theta[:, 0, 2] = 2.0 * centers_t[:, 0] / max(W - 1, 1) - 1.0
    theta[:, 1, 2] = 2.0 * centers_t[:, 1] / max(H - 1, 1) - 1.0
    grid = F.affine_grid(theta, size=(len(boxes), 3, out_h, out_w), align_corners=True)
    crops = F.grid_sample(img_t.expand(len(boxes), -1, -1, -1), grid, mode='bilinear', padding_mode='zeros', align_corners=True)
    # Mean/std normalization in 0-255 pixel space (ImageNet-style constants).
    mean = torch.tensor([123.675, 116.28, 103.53], device=device, dtype=torch.float32).view(1, 3, 1, 1)
    std = torch.tensor([58.395, 57.12, 57.375], device=device, dtype=torch.float32).view(1, 3, 1, 1)
    crops = (crops - mean) / std
    return crops.detach().cpu().numpy(), [c for c in centers], [s for s in scales]
493
+
494
+
495
def optimized_process_frame_batch(
    detector: DWposeDetector,
    frames: Sequence[Tuple[int, np.ndarray, int, int]],
    detect_resolution: int,
) -> List[Tuple[int, Dict[str, np.ndarray], int, int]]:
    """Run detection + pose estimation for several frames in one pose-model call.

    *frames* items are (frame_index, rgb_frame, original_width, original_height).
    All person crops from all frames are stacked into a single ONNX batch, then
    split back per frame. Returns (frame_index, pose_payload, width, height)
    tuples in the input order.
    """
    session_pose = detector.pose_estimation.session_pose
    # Pose model input shape is NCHW; size is passed around as (width, height).
    model_input = session_pose.get_inputs()[0].shape
    model_input_size = (model_input[3], model_input[2])

    # prepared items: (frame_index, payload-or-None, width, height, crop_count,
    # resized_width, resized_height). crop_count links each frame to its slice
    # of the stacked pose batch.
    prepared = []
    pose_inputs = []
    all_centers = []
    all_scales = []

    for frame_index, frame, width, height in frames:
        input_image, det_result, input_width, input_height = prepare_optimized_frame(detector, frame, detect_resolution)
        if len(det_result) == 0:
            # No person detected: emit an empty payload immediately.
            prepared.append((frame_index, empty_pose_payload(), width, height, 0, input_width, input_height))
            continue
        torch_device = getattr(detector, "_optimized_torch_device", None)
        if torch_device is not None:
            # GPU crop/normalize path.
            batch_imgs, centers, scales = gpu_pose_preprocess(input_image, det_result, model_input_size, torch_device)
            count = int(batch_imgs.shape[0])
            # NOTE(review): if count were ever 0 here the payload stays None
            # downstream — looks unreachable since det_result is non-empty; confirm.
            prepared.append((frame_index, None, width, height, count, input_width, input_height))
            if count:
                pose_inputs.extend(list(batch_imgs))
                all_centers.extend(centers)
                all_scales.extend(scales)
            continue
        # CPU fallback path: crops come back HWC and need transposing to CHW.
        resized_imgs, centers, scales = pose_preprocess(input_image, det_result, model_input_size)
        count = len(resized_imgs)
        prepared.append((frame_index, None, width, height, count, input_width, input_height))
        pose_inputs.extend([img.transpose(2, 0, 1) for img in resized_imgs])
        all_centers.extend(centers)
        all_scales.extend(scales)

    if pose_inputs:
        # One ONNX run for every crop from every frame in the batch.
        batch = np.stack(pose_inputs, axis=0).astype(np.float32, copy=False)
        sess_input = {session_pose.get_inputs()[0].name: batch}
        sess_output = [out.name for out in session_pose.get_outputs()]
        simcc_x, simcc_y = session_pose.run(sess_output, sess_input)
        # pose_postprocess expects per-crop (x, y) output pairs.
        batched_outputs = [(simcc_x[i : i + 1], simcc_y[i : i + 1]) for i in range(batch.shape[0])]
        keypoints_all, scores_all = pose_postprocess(
            batched_outputs,
            model_input_size,
            all_centers,
            all_scales,
        )
    else:
        keypoints_all = scores_all = None

    # Re-slice the flat keypoint arrays back to their owning frames by offset.
    results = []
    offset = 0
    for frame_index, pose_data, width, height, count, input_width, input_height in prepared:
        if count == 0:
            results.append((frame_index, pose_data, width, height))
            continue
        keypoints = keypoints_all[offset : offset + count]
        scores = scores_all[offset : offset + count]
        offset += count
        results.append(
            (
                frame_index,
                format_pose_output(detector, keypoints, scores, input_width, input_height),
                width,
                height,
            )
        )
    return results
564
+
565
+
566
  def process_video(
567
  video_path: Path,
568
  dataset_dir: Path,
 
571
  tmp_root: Path,
572
  force: bool,
573
  single_poses_npz: bool,
574
+ stream_frames: bool,
575
+ optimized_mode: bool,
576
+ optimized_frame_batch_size: int,
577
+ optimized_detect_resolution: int,
578
+ optimized_frame_stride: int,
579
  ) -> None:
580
  video_id = video_path.stem
581
  output_npz_dir = dataset_dir / video_id / "npz"
 
589
  shutil.rmtree(output_npz_dir)
590
  output_npz_dir.mkdir(parents=True, exist_ok=True)
591
 
592
+ aggregated_payloads = []
593
+ frame_widths = []
594
+ frame_heights = []
595
+ frame_indices = []
596
+ total_frames = 0
597
+ decode_start = time.perf_counter()
598
+ process_start = decode_start
599
+
600
+ if stream_frames:
601
+ print(f"{video_id}: decoding mode=stream fps={fps} optimized={optimized_mode}")
602
+ frame_batch = []
603
+ batch_size = max(1, optimized_frame_batch_size)
604
+ frame_stride = max(1, optimized_frame_stride)
605
+ for frame_index, frame, width, height in iter_streamed_frames(video_path, fps):
606
+ total_frames = frame_index
607
+ if optimized_mode:
608
+ if ((frame_index - 1) % frame_stride) != 0:
609
+ continue
610
+ frame_batch.append((frame_index, frame, width, height))
611
+ if len(frame_batch) < batch_size:
612
+ continue
613
+ batch_results = optimized_process_frame_batch(detector, frame_batch, optimized_detect_resolution)
614
+ frame_batch = []
615
+ for result_index, pose_data, result_width, result_height in batch_results:
616
+ payload = build_npz_payload(pose_data, result_width, result_height)
617
+ if single_poses_npz:
618
+ aggregated_payloads.append(payload)
619
+ frame_widths.append(result_width)
620
+ frame_heights.append(result_height)
621
+ frame_indices.append(result_index)
622
+ else:
623
+ np.savez(output_npz_dir / f"{result_index:08d}.npz", **payload)
624
+ if result_index == 1 or result_index % 100 == 0:
625
+ print(f"{video_id}: processed {result_index} frames")
626
+ continue
627
+ pose_data = detector(frame, draw_pose=False, include_hands=True, include_face=True)
628
  payload = build_npz_payload(pose_data, width, height)
629
  if single_poses_npz:
630
  aggregated_payloads.append(payload)
 
632
  frame_heights.append(height)
633
  else:
634
  np.savez(output_npz_dir / f"{frame_index:08d}.npz", **payload)
635
+ if frame_index == 1 or frame_index % 100 == 0:
636
+ print(f"{video_id}: processed {frame_index} frames")
637
+ if optimized_mode and frame_batch:
638
+ for result_index, pose_data, result_width, result_height in optimized_process_frame_batch(detector, frame_batch, optimized_detect_resolution):
639
+ payload = build_npz_payload(pose_data, result_width, result_height)
640
+ if single_poses_npz:
641
+ aggregated_payloads.append(payload)
642
+ frame_widths.append(result_width)
643
+ frame_heights.append(result_height)
644
+ frame_indices.append(result_index)
645
+ else:
646
+ np.savez(output_npz_dir / f"{result_index:08d}.npz", **payload)
647
+ if result_index == 1 or result_index % 100 == 0:
648
+ print(f"{video_id}: processed {result_index} frames")
649
+ else:
650
+ print(f"{video_id}: decoding mode=jpg-spill fps={fps} optimized={optimized_mode}")
651
+ tmp_root.mkdir(parents=True, exist_ok=True)
652
+ frame_dir = Path(tempfile.mkdtemp(prefix=f"sign_dwpose_{video_id}_", dir=str(tmp_root)))
653
+ try:
654
+ extract_frames_to_jpg(video_path, frame_dir, fps)
655
+ frame_paths = sorted(frame_dir.glob("*.jpg"))
656
+ total_frames = len(frame_paths)
657
+ print(f"{video_id}: extracted {total_frames} frames at {fps} fps")
658
+ process_start = time.perf_counter()
659
+ for frame_index, frame_path in enumerate(frame_paths, start=1):
660
+ with Image.open(frame_path) as image:
661
+ frame = np.asarray(image.convert("RGB"))
662
+ height, width = frame.shape[:2]
663
+ if optimized_mode:
664
+ pose_data = optimized_detector_call(
665
+ detector,
666
+ frame,
667
+ optimized_detect_resolution,
668
+ include_hands=True,
669
+ include_face=True,
670
+ )
671
+ else:
672
+ pose_data = detector(frame, draw_pose=False, include_hands=True, include_face=True)
673
+ payload = build_npz_payload(pose_data, width, height)
674
+ if single_poses_npz:
675
+ aggregated_payloads.append(payload)
676
+ frame_widths.append(width)
677
+ frame_heights.append(height)
678
+ frame_indices.append(frame_index)
679
+ frame_indices.append(frame_index)
680
+ else:
681
+ np.savez(output_npz_dir / f"{frame_index:08d}.npz", **payload)
682
+ if frame_index == 1 or frame_index % 100 == 0 or frame_index == total_frames:
683
+ print(f"{video_id}: processed {frame_index}/{total_frames} frames")
684
+ finally:
685
+ shutil.rmtree(frame_dir, ignore_errors=True)
686
+
687
+ decode_process_elapsed = time.perf_counter() - decode_start
688
+ print(f"{video_id}: processed total_frames={total_frames} elapsed={decode_process_elapsed:.2f}s mode={'stream' if stream_frames else 'jpg-spill'} optimized={optimized_mode}")
689
+
690
+ if single_poses_npz:
691
+ np.savez(
692
+ poses_npz_path,
693
+ video_id=np.asarray(video_id),
694
+ fps=np.asarray(fps, dtype=np.int32),
695
+ total_frames=np.asarray(total_frames, dtype=np.int32),
696
+ frame_widths=np.asarray(frame_widths, dtype=np.int32),
697
+ frame_heights=np.asarray(frame_heights, dtype=np.int32),
698
+ frame_indices=np.asarray(frame_indices, dtype=np.int32),
699
+ frame_payloads=np.asarray(aggregated_payloads, dtype=object),
700
  )
701
+
702
+ complete_marker.write_text(
703
+ f"video_id={video_id}\nfps={fps}\nframes={total_frames}\noutput_mode={'single_poses_npy' if single_poses_npz else 'per_frame_npz'}\ndecode_mode={'stream' if stream_frames else 'jpg-spill'}\noptimized_mode={optimized_mode}\noptimized_detect_resolution={optimized_detect_resolution}\noptimized_frame_stride={optimized_frame_stride}\ncompleted_at={time.strftime('%Y-%m-%d %H:%M:%S')}\n",
704
+ encoding="utf-8",
705
+ )
706
 
707
 
708
  def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argparse.Namespace) -> None:
 
714
  f"CUDA device rank {rank} is unavailable; visible device_count={device_count}"
715
  )
716
  device = f"cuda:{rank}"
717
+ detector = create_detector(
718
+ device=device,
719
+ optimized_mode=args.optimized_mode,
720
+ optimized_provider=args.optimized_provider,
721
+ tmp_root=args.tmp_root,
722
+ )
723
+ if args.optimized_mode:
724
+ detector._optimized_device_id = int(device.split(":", 1)[1]) if ":" in device else 0
725
+ detector._optimized_io_binding = bool(args.optimized_io_binding)
726
+ detector._optimized_gpu_detector_postprocess = bool(args.optimized_gpu_detector_postprocess)
727
+ if args.optimized_mode and (args.optimized_gpu_pose_preprocess or args.optimized_gpu_detector_postprocess):
728
+ detector._optimized_torch_device = torch.device(device)
729
  print(f"Worker {rank}: device={device}, cuda_device_count={device_count}", flush=True)
730
 
731
  for index, video_path in enumerate(video_paths):
 
747
  tmp_root=args.tmp_root,
748
  force=args.force,
749
  single_poses_npz=args.single_poses_npz,
750
+ stream_frames=args.stream_frames,
751
+ optimized_mode=args.optimized_mode,
752
+ optimized_frame_batch_size=args.optimized_frame_batch_size,
753
+ optimized_detect_resolution=args.optimized_detect_resolution,
754
+ optimized_frame_stride=args.optimized_frame_stride,
755
  )
756
  update_video_stats(
757
  args.stats_npz,
 
798
  f"Requested workers={worker_count}, but only {visible_gpu_count} CUDA device(s) are visible"
799
  )
800
  worker_count = min(worker_count, len(video_paths))
801
+ print(f"DWpose main: visible_cuda_devices={visible_gpu_count}, worker_count={worker_count}, stream_frames={args.stream_frames}, optimized_mode={args.optimized_mode}, optimized_frame_batch_size={args.optimized_frame_batch_size}, optimized_detect_resolution={args.optimized_detect_resolution}, optimized_frame_stride={args.optimized_frame_stride}, optimized_provider={args.optimized_provider}, optimized_gpu_pose_preprocess={args.optimized_gpu_pose_preprocess}, optimized_gpu_detector_postprocess={args.optimized_gpu_detector_postprocess}, optimized_io_binding={args.optimized_io_binding}", flush=True)
802
 
803
  if worker_count == 1:
804
  worker(0, 1, video_paths, args)
scripts/pipeline03_upload_to_huggingface.py CHANGED
@@ -54,7 +54,8 @@ def parse_args() -> argparse.Namespace:
54
  parser.add_argument("--repo-type", default="dataset")
55
  parser.add_argument("--target-bytes", type=int, default=DEFAULT_TARGET_BYTES)
56
  parser.add_argument("--target-folders", type=int, default=DEFAULT_TARGET_FOLDERS)
57
- parser.add_argument("--require-target-bytes", action="store_true")
 
58
  parser.add_argument("--dry-run", action="store_true")
59
  parser.add_argument("--upload-mode", choices=["git-ssh", "api", "api-stream"], default=os.environ.get("HF_UPLOAD_MODE", "api"))
60
  parser.add_argument("--git-clone-dir", type=Path, default=DEFAULT_GIT_CLONE_DIR)
@@ -372,7 +373,11 @@ def main() -> None:
372
  args = parse_args()
373
  print(f"[pipeline03] start upload_mode={args.upload_mode} repo_id={args.repo_id}", flush=True)
374
  progress = load_progress(args.progress_path)
375
- print(f"[pipeline03] loaded progress archives={len(progress.get("archives", {}))} uploaded_folders={len(progress.get("uploaded_folders", {}))}", flush=True)
 
 
 
 
376
  resolved_token = resolve_token(args.token)
377
  print(f"[pipeline03] token_present={bool(resolved_token)}", flush=True)
378
  api = HfApi(token=resolved_token) if args.upload_mode in {"api", "api-stream"} else None
@@ -380,8 +385,8 @@ def main() -> None:
380
 
381
  try:
382
  if args.upload_mode in {"api", "api-stream"}:
383
- print("[pipeline03] listing repo files via api", flush=True)
384
- repo_files = api.list_repo_files(repo_id=args.repo_id, repo_type=args.repo_type)
385
  else:
386
  print("[pipeline03] listing repo files via git", flush=True)
387
  repo_files = list_repo_files_via_git(args.git_clone_dir, args.repo_id, args.repo_type)
@@ -413,7 +418,8 @@ def main() -> None:
413
  else:
414
  remaining_folders = enrich_folder_sizes(remaining_folder_paths)
415
  remaining_bytes = total_batchable_bytes(remaining_folders)
416
- if args.require_target_bytes and remaining_bytes < args.target_bytes:
 
417
  print(
418
  f"Skip upload: only {format_size(remaining_bytes)} across {remaining_count} completed NPZ folders available, below targets {format_size(args.target_bytes)} or {args.target_folders} folders."
419
  )
@@ -434,8 +440,7 @@ def main() -> None:
434
  break
435
 
436
  args.archive_dir.mkdir(parents=True, exist_ok=True)
437
- update_many_video_stats_best_effort(
438
- args.stats_npz,
439
  args.status_journal_path,
440
  batch_names,
441
  upload_status="uploading",
 
54
  parser.add_argument("--repo-type", default="dataset")
55
  parser.add_argument("--target-bytes", type=int, default=DEFAULT_TARGET_BYTES)
56
  parser.add_argument("--target-folders", type=int, default=DEFAULT_TARGET_FOLDERS)
57
+ parser.add_argument("--require-target-bytes", action="store_true", default=True)
58
+ parser.add_argument("--allow-small-final-batch", action="store_true")
59
  parser.add_argument("--dry-run", action="store_true")
60
  parser.add_argument("--upload-mode", choices=["git-ssh", "api", "api-stream"], default=os.environ.get("HF_UPLOAD_MODE", "api"))
61
  parser.add_argument("--git-clone-dir", type=Path, default=DEFAULT_GIT_CLONE_DIR)
 
373
  args = parse_args()
374
  print(f"[pipeline03] start upload_mode={args.upload_mode} repo_id={args.repo_id}", flush=True)
375
  progress = load_progress(args.progress_path)
376
+ print(
377
+ f"[pipeline03] loaded progress archives={len(progress.get('archives', {}))} "
378
+ f"uploaded_folders={len(progress.get('uploaded_folders', {}))}",
379
+ flush=True,
380
+ )
381
  resolved_token = resolve_token(args.token)
382
  print(f"[pipeline03] token_present={bool(resolved_token)}", flush=True)
383
  api = HfApi(token=resolved_token) if args.upload_mode in {"api", "api-stream"} else None
 
385
 
386
  try:
387
  if args.upload_mode in {"api", "api-stream"}:
388
+ print("[pipeline03] skipping repo file listing for api mode; using local progress for archive index", flush=True)
389
+ repo_files = []
390
  else:
391
  print("[pipeline03] listing repo files via git", flush=True)
392
  repo_files = list_repo_files_via_git(args.git_clone_dir, args.repo_id, args.repo_type)
 
418
  else:
419
  remaining_folders = enrich_folder_sizes(remaining_folder_paths)
420
  remaining_bytes = total_batchable_bytes(remaining_folders)
421
+ require_target_bytes = args.require_target_bytes and not args.allow_small_final_batch
422
+ if require_target_bytes and remaining_bytes < args.target_bytes:
423
  print(
424
  f"Skip upload: only {format_size(remaining_bytes)} across {remaining_count} completed NPZ folders available, below targets {format_size(args.target_bytes)} or {args.target_folders} folders."
425
  )
 
440
  break
441
 
442
  args.archive_dir.mkdir(parents=True, exist_ok=True)
443
+ append_status_journal(
 
444
  args.status_journal_path,
445
  batch_names,
446
  upload_status="uploading",
scripts/runtime_status.py ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import csv
6
+ import json
7
+ import re
8
+ import subprocess
9
+ from collections import Counter
10
+ from pathlib import Path
11
+
12
# Extensions that mark a raw video download as complete; anything else found
# in raw_video/ is treated as an in-progress temp file.
VIDEO_EXTS = {'.mp4', '.mkv', '.webm', '.mov'}
# Slurm array job-id tokens such as '12345_[0-9%4]'; group 3 is the index list.
ARRAY_RANGE_RE = re.compile(r'^(\d+)_(\[(.+)\])$')
# Columns that must be present for the processed-metadata CSV to be trusted.
PROCESSED_REQUIRED_COLUMNS = {
    'video_id',
    'download_status',
    'process_status',
    'upload_status',
    'archive_name',
}
21
+
22
+
23
def run_command(cmd: list[str]) -> str:
    """Run *cmd* and return its stripped stdout; '' if the command cannot start."""
    try:
        completed = subprocess.run(cmd, check=False, capture_output=True, text=True)
    except OSError:
        # Missing binary / exec failure: treat as "no output".
        return ''
    stdout = completed.stdout or ''
    return stdout.strip()
29
+
30
+
31
def count_claims(directory: Path) -> int:
    """Count '*.claim' marker files directly inside *directory* (0 if absent)."""
    if directory.exists():
        return len(list(directory.glob('*.claim')))
    return 0
35
+
36
+
37
def count_complete(dataset_dir: Path) -> int:
    """Count videos whose npz output directory carries a '.complete' marker."""
    if not dataset_dir.exists():
        return 0
    markers = dataset_dir.glob('*/npz/.complete')
    return len(list(markers))
41
+
42
+
43
def sum_file_sizes(paths: list[Path]) -> int:
    """Total size in bytes of the given files, skipping any that vanished."""
    total_bytes = 0
    for candidate in paths:
        try:
            size = candidate.stat().st_size
        except FileNotFoundError:
            # File removed between listing and stat(); ignore it.
            continue
        total_bytes += size
    return total_bytes
51
+
52
+
53
def count_uploaded(progress_path: Path) -> tuple[int, int]:
    """Read the upload progress JSON and return (archive count, folder count).

    A missing or unreadable file counts as (0, 0).
    """
    if not progress_path.exists():
        return 0, 0
    try:
        progress = json.loads(progress_path.read_text())
    except Exception:
        # Corrupt or partially-written JSON is treated as no progress at all.
        return 0, 0
    return len(progress.get('archives', {})), len(progress.get('uploaded_folders', {}))
63
+
64
+
65
def expand_task_count(jobid_token: str) -> int:
    """Number of array tasks encoded in a squeue job-id token.

    Plain job ids count as 1; tokens like '12345_[0-9,20%4]' are expanded.
    """
    match = ARRAY_RANGE_RE.match(jobid_token)
    if match is None:
        return 1
    spec = match.group(3)
    # A '%' suffix is the array throttle limit, not part of the index list.
    if '%' in spec:
        spec = spec.split('%', 1)[0]
    total = 0
    for chunk in spec.split(','):
        chunk = chunk.strip()
        if not chunk:
            continue
        if '-' not in chunk:
            total += 1
            continue
        lo, hi = chunk.split('-', 1)
        try:
            total += int(hi) - int(lo) + 1
        except ValueError:
            # Unparseable range still represents at least one task.
            total += 1
    return max(total, 1)
86
+
87
+
88
def queue_status(username: str) -> dict[str, object]:
    """Summarize the user's Slurm queue into job/state and partition counters."""
    squeue_out = run_command(['squeue', '-u', username, '-h', '-o', '%i|%j|%T|%P'])
    job_counts: Counter[str] = Counter()
    partition_counts: Counter[str] = Counter()
    running_dwpose = 0
    running_download = 0
    pending_download = 0
    for line in (squeue_out.splitlines() if squeue_out else []):
        fields = line.split('|')
        if len(fields) != 4:
            continue
        jobid_token, job, state, partition = fields
        # Array jobs report one line for many pending tasks; expand them.
        tasks = expand_task_count(jobid_token)
        job_counts[f'{job}|{state}'] += tasks
        partition_counts[f'{job}|{partition}|{state}'] += tasks
        if job == 'dwpose' and state == 'RUNNING':
            running_dwpose += tasks
        if job == 'download':
            if state == 'RUNNING':
                running_download += tasks
            elif state in {'PENDING', 'CONFIGURING'}:
                pending_download += tasks
    return {
        'running_dwpose': running_dwpose,
        'running_download': running_download,
        'pending_download_jobs': pending_download,
        'total_download_jobs': running_download + pending_download,
        'job_state_counts': dict(job_counts),
        'job_partition_state_counts': dict(partition_counts),
    }
119
+
120
+
121
def filesystem_avail_bytes(path: Path) -> int:
    """Available bytes on the filesystem holding *path* via `df -B1` (0 on failure)."""
    try:
        df = subprocess.run(['df', '-B1', str(path)], check=False, capture_output=True, text=True)
        rows = (df.stdout or '').splitlines()
        if len(rows) >= 2:
            columns = rows[1].split()
            if len(columns) >= 4:
                # df columns: filesystem, 1B-blocks, used, available, ...
                return int(columns[3])
        return 0
    except Exception:
        return 0
133
+
134
+
135
def human_bytes(num: int) -> str:
    """Format a byte count as a short human-readable string (e.g. '1.5KB')."""
    magnitude = float(num)
    units = ('B', 'KB', 'MB', 'GB', 'TB', 'PB')
    index = 0
    while index < len(units) and magnitude >= 1024.0:
        magnitude /= 1024.0
        index += 1
    suffix = units[index] if index < len(units) else 'EB'
    return f'{magnitude:.1f}{suffix}'
142
+
143
+
144
def read_source_manifest_count(path: Path) -> int:
    """Count manifest rows whose first column is non-empty (0 if file missing)."""
    if not path.exists():
        return 0
    with path.open('r', encoding='utf-8-sig', newline='') as handle:
        rows = csv.reader(handle)
        return sum(1 for row in rows if row and (row[0] or '').strip())
157
+
158
+
159
def read_processed_progress(path: Path) -> dict[str, object]:
    """Summarize per-video download/process/upload statuses from the processed CSV.

    Returns a flat dict of counters plus csv_exists/csv_ok/csv_error fields.
    Parse failures are reported through csv_error rather than raised, so the
    status report always succeeds.
    """
    result = {
        'csv_exists': path.exists(),
        'csv_ok': False,
        'csv_error': '',
        'processed_rows': 0,
        'download_ok_rows': 0,
        'download_skipped_rows': 0,
        'download_running_rows': 0,
        'download_pending_rows': 0,
        'process_ok_rows': 0,
        'process_running_rows': 0,
        'upload_uploaded_rows': 0,
    }
    if not path.exists():
        result['csv_error'] = 'missing'
        return result
    try:
        with path.open('r', encoding='utf-8-sig', newline='') as f:
            reader = csv.DictReader(f)
            fieldnames = list(reader.fieldnames or [])
            # Refuse to count anything when the schema is incomplete.
            missing = sorted(PROCESSED_REQUIRED_COLUMNS - set(fieldnames))
            if missing:
                result['csv_error'] = f'missing_columns:{",".join(missing)}'
                return result
            rows = list(reader)
            result['processed_rows'] = len(rows)
            for row in rows:
                d = (row.get('download_status') or '').strip()
                p = (row.get('process_status') or '').strip()
                u = (row.get('upload_status') or '').strip()
                if d == 'ok':
                    result['download_ok_rows'] += 1
                elif d == 'skipped':
                    result['download_skipped_rows'] += 1
                elif d == 'running':
                    result['download_running_rows'] += 1
                else:
                    # Any unrecognized (or blank) status counts as pending.
                    result['download_pending_rows'] += 1
                if p == 'ok':
                    result['process_ok_rows'] += 1
                elif p == 'running':
                    result['process_running_rows'] += 1
                if u == 'uploaded':
                    result['upload_uploaded_rows'] += 1
            result['csv_ok'] = True
            return result
    except Exception as exc:
        result['csv_error'] = str(exc)
        return result
209
+
210
+
211
def run_sync(runtime_root: Path) -> str:
    """Invoke the processed-CSV sync helper script; return a short status string."""
    sync_script = Path('/cache/home/sf895/Sign-DWPose-2M/scripts/sync_processed_csv_from_runtime.py')
    if not sync_script.exists():
        return 'missing_sync_script'
    # Flag -> value pairs mirror the runtime directory layout.
    options = {
        '--source-metadata-csv': runtime_root / 'Sign-DWPose-2M-metadata_ori.csv',
        '--output-metadata-csv': runtime_root / 'Sign-DWPose-2M-metadata_processed.csv',
        '--raw-video-dir': runtime_root / 'raw_video',
        '--raw-caption-dir': runtime_root / 'raw_caption',
        '--raw-metadata-dir': runtime_root / 'raw_metadata',
        '--dataset-dir': runtime_root / 'dataset',
        '--progress-path': runtime_root / 'archive_upload_progress.json',
        '--status-journal-path': runtime_root / 'upload_status_journal.jsonl',
    }
    cmd = ['python3', str(sync_script)]
    for flag, value in options.items():
        cmd.extend([flag, str(value)])
    try:
        proc = subprocess.run(cmd, check=False, capture_output=True, text=True)
    except OSError as exc:
        return f'error:{exc}'
    if proc.returncode == 0:
        return (proc.stdout or '').strip() or 'ok'
    err = (proc.stderr or proc.stdout or '').strip()
    return f'failed:{err}'
234
+
235
+
236
def main() -> None:
    """Collect and print a one-shot status report for the DWPose runtime tree.

    Optionally syncs the processed-metadata CSV first, then reports download,
    processing, upload, Slurm queue, and disk-usage counters either as JSON
    (--json) or as key=value lines.
    """
    parser = argparse.ArgumentParser(description='Report Sign-DWPose runtime status.')
    parser.add_argument('--runtime-root', default='/home/sf895/Sign-DWPose-2M-runtime')
    parser.add_argument('--username', default='sf895')
    parser.add_argument('--no-sync', action='store_true')
    parser.add_argument('--json', action='store_true')
    args = parser.parse_args()

    # Well-known locations inside the runtime tree.
    runtime_root = Path(args.runtime_root)
    raw_dir = runtime_root / 'raw_video'
    dataset_dir = runtime_root / 'dataset'
    claims_dir = runtime_root / 'slurm' / 'state' / 'claims'
    download_claims_dir = runtime_root / 'slurm' / 'state' / 'download_claims'
    progress_path = runtime_root / 'archive_upload_progress.json'
    source_csv = runtime_root / 'Sign-DWPose-2M-metadata_ori.csv'
    processed_csv = runtime_root / 'Sign-DWPose-2M-metadata_processed.csv'

    # Refresh the processed CSV before reading it, unless disabled.
    sync_result = 'skipped'
    if not args.no_sync:
        sync_result = run_sync(runtime_root)

    # Split raw_video/ into finished downloads (known video extensions) and
    # anything else, which is assumed to be an in-flight temp file.
    raw_complete: list[Path] = []
    raw_temp: list[Path] = []
    if raw_dir.exists():
        for path in raw_dir.iterdir():
            if not path.is_file():
                continue
            if path.suffix.lower() in VIDEO_EXTS:
                raw_complete.append(path)
            else:
                raw_temp.append(path)

    raw_size = sum_file_sizes(raw_complete)
    # Walk the whole runtime tree for total on-disk footprint; files may vanish
    # mid-walk while jobs run, so stat errors are tolerated.
    runtime_size = 0
    if runtime_root.exists():
        for path in runtime_root.rglob('*'):
            try:
                if path.is_file():
                    runtime_size += path.stat().st_size
            except FileNotFoundError:
                continue

    source_rows = read_source_manifest_count(source_csv)
    progress = read_processed_progress(processed_csv)

    payload = {
        'sync_result': sync_result,
        'download_normal': len(raw_temp) == 0,
        'raw_videos': len(raw_complete),
        'raw_temp_files': len(raw_temp),
        'sent_to_gpu': count_claims(claims_dir),
        'processed_complete': count_complete(dataset_dir),
        'active_downloads': count_claims(download_claims_dir),
        'uploaded_archives': 0,
        'uploaded_folders': 0,
        'raw_size_bytes': raw_size,
        'runtime_size_bytes': runtime_size,
        'filesystem_avail_bytes': filesystem_avail_bytes(runtime_root),
        'source_rows': source_rows,
        'csv_exists': progress['csv_exists'],
        'csv_ok': progress['csv_ok'],
        'csv_error': progress['csv_error'],
        'processed_rows': progress['processed_rows'],
        'download_ok_rows': progress['download_ok_rows'],
        'download_skipped_rows': progress['download_skipped_rows'],
        'download_running_rows': progress['download_running_rows'],
        'download_pending_rows': progress['download_pending_rows'],
        'process_ok_rows': progress['process_ok_rows'],
        'process_running_rows': progress['process_running_rows'],
        'upload_uploaded_rows': progress['upload_uploaded_rows'],
    }
    uploaded_archives, uploaded_folders = count_uploaded(progress_path)
    payload['uploaded_archives'] = uploaded_archives
    payload['uploaded_folders'] = uploaded_folders
    payload.update(queue_status(args.username))
    # Row-count parity with the source manifest only means anything when the
    # processed CSV parsed cleanly.
    payload['csv_row_match'] = (payload['processed_rows'] == payload['source_rows']) if payload['csv_ok'] else False

    if args.json:
        print(json.dumps(payload, ensure_ascii=False, indent=2, sort_keys=True))
        return

    # Plain key=value output for quick shell consumption.
    print(f"sync_result={payload['sync_result']}")
    print(f"download_normal={payload['download_normal']}")
    print(f"raw_videos={payload['raw_videos']}")
    print(f"raw_temp_files={payload['raw_temp_files']}")
    print(f"sent_to_gpu={payload['sent_to_gpu']}")
    print(f"running_dwpose={payload['running_dwpose']}")
    print(f"processed_complete={payload['processed_complete']}")
    print(f"active_downloads={payload['active_downloads']}")
    print(f"running_download_jobs={payload['running_download']}")
    print(f"pending_download_jobs={payload['pending_download_jobs']}")
    print(f"total_download_jobs={payload['total_download_jobs']}")
    print(f"uploaded_archives={payload['uploaded_archives']}")
    print(f"uploaded_folders={payload['uploaded_folders']}")
    print(f"source_rows={payload['source_rows']}")
    print(f"processed_rows={payload['processed_rows']}")
    print(f"csv_ok={payload['csv_ok']}")
    print(f"csv_row_match={payload['csv_row_match']}")
    print(f"csv_error={payload['csv_error']}")
    print(f"download_ok_rows={payload['download_ok_rows']}")
    print(f"download_skipped_rows={payload['download_skipped_rows']}")
    print(f"download_running_rows={payload['download_running_rows']}")
    print(f"download_pending_rows={payload['download_pending_rows']}")
    print(f"process_ok_rows={payload['process_ok_rows']}")
    print(f"process_running_rows={payload['process_running_rows']}")
    print(f"upload_uploaded_rows={payload['upload_uploaded_rows']}")
    print(f"raw_size={human_bytes(payload['raw_size_bytes'])}")
    print(f"runtime_size={human_bytes(payload['runtime_size_bytes'])}")
    print(f"filesystem_avail={human_bytes(payload['filesystem_avail_bytes'])}")


if __name__ == '__main__':
    main()
scripts/sync_processed_csv_from_runtime.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import csv
4
+ import json
5
+ from pathlib import Path
6
+ from typing import Dict, List, Tuple
7
+
8
# Canonical column order for the processed-metadata CSV. Any extra columns
# found in the source/output CSVs are appended after these (see main()).
DEFAULT_COLUMNS = [
    'video_id',
    'sign_language',
    'title',
    'duration_sec',
    'start_sec',
    'end_sec',
    'subtitle_languages',
    'subtitle_dir_path',
    'subtitle_en_source',
    'raw_video_path',
    'raw_metadata_path',
    'metadata_status',
    'subtitle_status',
    'download_status',
    'failure_count',
    'error',
    'processed_at',
    'subtitle_json_path',
    'subtitle_en',
    'subtitle_texts_json',
    'process_status',
    'upload_status',
    'local_cleanup_status',
    'archive_name',
]

# Video container extensions recognised when scanning the raw download dir.
VIDEO_EXTS = {'.mp4', '.mkv', '.webm', '.mov'}
36
+
37
+
38
def read_csv_rows(path: Path) -> Tuple[List[Dict[str, str]], List[str]]:
    """Load a metadata CSV into (rows, fieldnames).

    Accepts two layouts: a normal header-ed CSV (header detected by a leading
    'video_id' cell or a 'download_status'/'process_status' column), or a bare
    listing whose columns are interpreted as (video_id, sign_language).
    An empty file yields ([], []).
    """
    raw_text = path.read_text(encoding='utf-8-sig')
    nonblank = [candidate for candidate in raw_text.splitlines() if candidate.strip()]
    if not nonblank:
        return [], []

    header_cells = next(csv.reader([nonblank[0]]), [])
    leading = header_cells[0].strip() if header_cells else ''
    has_header = (
        leading == 'video_id'
        or 'download_status' in header_cells
        or 'process_status' in header_cells
    )

    if has_header:
        with path.open('r', encoding='utf-8-sig', newline='') as handle:
            dict_reader = csv.DictReader(handle)
            parsed = [dict(record) for record in dict_reader]
            columns = list(dict_reader.fieldnames or [])
        return parsed, columns

    # Headerless fallback: first cell is the video id, optional second cell
    # is the sign language; rows with an empty id are dropped.
    parsed: List[Dict[str, str]] = []
    with path.open('r', encoding='utf-8-sig', newline='') as handle:
        for cells in csv.reader(handle):
            if not cells:
                continue
            vid = (cells[0] or '').strip()
            lang = (cells[1] or '').strip() if len(cells) > 1 else ''
            if not vid:
                continue
            parsed.append({'video_id': vid, 'sign_language': lang})
    return parsed, ['video_id', 'sign_language']
65
+
66
+
67
+ def write_csv_rows(path: Path, rows: List[Dict[str, str]], fieldnames: List[str]):
68
+ tmp = path.with_suffix(path.suffix + '.tmp')
69
+ with tmp.open('w', encoding='utf-8', newline='') as f:
70
+ writer = csv.DictWriter(f, fieldnames=fieldnames)
71
+ writer.writeheader()
72
+ for row in rows:
73
+ writer.writerow({k: row.get(k, '') for k in fieldnames})
74
+ tmp.replace(path)
75
+
76
+
77
def load_progress(progress_path: Path):
    """Return the (uploaded_folders, archives) maps from the upload-progress
    JSON file; a missing file yields two empty dicts."""
    if progress_path.exists():
        payload = json.loads(progress_path.read_text())
        return payload.get('uploaded_folders', {}), payload.get('archives', {})
    return {}, {}
82
+
83
+
84
def load_journal(journal_path: Path):
    """Parse the JSONL status journal into {video_id: {column: value}}.

    Each journal line carries 'video_ids' plus an 'updates' mapping; later
    lines overwrite earlier ones for the same video id. Blank and malformed
    lines are silently skipped. A missing journal yields an empty dict.
    """
    latest: Dict[str, Dict[str, str]] = {}
    if not journal_path.exists():
        return latest
    for raw_line in journal_path.read_text(encoding='utf-8').splitlines():
        stripped = raw_line.strip()
        if not stripped:
            continue
        try:
            entry = json.loads(stripped)
        except Exception:
            continue
        column_updates = {key: str(value) for key, value in (entry.get('updates') or {}).items()}
        for video in entry.get('video_ids') or []:
            latest[str(video)] = column_updates
    return latest
100
+
101
+
102
def main():
    """Rebuild the processed-metadata CSV by merging the source CSV with
    on-disk runtime state: raw downloads, DWpose completion markers, Slurm
    claim files, the upload progress file, and the status journal.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--source-metadata-csv', type=Path, required=True)
    ap.add_argument('--output-metadata-csv', type=Path, required=True)
    ap.add_argument('--raw-video-dir', type=Path, required=True)
    ap.add_argument('--raw-caption-dir', type=Path, required=True)
    ap.add_argument('--raw-metadata-dir', type=Path, required=True)
    ap.add_argument('--dataset-dir', type=Path, required=True)
    ap.add_argument('--progress-path', type=Path, required=True)
    ap.add_argument('--status-journal-path', type=Path, required=True)
    args = ap.parse_args()

    # Column order: DEFAULT_COLUMNS first, then any extra source/output
    # columns in first-seen order.
    source_rows, source_fields = read_csv_rows(args.source_metadata_csv)
    output_rows, output_fields = (read_csv_rows(args.output_metadata_csv) if args.output_metadata_csv.exists() else ([], []))
    fields: List[str] = []
    for col in DEFAULT_COLUMNS + source_fields + output_fields:
        if col and col not in fields:
            fields.append(col)

    # Start from the source rows; overlay every column already present in the
    # existing output CSV for the same video id (output values win).
    out_by_id = {r.get('video_id', '').strip(): r for r in output_rows if r.get('video_id', '').strip()}
    rows: List[Dict[str, str]] = []
    for src in source_rows:
        vid = (src.get('video_id') or '').strip()
        if not vid:
            continue
        merged = {k: src.get(k, '') for k in fields}
        if vid in out_by_id:
            for k in fields:
                if k in out_by_id[vid]:
                    merged[k] = out_by_id[vid].get(k, '')
        rows.append(merged)

    # Snapshot on-disk runtime state once, up front.
    raw_videos = {p.stem: p for p in args.raw_video_dir.iterdir() if p.is_file() and p.suffix.lower() in VIDEO_EXTS} if args.raw_video_dir.exists() else {}
    raw_metadata = {p.stem: p for p in args.raw_metadata_dir.glob('*.json')} if args.raw_metadata_dir.exists() else {}
    # A video counts as fully processed when its npz dir carries a .complete marker.
    complete = {p.parent.parent.name for p in args.dataset_dir.glob('*/npz/.complete')} if args.dataset_dir.exists() else set()
    # NOTE(review): claim dirs are assumed to live beside dataset_dir under
    # slurm/state/ -- confirm this matches the orchestrator's layout.
    process_claims_dir = args.dataset_dir.parent / 'slurm' / 'state' / 'claims'
    download_claims_dir = args.dataset_dir.parent / 'slurm' / 'state' / 'download_claims'
    process_claims = {p.stem for p in process_claims_dir.glob('*.claim')} if process_claims_dir.exists() else set()
    download_claims = {p.stem for p in download_claims_dir.glob('*.claim')} if download_claims_dir.exists() else set()
    uploaded_folders, _archives = load_progress(args.progress_path)
    journal_updates = load_journal(args.status_journal_path)

    # Reconcile each row's status columns against the snapshot.
    for row in rows:
        vid = (row.get('video_id') or '').strip()
        if not vid:
            continue
        if vid in raw_metadata:
            row['raw_metadata_path'] = str(raw_metadata[vid])
            row['metadata_status'] = 'ok'
        if vid in raw_videos:
            row['raw_video_path'] = str(raw_videos[vid])
            row['download_status'] = 'ok'
        elif vid in download_claims and row.get('download_status', '') not in {'ok', 'skipped'}:
            # Claimed but no raw file yet: a download task is (presumably) running.
            row['download_status'] = 'running'
        if vid in complete:
            row['process_status'] = 'ok'
        elif vid in process_claims and row.get('process_status', '') != 'ok':
            row['process_status'] = 'running'
        # Upload-state precedence: progress file > local .complete marker > journal.
        if vid in uploaded_folders:
            row['upload_status'] = 'uploaded'
            row['archive_name'] = uploaded_folders[vid]
            row['local_cleanup_status'] = 'deleted'
            # An uploaded video implies the earlier stages succeeded even if
            # their local artifacts were already cleaned up.
            row['process_status'] = 'ok'
            row['download_status'] = 'ok'
            if not row.get('metadata_status'):
                row['metadata_status'] = 'ok'
        elif vid in complete:
            # Processed locally but not uploaded yet: clear any stale upload
            # columns inherited from the previous output CSV.
            row['upload_status'] = ''
            row['archive_name'] = ''
            row['local_cleanup_status'] = ''
        elif vid in journal_updates:
            # Fall back to journaled upload/cleanup columns only.
            for k, v in journal_updates[vid].items():
                if k in {'upload_status', 'archive_name', 'local_cleanup_status'}:
                    row[k] = v

    write_csv_rows(args.output_metadata_csv, rows, fields)
    print(f'synced_rows={len(rows)}')


if __name__ == '__main__':
    main()
scripts/visualize_dwpose_npz.py CHANGED
@@ -398,8 +398,8 @@ def render_pose_image(frame: Dict[str, object], draw_style: str, transparent: bo
398
 
399
  def save_frame_previews(npz_paths: Iterable[Path], single_frame_dir: Path, draw_style: str, conf_threshold: float) -> None:
400
  single_frame_dir.mkdir(parents=True, exist_ok=True)
401
- for npz_path in npz_paths:
402
- frame = load_npz_frame(npz_path, aggregated_index=index - 1 if npz_path.name == "poses.npz" else 0)
403
  image = render_pose_image(frame, draw_style=draw_style, transparent=False, conf_threshold=conf_threshold)
404
  image.save(single_frame_dir / f"{npz_path.stem}.png")
405
 
 
398
 
399
def save_frame_previews(npz_paths: Iterable[Path], single_frame_dir: Path, draw_style: str, conf_threshold: float) -> None:
    """Render one preview PNG per pose npz into *single_frame_dir*.

    For an aggregated "poses.npz" the 1-based position of the file in the
    iteration order selects which aggregated frame to preview; per-frame npz
    files always use frame 0.
    """
    single_frame_dir.mkdir(parents=True, exist_ok=True)
    for ordinal, pose_file in enumerate(npz_paths, start=1):
        agg_index = ordinal - 1 if pose_file.name == "poses.npz" else 0
        pose_frame = load_npz_frame(pose_file, aggregated_index=agg_index)
        rendered = render_pose_image(pose_frame, draw_style=draw_style, transparent=False, conf_threshold=conf_threshold)
        rendered.save(single_frame_dir / f"{pose_file.stem}.png")
405
 
slurm/orchestrator_autorestart.slurm ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Self-resubmitting launcher: keeps exactly one "sign-dwpose-orch" job alive,
# rotates it every ROTATE_SECONDS, and hands off to a fresh copy of itself so
# the chain survives the launcher's own wall-clock limit. Touch STOP_FILE to
# break the chain.
#SBATCH --partition=main
#SBATCH --job-name=sign-dwpose-launcher
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=512M
#SBATCH --time=12:30:00
#SBATCH --output=/home/sf895/Sign-DWPose-2M-runtime/slurm/logs/orchestrator_launcher_%j.out
#SBATCH --error=/home/sf895/Sign-DWPose-2M-runtime/slurm/logs/orchestrator_launcher_%j.err

set -euo pipefail

# All knobs are environment-overridable; defaults match the deployment paths.
ROOT_DIR="${ROOT_DIR:-/cache/home/sf895/Sign-DWPose-2M}"
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/Sign-DWPose-2M-runtime}"
ORCH_SUBMIT_SCRIPT="${ORCH_SUBMIT_SCRIPT:-$ROOT_DIR/reproduce_independently_slurm.sh}"
STOP_FILE="${STOP_FILE:-$RUNTIME_ROOT/STOP_AUTORESTART}"
ROTATE_SECONDS="${ROTATE_SECONDS:-43200}"          # 12h rotation window
CHECK_INTERVAL_SECONDS="${CHECK_INTERVAL_SECONDS:-60}"
ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-12:00:00}"
LAUNCHER_SCRIPT="${LAUNCHER_SCRIPT:-$(readlink -f "$0")}"
LAUNCHER_PARTITION="${LAUNCHER_PARTITION:-main}"
LAUNCHER_ACCOUNT="${LAUNCHER_ACCOUNT:-}"
LAUNCHER_TIME="${LAUNCHER_TIME:-12:30:00}"
LAUNCHER_CPUS_PER_TASK="${LAUNCHER_CPUS_PER_TASK:-1}"
LAUNCHER_MEM="${LAUNCHER_MEM:-512M}"

mkdir -p "$RUNTIME_ROOT/slurm/logs"

now_ts() {
    date '+%Y-%m-%d %H:%M:%S'
}

log() {
    echo "[$(now_ts)] [launcher] $*"
}

# Job ids of this user's live (running/pending/configuring) orchestrator jobs.
current_orchestrator_ids() {
    squeue -u "$USER" -h -o '%A|%j|%T' | awk -F'|' '$2=="sign-dwpose-orch" && ($3=="RUNNING" || $3=="PENDING" || $3=="CONFIGURING") {print $1}'
}

# Submit a fresh orchestration. SLURM_* job variables are scrubbed from the
# environment so the submit script does not mistake itself for a batch step.
submit_orchestrator() {
    log "submitting orchestration with ORCHESTRATOR_TIME=$ORCHESTRATOR_TIME"
    env -u SLURM_JOB_ID -u SLURM_JOB_NAME -u SLURM_JOB_NODELIST -u SLURM_ARRAY_JOB_ID -u SLURM_ARRAY_TASK_ID ORCHESTRATOR_TIME="$ORCHESTRATOR_TIME" bash "$ORCH_SUBMIT_SCRIPT"
}

cancel_orchestrators() {
    local ids
    ids="$(current_orchestrator_ids || true)"
    if [[ -n "$ids" ]]; then
        log "cancelling orchestrators: $(echo "$ids" | tr '\n' ' ')"
        # $ids is deliberately unquoted: word-splitting passes each id as its
        # own argument to scancel.
        scancel $ids || true
    fi
}

# Re-submit this launcher with the current configuration so the restart chain
# continues past this job's wall-clock limit.
submit_next_launcher() {
    if [[ -f "$STOP_FILE" ]]; then
        log "stop file present; not submitting next launcher"
        return 0
    fi
    local cmd=(sbatch "--partition=$LAUNCHER_PARTITION" "--time=$LAUNCHER_TIME" "--cpus-per-task=$LAUNCHER_CPUS_PER_TASK" "--mem=$LAUNCHER_MEM")
    if [[ -n "$LAUNCHER_ACCOUNT" ]]; then
        cmd+=("--account=$LAUNCHER_ACCOUNT")
    fi
    cmd+=("--export=ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,ORCH_SUBMIT_SCRIPT=$ORCH_SUBMIT_SCRIPT,STOP_FILE=$STOP_FILE,ROTATE_SECONDS=$ROTATE_SECONDS,CHECK_INTERVAL_SECONDS=$CHECK_INTERVAL_SECONDS,ORCHESTRATOR_TIME=$ORCHESTRATOR_TIME,LAUNCHER_SCRIPT=$LAUNCHER_SCRIPT,LAUNCHER_PARTITION=$LAUNCHER_PARTITION,LAUNCHER_ACCOUNT=$LAUNCHER_ACCOUNT,LAUNCHER_TIME=$LAUNCHER_TIME,LAUNCHER_CPUS_PER_TASK=$LAUNCHER_CPUS_PER_TASK,LAUNCHER_MEM=$LAUNCHER_MEM")
    cmd+=("$LAUNCHER_SCRIPT")
    log "submitting next launcher"
    "${cmd[@]}"
}

# ---- main loop ------------------------------------------------------------
elapsed=0
if [[ -f "$STOP_FILE" ]]; then
    log "stop file present at startup; exiting"
    exit 0
fi

# Ensure exactly one orchestration exists at startup.
if [[ -z "$(current_orchestrator_ids || true)" ]]; then
    submit_orchestrator
else
    log "existing orchestration detected; not submitting a duplicate"
fi

# Watch the orchestration until the rotation window elapses, replacing it if
# it dies and bailing out promptly when the stop file appears.
while (( elapsed < ROTATE_SECONDS )); do
    if [[ -f "$STOP_FILE" ]]; then
        log "stop file detected during monitoring; exiting without resubmitting"
        exit 0
    fi
    if [[ -z "$(current_orchestrator_ids || true)" ]]; then
        log "no live orchestration detected; submitting a replacement"
        submit_orchestrator
    fi
    sleep "$CHECK_INTERVAL_SECONDS"
    elapsed=$((elapsed + CHECK_INTERVAL_SECONDS))
done

if [[ -f "$STOP_FILE" ]]; then
    log "stop file present at rotation boundary; exiting"
    exit 0
fi

# Rotation: recycle the orchestration and chain to a fresh launcher job.
log "rotation boundary reached; restarting orchestration and handing off to next launcher"
cancel_orchestrators
sleep 5
submit_orchestrator
submit_next_launcher
log "handoff complete; exiting"
slurm/process_download_array.slurm CHANGED
@@ -52,9 +52,21 @@ fi
52
 
53
  mkdir -p "$DOWNLOAD_CLAIM_DIR"
54
  CLAIM_PATH="$DOWNLOAD_CLAIM_DIR/${VIDEO_ID}.claim"
 
 
 
 
 
 
 
 
 
 
 
55
  cleanup_claim() {
56
  rm -f "$CLAIM_PATH"
57
  }
 
58
  trap cleanup_claim EXIT
59
 
60
  echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) video_id=$VIDEO_ID"
@@ -78,7 +90,7 @@ cmd=(python "$PIPELINE01"
78
  --stats-npz "$STATS_NPZ"
79
  --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
80
  --limit 1
81
- --video-ids "$VIDEO_ID"
82
  )
83
  if [[ "${FORCE_METADATA:-0}" == "1" ]]; then
84
  cmd+=(--force-metadata)
 
52
 
53
  mkdir -p "$DOWNLOAD_CLAIM_DIR"
54
  CLAIM_PATH="$DOWNLOAD_CLAIM_DIR/${VIDEO_ID}.claim"
55
# Record ownership metadata for this video's download claim so the submit-side
# liveness checker can map the claim back to a live Slurm array task.
write_claim() {
    # Unquoted heredoc: all variables below expand at write time.
    cat > "$CLAIM_PATH" <<CLAIM
job_id=${SLURM_ARRAY_JOB_ID:-${SLURM_JOB_ID:-}}
task_id=${SLURM_ARRAY_TASK_ID}
job_key=${SLURM_ARRAY_JOB_ID:-${SLURM_JOB_ID:-}}_${SLURM_ARRAY_TASK_ID}
video_id=${VIDEO_ID}
host=$(hostname)
pid=$$
claimed_at=$(date '+%F %T')
CLAIM
}
# Remove the claim when this task exits, on success or failure.
cleanup_claim() {
    rm -f "$CLAIM_PATH"
}
# Fix: install the cleanup trap BEFORE (re)writing the claim. The original
# order (write, then trap) left a window where a kill between the two lines
# would leak a claim file that no cleanup would ever remove.
trap cleanup_claim EXIT
write_claim
71
 
72
  echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) video_id=$VIDEO_ID"
 
90
  --stats-npz "$STATS_NPZ"
91
  --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
92
  --limit 1
93
+ --video-ids="$VIDEO_ID"
94
  )
95
  if [[ "${FORCE_METADATA:-0}" == "1" ]]; then
96
  cmd+=(--force-metadata)
slurm/submit_download_slurm.sh CHANGED
@@ -26,8 +26,9 @@ MEMORY="4G"
26
  LIMIT=""
27
  ARRAY_PARALLEL=""
28
  MAX_BACKLOG_VIDEOS="180"
29
- MAX_ACTIVE_DOWNLOADS="30"
30
- WORKERS="4"
 
31
  VIDEO_IDS=()
32
  FORCE_METADATA=0
33
  FORCE_SUBTITLES=0
@@ -50,12 +51,12 @@ Options:
50
  --account NAME Optional Slurm account
51
  --time HH:MM:SS Default: 04:00:00
52
  --cpus-per-task N Default: 1
53
- --mem SIZE Default: 4G
54
  --limit N Only submit the first N pending, unclaimed videos this cycle
55
  --array-parallel N Add a %N cap to the array
56
  --max-backlog-videos N Max raw backlog + active download claims allowed. Default: 180
57
  --max-active-downloads N Max active download claims allowed at once. Default: 30
58
- --workers N Max download tasks to submit in one cycle. Default: 4
59
  --claim-dir DIR Download claim directory
60
  --csv-lock-path PATH CSV lock path
61
  --video-ids ID [ID ...] Restrict this cycle to specific videos
@@ -117,10 +118,12 @@ fi
117
  TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
118
  BASE_MANIFEST="$MANIFEST_DIR/pending_download_${TIMESTAMP}.txt"
119
  ACTIVE_JOBS_FILE="$STATE_DIR/active_download_jobs_${TIMESTAMP}.txt"
 
120
 
121
  squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
 
122
 
123
- SELECTED_COUNT="$(python - "$SOURCE_METADATA_CSV" "$OUTPUT_METADATA_CSV" "$RAW_VIDEO_DIR" "$DOWNLOAD_CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$MAX_BACKLOG_VIDEOS" "$MAX_ACTIVE_DOWNLOADS" "$LIMIT" "$WORKERS" "$BASE_MANIFEST" "${VIDEO_IDS[*]:-}" <<'PY'
124
  import csv
125
  import sys
126
  from pathlib import Path
@@ -130,12 +133,13 @@ processed_csv = Path(sys.argv[2])
130
  raw_video_dir = Path(sys.argv[3])
131
  claim_dir = Path(sys.argv[4])
132
  active_jobs_path = Path(sys.argv[5])
133
- max_backlog = int(sys.argv[6])
134
- max_active_downloads = int(sys.argv[7])
135
- limit_arg = sys.argv[8]
136
- workers = int(sys.argv[9])
137
- manifest_path = Path(sys.argv[10])
138
- video_ids_joined = sys.argv[11].strip()
 
139
  limit = int(limit_arg) if limit_arg else None
140
  video_filter = set(video_ids_joined.split()) if video_ids_joined else None
141
 
@@ -143,6 +147,9 @@ video_extensions = {'.mp4', '.mkv', '.webm', '.mov'}
143
  active_jobs = set()
144
  if active_jobs_path.exists():
145
  active_jobs = {line.strip() for line in active_jobs_path.read_text(encoding='utf-8').splitlines() if line.strip()}
 
 
 
146
 
147
  claim_dir.mkdir(parents=True, exist_ok=True)
148
  active_claims = set()
@@ -153,6 +160,7 @@ for claim_path in claim_dir.glob('*.claim'):
153
  continue
154
  job_id = ''
155
  pid = None
 
156
  for line in lines:
157
  if line.startswith('job_id='):
158
  job_id = line.split('=', 1)[1].strip()
@@ -161,10 +169,20 @@ for claim_path in claim_dir.glob('*.claim'):
161
  pid = int(line.split('=', 1)[1].strip())
162
  except ValueError:
163
  pid = None
 
 
164
  alive = False
165
- if job_id:
 
 
 
 
 
 
 
 
166
  alive = job_id in active_jobs
167
- elif pid is not None:
168
  try:
169
  import os
170
  os.kill(pid, 0)
@@ -184,8 +202,9 @@ if raw_video_dir.exists():
184
  raw_backlog += 1
185
  existing_raw.add(path.stem)
186
 
 
187
  remaining_slots = max(0, max_backlog - raw_backlog - len(active_claims))
188
- remaining_slots = min(remaining_slots, max(0, max_active_downloads - len(active_claims)))
189
  remaining_slots = min(remaining_slots, workers)
190
  if limit is not None:
191
  remaining_slots = min(remaining_slots, limit)
@@ -223,7 +242,7 @@ manifest_path.write_text(''.join(f'{video_id}\n' for video_id in selected), enco
223
  print(len(selected))
224
  PY
225
  )"
226
- rm -f "$ACTIVE_JOBS_FILE"
227
 
228
  if [[ "$SELECTED_COUNT" == "0" ]]; then
229
  echo "No pending videos to download, or download backlog cap already reached."
@@ -258,13 +277,17 @@ cmd+=("$SLURM_SCRIPT")
258
  JOB_ID="$("${cmd[@]}")"
259
  echo "Submitted download array job: $JOB_ID"
260
 
 
261
  while IFS= read -r video_id; do
262
  [[ -z "$video_id" ]] && continue
263
  cat > "$DOWNLOAD_CLAIM_DIR/${video_id}.claim" <<CLAIM
264
  job_id=$JOB_ID
 
 
265
  video_id=$video_id
266
  submitted_at=$(date '+%F %T')
267
  CLAIM
 
268
  done < "$BASE_MANIFEST"
269
 
270
  echo "SUBMITTED_DOWNLOAD_COUNT=$SELECTED_COUNT"
 
26
  LIMIT=""
27
  ARRAY_PARALLEL=""
28
  MAX_BACKLOG_VIDEOS="180"
29
+ MAX_ACTIVE_DOWNLOADS="60"
30
+ DOWNLOAD_CLAIM_GRACE_SECONDS="${DOWNLOAD_CLAIM_GRACE_SECONDS:-600}"
31
+ WORKERS="60"
32
  VIDEO_IDS=()
33
  FORCE_METADATA=0
34
  FORCE_SUBTITLES=0
 
51
  --account NAME Optional Slurm account
52
  --time HH:MM:SS Default: 04:00:00
53
  --cpus-per-task N Default: 1
54
+ --mem SIZE Default: 60G
55
  --limit N Only submit the first N pending, unclaimed videos this cycle
56
  --array-parallel N Add a %N cap to the array
57
  --max-backlog-videos N Max raw backlog + active download claims allowed. Default: 180
58
  --max-active-downloads N Max active download claims allowed at once. Default: 30
59
+ --workers N Max download tasks to submit in one cycle. Default: 60
60
  --claim-dir DIR Download claim directory
61
  --csv-lock-path PATH CSV lock path
62
  --video-ids ID [ID ...] Restrict this cycle to specific videos
 
118
  TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
119
  BASE_MANIFEST="$MANIFEST_DIR/pending_download_${TIMESTAMP}.txt"
120
  ACTIVE_JOBS_FILE="$STATE_DIR/active_download_jobs_${TIMESTAMP}.txt"
121
+ ACTIVE_DOWNLOAD_TASKS_FILE="$STATE_DIR/active_download_tasks_${TIMESTAMP}.txt"
122
 
123
  squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
124
+ squeue -h -u "$USER" -n download -t RUNNING,PENDING,CONFIGURING -o "%i" > "$ACTIVE_DOWNLOAD_TASKS_FILE"
125
 
126
+ SELECTED_COUNT="$(python - "$SOURCE_METADATA_CSV" "$OUTPUT_METADATA_CSV" "$RAW_VIDEO_DIR" "$DOWNLOAD_CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$ACTIVE_DOWNLOAD_TASKS_FILE" "$MAX_BACKLOG_VIDEOS" "$MAX_ACTIVE_DOWNLOADS" "$LIMIT" "$WORKERS" "$BASE_MANIFEST" "${VIDEO_IDS[*]:-}" <<'PY'
127
  import csv
128
  import sys
129
  from pathlib import Path
 
133
  raw_video_dir = Path(sys.argv[3])
134
  claim_dir = Path(sys.argv[4])
135
  active_jobs_path = Path(sys.argv[5])
136
+ active_download_tasks_path = Path(sys.argv[6])
137
+ max_backlog = int(sys.argv[7])
138
+ max_active_downloads = int(sys.argv[8])
139
+ limit_arg = sys.argv[9]
140
+ workers = int(sys.argv[10])
141
+ manifest_path = Path(sys.argv[11])
142
+ video_ids_joined = sys.argv[12].strip()
143
  limit = int(limit_arg) if limit_arg else None
144
  video_filter = set(video_ids_joined.split()) if video_ids_joined else None
145
 
 
147
  active_jobs = set()
148
  if active_jobs_path.exists():
149
  active_jobs = {line.strip() for line in active_jobs_path.read_text(encoding='utf-8').splitlines() if line.strip()}
150
+ active_download_tasks = 0
151
+ if active_download_tasks_path.exists():
152
+ active_download_tasks = sum(1 for line in active_download_tasks_path.read_text(encoding='utf-8').splitlines() if line.strip())
153
 
154
  claim_dir.mkdir(parents=True, exist_ok=True)
155
  active_claims = set()
 
160
  continue
161
  job_id = ''
162
  pid = None
163
+ submitted_at = ''
164
  for line in lines:
165
  if line.startswith('job_id='):
166
  job_id = line.split('=', 1)[1].strip()
 
169
  pid = int(line.split('=', 1)[1].strip())
170
  except ValueError:
171
  pid = None
172
+ elif line.startswith('submitted_at='):
173
+ submitted_at = line.split('=', 1)[1].strip()
174
# Decide whether this claim is still live: a fresh claim (inside the grace
# window) is trusted outright; otherwise fall back to the Slurm job list or,
# for claims written by a bare process, a signal-0 liveness probe.
alive = False
if submitted_at:
    # Fix: this code lives inside a quoted heredoc (<<'PY'), so the shell
    # never substitutes "$DOWNLOAD_CLAIM_GRACE_SECONDS" into the script; the
    # original int("$DOWNLOAD_CLAIM_GRACE_SECONDS") always raised ValueError,
    # was swallowed by the except, and the grace window silently never
    # applied. Read the value from the environment instead (same 600 s
    # default as the shell wrapper; matches when the variable is unset or
    # exported -- confirm callers export non-default overrides).
    try:
        import os
        from datetime import datetime, timedelta
        grace_seconds = int(os.environ.get('DOWNLOAD_CLAIM_GRACE_SECONDS', '600') or '600')
        # Claims are written with `date '+%F %T'`, i.e. this exact format.
        submitted_dt = datetime.strptime(submitted_at, '%Y-%m-%d %H:%M:%S')
        if (datetime.now() - submitted_dt) <= timedelta(seconds=grace_seconds):
            alive = True
    except Exception:
        pass
if (not alive) and job_id:
    alive = job_id in active_jobs
185
+ elif (not alive) and pid is not None:
186
  try:
187
  import os
188
  os.kill(pid, 0)
 
202
  raw_backlog += 1
203
  existing_raw.add(path.stem)
204
 
205
+ effective_active_downloads = max(active_download_tasks, len(active_claims))
206
  remaining_slots = max(0, max_backlog - raw_backlog - len(active_claims))
207
+ remaining_slots = min(remaining_slots, max(0, max_active_downloads - effective_active_downloads))
208
  remaining_slots = min(remaining_slots, workers)
209
  if limit is not None:
210
  remaining_slots = min(remaining_slots, limit)
 
242
  print(len(selected))
243
  PY
244
  )"
245
+ rm -f "$ACTIVE_JOBS_FILE" "$ACTIVE_DOWNLOAD_TASKS_FILE"
246
 
247
  if [[ "$SELECTED_COUNT" == "0" ]]; then
248
  echo "No pending videos to download, or download backlog cap already reached."
 
277
  JOB_ID="$("${cmd[@]}")"
278
  echo "Submitted download array job: $JOB_ID"
279
 
280
+ task_id=0
281
  while IFS= read -r video_id; do
282
  [[ -z "$video_id" ]] && continue
283
  cat > "$DOWNLOAD_CLAIM_DIR/${video_id}.claim" <<CLAIM
284
  job_id=$JOB_ID
285
+ task_id=$task_id
286
+ job_key=${JOB_ID}_${task_id}
287
  video_id=$video_id
288
  submitted_at=$(date '+%F %T')
289
  CLAIM
290
+ task_id=$((task_id + 1))
291
  done < "$BASE_MANIFEST"
292
 
293
  echo "SUBMITTED_DOWNLOAD_COUNT=$SELECTED_COUNT"
slurm/submit_dwpose_slurm.sh CHANGED
@@ -216,13 +216,17 @@ echo "Submitting now: $TARGET_COUNT"
216
  write_claims() {
217
  local manifest="$1"
218
  local job_id="$2"
 
219
  while IFS= read -r video_id; do
220
  [[ -z "$video_id" ]] && continue
221
  cat > "$CLAIM_DIR/${video_id}.claim" <<CLAIM
222
  job_id=$job_id
 
 
223
  video_id=$video_id
224
  submitted_at=$(date '+%F %T')
225
  CLAIM
 
226
  done < "$manifest"
227
  }
228
 
 
216
# Write one claim file per video in the manifest, tagging each with the
# submitting array job id and a 0-based task index.
# NOTE(review): task_id is the manifest line number; it matches the Slurm
# array task id only if the array was submitted as 0-(N-1) over this same
# manifest order -- confirm against the sbatch invocation.
write_claims() {
    local manifest="$1"
    local job_id="$2"
    local task_id=0
    while IFS= read -r video_id; do
        [[ -z "$video_id" ]] && continue
        # Unquoted heredoc: variables below expand at write time.
        cat > "$CLAIM_DIR/${video_id}.claim" <<CLAIM
job_id=$job_id
task_id=$task_id
job_key=${job_id}_${task_id}
video_id=$video_id
submitted_at=$(date '+%F %T')
CLAIM
        task_id=$((task_id + 1))
    done < "$manifest"
}
232
 
utils/stats_npz.py CHANGED
@@ -39,15 +39,15 @@ def _load_stats_unlocked(stats_path: Path) -> Dict[str, Dict[str, str]]:
39
  if not stats_path.exists() or stats_path.stat().st_size == 0:
40
  return {}
41
 
42
- data = np.load(stats_path, allow_pickle=True)
43
- video_ids = [str(item) for item in data.get("video_ids", np.asarray([], dtype=object)).tolist()]
44
  stats: Dict[str, Dict[str, str]] = {}
45
- for index, video_id in enumerate(video_ids):
46
- record = {}
47
- for field in STATUS_FIELDS:
48
- values = data.get(field)
49
- record[field] = str(values[index]) if values is not None and index < len(values) else ""
50
- stats[video_id] = record
 
 
51
  return stats
52
 
53
 
 
39
  if not stats_path.exists() or stats_path.stat().st_size == 0:
40
  return {}
41
 
 
 
42
  stats: Dict[str, Dict[str, str]] = {}
43
+ with np.load(stats_path, allow_pickle=True) as data:
44
+ video_ids = [str(item) for item in data.get("video_ids", np.asarray([], dtype=object)).tolist()]
45
+ for index, video_id in enumerate(video_ids):
46
+ record = {}
47
+ for field in STATUS_FIELDS:
48
+ values = data.get(field)
49
+ record[field] = str(values[index]) if values is not None and index < len(values) else ""
50
+ stats[video_id] = record
51
  return stats
52
 
53