sf895 committed on
Commit
990c8da
·
1 Parent(s): 2b36601

Add Slurm orchestration and downloader fixes

Browse files
reproduce_independently_slurm.sh ADDED
@@ -0,0 +1,746 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
#
# Slurm orchestration for reproducing the Sign-DWPose-2M dataset:
#   stage "download": pipeline01 (metadata/captions/video download)
#   stage "process":  pipeline02 (DWPose extraction) via a Slurm GPU array
#   stage "upload":   pipeline03 (archive + Hugging Face upload)
#   stage "all":      the three loops run concurrently with backpressure.
# Every setting below can be overridden from the environment.
set -euo pipefail

ROOT_DIR="${ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)}"
CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
CONDA_ENV="${CONDA_ENV:-signx2}"

# Input/output locations.
SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$ROOT_DIR/Sign-DWPose-2M-metadata_ori.csv}"
OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$ROOT_DIR/Sign-DWPose-2M-metadata_processed.csv}"
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$ROOT_DIR/raw_video}"
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$ROOT_DIR/raw_caption}"
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$ROOT_DIR/raw_metadata}"
DATASET_DIR="${DATASET_DIR:-$ROOT_DIR/dataset}"
ARCHIVE_DIR="${ARCHIVE_DIR:-$ROOT_DIR/archives}"
STATS_NPZ="${STATS_NPZ:-$ROOT_DIR/stats.npz}"
PROGRESS_JSON="${PROGRESS_JSON:-$ROOT_DIR/archive_upload_progress.json}"

# Pipeline entry points.
PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
PIPELINE03="${PIPELINE03:-$ROOT_DIR/scripts/pipeline03_upload_to_huggingface.py}"

# Run behaviour.
STAGE="${STAGE:-all}"
LIMIT="${LIMIT:-}"
VIDEO_IDS=()
FPS="${FPS:-24}"
WORKERS="${WORKERS:-}"
TARGET_BYTES="${TARGET_BYTES:-$((14 * 1024 * 1024 * 1024))}"  # 14 GiB per upload batch
DOWNLOAD_BATCH_SIZE="${DOWNLOAD_BATCH_SIZE:-1}"
PROCESS_BATCH_SIZE="${PROCESS_BATCH_SIZE:-}"
MIN_PROCESS_START_BACKLOG="${MIN_PROCESS_START_BACKLOG:-4}"
RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-340}"
MAX_RAW_VIDEO_BYTES="${MAX_RAW_VIDEO_BYTES:-0}"  # 0 = no byte cap
MAX_ITERATIONS="${MAX_ITERATIONS:-0}"            # 0 = loop forever
IDLE_SLEEP_SECONDS="${IDLE_SLEEP_SECONDS:-5}"
REPO_ID="${REPO_ID:-SignerX/Sign-DWPose-2M}"
COOKIES_FILE="${COOKIES_FILE:-}"
COOKIES_FROM_BROWSER="${COOKIES_FROM_BROWSER:-}"
EXTRACTOR_ARGS="${EXTRACTOR_ARGS:-}"

# Slurm settings: GPU array jobs for processing, plus the orchestrator job
# this script re-submits itself as (see the self-submission block below).
SLURM_PROCESS_SUBMIT_SCRIPT="${SLURM_PROCESS_SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_dwpose_slurm.sh}"
GPU_PARTITIONS="${GPU_PARTITIONS:-gpu}"
GPU_ACCOUNT="${GPU_ACCOUNT:-}"
ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-340}"
ORCHESTRATOR_PARTITION="${ORCHESTRATOR_PARTITION:-main}"
ORCHESTRATOR_ACCOUNT="${ORCHESTRATOR_ACCOUNT:-}"
ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-24:00:00}"
ORCHESTRATOR_CPUS_PER_TASK="${ORCHESTRATOR_CPUS_PER_TASK:-2}"
ORCHESTRATOR_MEM="${ORCHESTRATOR_MEM:-8G}"
RUN_LOCAL="${RUN_LOCAL:-0}"

# Boolean flags (0/1), mirrored by the pipeline CLI switches.
FORCE_METADATA="${FORCE_METADATA:-0}"
FORCE_SUBTITLES="${FORCE_SUBTITLES:-0}"
FORCE_DOWNLOAD="${FORCE_DOWNLOAD:-0}"
FORCE_PROCESS="${FORCE_PROCESS:-0}"
SKIP_VIDEO_DOWNLOAD="${SKIP_VIDEO_DOWNLOAD:-0}"
SKIP_SUBTITLES="${SKIP_SUBTITLES:-0}"
DRY_RUN_UPLOAD="${DRY_RUN_UPLOAD:-0}"
59
# Print the CLI help text (options and worked examples) to stdout.
print_usage() {
  # Quoted delimiter: the help text contains no expansions, so it is emitted
  # verbatim.
  cat <<'EOF'
Usage:
  bash reproduce_independently_slurm.sh [options]

Options:
  --stage {all,download,process,upload}
  --limit N                      For stage=process/all, cap total videos submitted in this run
  --video-id ID
  --video-ids "ID1 ID2 ..."
  --fps N
  --workers N
  --gpu-partitions P1[,P2,...]
  --gpu-account NAME
  --array-parallel N
  --max-backlog-videos N
  --min-process-start-backlog N
  --orchestrator-partition NAME
  --orchestrator-account NAME
  --orchestrator-time HH:MM:SS
  --orchestrator-cpus-per-task N
  --orchestrator-mem SIZE
  --run-local
  --target-bytes N
  --download-batch-size N
  --process-batch-size N
  --raw-backlog-limit N
  --max-raw-video-bytes N
  --max-iterations N
  --idle-sleep-seconds N
  --repo-id REPO
  --cookies FILE
  --cookies-from-browser BROWSER
  --extractor-args VALUE
  --force-metadata
  --force-subtitles
  --force-download
  --force-process
  --skip-video-download
  --skip-subtitles
  --dry-run-upload
  --help

Examples:
  bash reproduce_independently_slurm.sh --stage download --limit 10 --skip-video-download
  bash reproduce_independently_slurm.sh --stage process --video-id Bdj5MUf_3Hc
  bash reproduce_independently_slurm.sh --stage upload --target-bytes 500000000
  bash reproduce_independently_slurm.sh --stage all --gpu-partitions gpu,gpu-redhat,cgpu --array-parallel 128
  bash reproduce_independently_slurm.sh --stage all --run-local
EOF
}
110
+
111
# Parse command-line flags; each flag overrides the corresponding
# env-provided default configured above.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --stage) STAGE="$2"; shift 2 ;;
    --limit) LIMIT="$2"; shift 2 ;;
    --video-id) VIDEO_IDS+=("$2"); shift 2 ;;
    --video-ids)
      IFS=' ' read -r -a EXTRA_IDS <<< "$2"
      # BUGFIX: ${arr[@]+...} guards the empty-array expansion, which is an
      # "unbound variable" error under `set -u` on bash < 4.4 when "$2" is
      # empty or whitespace-only.
      VIDEO_IDS+=(${EXTRA_IDS[@]+"${EXTRA_IDS[@]}"})
      shift 2
      ;;
    --fps) FPS="$2"; shift 2 ;;
    --workers) WORKERS="$2"; shift 2 ;;
    --gpu-partitions) GPU_PARTITIONS="$2"; shift 2 ;;
    --gpu-account) GPU_ACCOUNT="$2"; shift 2 ;;
    --array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
    --max-backlog-videos) MAX_BACKLOG_VIDEOS="$2"; shift 2 ;;
    --min-process-start-backlog) MIN_PROCESS_START_BACKLOG="$2"; shift 2 ;;
    --orchestrator-partition) ORCHESTRATOR_PARTITION="$2"; shift 2 ;;
    --orchestrator-account) ORCHESTRATOR_ACCOUNT="$2"; shift 2 ;;
    --orchestrator-time) ORCHESTRATOR_TIME="$2"; shift 2 ;;
    --orchestrator-cpus-per-task) ORCHESTRATOR_CPUS_PER_TASK="$2"; shift 2 ;;
    --orchestrator-mem) ORCHESTRATOR_MEM="$2"; shift 2 ;;
    --run-local) RUN_LOCAL=1; shift ;;
    --target-bytes) TARGET_BYTES="$2"; shift 2 ;;
    --download-batch-size) DOWNLOAD_BATCH_SIZE="$2"; shift 2 ;;
    --process-batch-size) PROCESS_BATCH_SIZE="$2"; shift 2 ;;
    --raw-backlog-limit) RAW_BACKLOG_LIMIT="$2"; shift 2 ;;
    --max-raw-video-bytes) MAX_RAW_VIDEO_BYTES="$2"; shift 2 ;;
    --max-iterations) MAX_ITERATIONS="$2"; shift 2 ;;
    --idle-sleep-seconds) IDLE_SLEEP_SECONDS="$2"; shift 2 ;;
    --repo-id) REPO_ID="$2"; shift 2 ;;
    --cookies) COOKIES_FILE="$2"; shift 2 ;;
    --cookies-from-browser) COOKIES_FROM_BROWSER="$2"; shift 2 ;;
    --extractor-args) EXTRACTOR_ARGS="$2"; shift 2 ;;
    --force-metadata) FORCE_METADATA=1; shift ;;
    --force-subtitles) FORCE_SUBTITLES=1; shift ;;
    --force-download) FORCE_DOWNLOAD=1; shift ;;
    --force-process) FORCE_PROCESS=1; shift ;;
    --skip-video-download) SKIP_VIDEO_DOWNLOAD=1; shift ;;
    --skip-subtitles) SKIP_SUBTITLES=1; shift ;;
    --dry-run-upload) DRY_RUN_UPLOAD=1; shift ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      print_usage
      exit 1
      ;;
  esac
done
265
+
266
# ---------------------------------------------------------------------------
# Startup validation and (when on a login node) self-submission to Slurm.
# ---------------------------------------------------------------------------

# Every pipeline invocation needs the conda init script.
if [[ ! -f "$CONDA_SH" ]]; then
  echo "Missing conda init script: $CONDA_SH" >&2
  exit 1
fi

case "$STAGE" in
  all|download|process|upload) ;;
  *)
    echo "Invalid --stage: $STAGE" >&2
    exit 1
    ;;
esac

# Not inside a Slurm allocation and not forced local: re-submit this whole
# orchestration as a Slurm job carrying the current configuration, then exit.
if [[ -z "${SLURM_JOB_ID:-}" && "$RUN_LOCAL" != "1" ]]; then
  wrapper="$ROOT_DIR/slurm/run_reproduce_independently_slurm.slurm"
  if [[ ! -f "$wrapper" ]]; then
    echo "Missing orchestration wrapper: $wrapper" >&2
    exit 1
  fi
  # NOTE(review): sbatch --export splits its value on commas, so variable
  # values that themselves contain commas (e.g. GPU_PARTITIONS,
  # EXTRACTOR_ARGS) depend on sbatch's quoting rules — confirm on the
  # target cluster.
  export_args="ALL,ROOT_DIR=$ROOT_DIR,STAGE=$STAGE,LIMIT=$LIMIT,FPS=$FPS,WORKERS=$WORKERS,TARGET_BYTES=$TARGET_BYTES,DOWNLOAD_BATCH_SIZE=$DOWNLOAD_BATCH_SIZE,PROCESS_BATCH_SIZE=$PROCESS_BATCH_SIZE,MIN_PROCESS_START_BACKLOG=$MIN_PROCESS_START_BACKLOG,RAW_BACKLOG_LIMIT=$RAW_BACKLOG_LIMIT,MAX_RAW_VIDEO_BYTES=$MAX_RAW_VIDEO_BYTES,MAX_ITERATIONS=$MAX_ITERATIONS,IDLE_SLEEP_SECONDS=$IDLE_SLEEP_SECONDS,REPO_ID=$REPO_ID,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,GPU_PARTITIONS=$GPU_PARTITIONS,GPU_ACCOUNT=$GPU_ACCOUNT,ARRAY_PARALLEL=$ARRAY_PARALLEL,MAX_BACKLOG_VIDEOS=$MAX_BACKLOG_VIDEOS,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,FORCE_PROCESS=$FORCE_PROCESS,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,DRY_RUN_UPLOAD=$DRY_RUN_UPLOAD,RUN_LOCAL=1"
  if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
    # IDs travel as a single space-joined env var and are re-split inside
    # the submitted job (below).
    export VIDEO_IDS_JOINED
    VIDEO_IDS_JOINED="${VIDEO_IDS[*]}"
    export_args+=",VIDEO_IDS_JOINED=$VIDEO_IDS_JOINED"
  fi
  cmd=(sbatch
    --partition "$ORCHESTRATOR_PARTITION"
    --time "$ORCHESTRATOR_TIME"
    --cpus-per-task "$ORCHESTRATOR_CPUS_PER_TASK"
    --mem "$ORCHESTRATOR_MEM"
    --export "$export_args"
  )
  if [[ -n "$ORCHESTRATOR_ACCOUNT" ]]; then
    cmd+=(--account "$ORCHESTRATOR_ACCOUNT")
  fi
  cmd+=("$wrapper")
  echo "Submitting full orchestration job on partition=$ORCHESTRATOR_PARTITION stage=$STAGE"
  "${cmd[@]}"
  exit 0
fi

# Inside the submitted job the ID list arrives joined; rebuild the array.
if [[ -n "${VIDEO_IDS_JOINED:-}" && ${#VIDEO_IDS[@]} -eq 0 ]]; then
  IFS=' ' read -r -a VIDEO_IDS <<< "$VIDEO_IDS_JOINED"
fi

mkdir -p "$RAW_VIDEO_DIR" "$RAW_CAPTION_DIR" "$RAW_METADATA_DIR" "$DATASET_DIR"
if [[ ! -x "$SLURM_PROCESS_SUBMIT_SCRIPT" ]]; then
  echo "Missing Slurm submit script: $SLURM_PROCESS_SUBMIT_SCRIPT" >&2
  exit 1
fi
313
+
314
# Execute "$@" inside the pinned conda environment ($CONDA_ENV), sourcing
# the conda init script each time so it also works in fresh Slurm shells.
run_in_dwpose() {
  # shellcheck disable=SC1090
  . "$CONDA_SH"
  CONDA_NO_PLUGINS=true conda run -n "$CONDA_ENV" "$@"
}
319
+
320
# Run pipeline01 (metadata + subtitles + video download) in the conda env.
# $1 (optional) caps how many videos this invocation handles; when omitted
# the global LIMIT applies.
run_download_stage() {
  local batch_limit="${1:-$LIMIT}"
  local dl_cmd=(python "$PIPELINE01"
    --source-metadata-csv "$SOURCE_METADATA_CSV"
    --output-metadata-csv "$OUTPUT_METADATA_CSV"
    --raw-video-dir "$RAW_VIDEO_DIR"
    --raw-caption-dir "$RAW_CAPTION_DIR"
    --raw-metadata-dir "$RAW_METADATA_DIR"
    --dataset-dir "$DATASET_DIR"
    --stats-npz "$STATS_NPZ"
  )

  # Optional arguments, appended only when configured.
  if [[ -n "$batch_limit" ]]; then
    dl_cmd+=(--limit "$batch_limit")
  fi
  if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
    dl_cmd+=(--video-ids "${VIDEO_IDS[@]}")
  fi
  if [[ $FORCE_METADATA -eq 1 ]]; then
    dl_cmd+=(--force-metadata)
  fi
  if [[ $FORCE_SUBTITLES -eq 1 ]]; then
    dl_cmd+=(--force-subtitles)
  fi
  if [[ $FORCE_DOWNLOAD -eq 1 ]]; then
    dl_cmd+=(--force-download)
  fi
  if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then
    dl_cmd+=(--skip-video-download)
  fi
  if [[ $SKIP_SUBTITLES -eq 1 ]]; then
    dl_cmd+=(--skip-subtitles)
  fi
  if [[ -n "$COOKIES_FROM_BROWSER" ]]; then
    dl_cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER")
  fi
  if [[ -n "$COOKIES_FILE" ]]; then
    dl_cmd+=(--cookies "$COOKIES_FILE")
  fi
  if [[ -n "$EXTRACTOR_ARGS" ]]; then
    dl_cmd+=(--extractor-args "$EXTRACTOR_ARGS")
  fi

  run_in_dwpose "${dl_cmd[@]}"
}
365
+
366
# Number of videos submitted by the most recent run_process_stage call.
RUN_PROCESS_STAGE_SUBMITTED_COUNT=0

# Submit one batch of DWPose-extraction work to Slurm via
# $SLURM_PROCESS_SUBMIT_SCRIPT.
# $1 (optional): per-call submission cap; defaults to the global LIMIT.
# Side effect: sets RUN_PROCESS_STAGE_SUBMITTED_COUNT from the submit
# script's "SUBMITTED_VIDEO_COUNT=N" output line (0 when absent).
# Returns the submit script's exit status.
run_process_stage() {
  local stage_limit="${1:-$LIMIT}"
  local cmd=(bash "$SLURM_PROCESS_SUBMIT_SCRIPT"
    --partitions "$GPU_PARTITIONS"
    --fps "$FPS"
    --max-backlog-videos "$MAX_BACKLOG_VIDEOS"
    --delete-source-on-success
  )
  local output status submitted_count

  if [[ -n "$stage_limit" ]]; then
    cmd+=(--limit "$stage_limit")
  fi
  if [[ -n "$ARRAY_PARALLEL" ]]; then
    cmd+=(--array-parallel "$ARRAY_PARALLEL")
  fi
  if [[ -n "$GPU_ACCOUNT" ]]; then
    cmd+=(--account "$GPU_ACCOUNT")
  fi
  if [[ $FORCE_PROCESS -eq 1 ]]; then
    cmd+=(--force-process)
  fi

  # BUGFIX: capture the exit status in the `if` condition instead of via a
  # trailing `status=$?`. Under `set -e`, a bare call (stage=process) with a
  # failing submit would previously abort the script between the command
  # substitution and the status capture, discarding the captured output.
  if output="$("${cmd[@]}")"; then
    status=0
  else
    status=$?
  fi
  printf '%s\n' "$output"
  submitted_count="$(awk -F= '/^SUBMITTED_VIDEO_COUNT=/{print $2}' <<< "$output" | tail -n 1)"
  RUN_PROCESS_STAGE_SUBMITTED_COUNT="${submitted_count:-0}"
  return "$status"
}
399
+
400
# Run pipeline03 (archive + upload to the Hugging Face repo) in the conda
# env. $1 == "1" makes the uploader wait for TARGET_BYTES of completed work
# before cutting an archive.
run_upload_stage() {
  local require_target="${1:-0}"
  local up_cmd=(python "$PIPELINE03"
    --dataset-dir "$DATASET_DIR"
    --raw-video-dir "$RAW_VIDEO_DIR"
    --raw-caption-dir "$RAW_CAPTION_DIR"
    --raw-metadata-dir "$RAW_METADATA_DIR"
    --archive-dir "$ARCHIVE_DIR"
    --progress-path "$PROGRESS_JSON"
    --stats-npz "$STATS_NPZ"
    --repo-id "$REPO_ID"
    --target-bytes "$TARGET_BYTES"
  )

  if [[ "$require_target" == "1" ]]; then
    up_cmd+=(--require-target-bytes)
  fi
  if [[ $DRY_RUN_UPLOAD -eq 1 ]]; then
    up_cmd+=(--dry-run)
  fi

  run_in_dwpose "${up_cmd[@]}"
}
423
+
424
# Delete raw videos whose DWPose output folder already carries the
# npz/.complete marker; prints the number of files deleted.
prune_processed_raw_videos() {
  # BUGFIX: paths are handed to Python via the environment instead of being
  # interpolated into the heredoc source, so directory names containing
  # quotes or backslashes can no longer break the generated program.
  RAW_VIDEO_DIR="$RAW_VIDEO_DIR" DATASET_DIR="$DATASET_DIR" python - <<'PY'
import os
from pathlib import Path

raw_dir = Path(os.environ["RAW_VIDEO_DIR"])
dataset_dir = Path(os.environ["DATASET_DIR"])
deleted = 0
if raw_dir.exists():
    for video_path in raw_dir.iterdir():
        if not video_path.is_file():
            continue
        marker = dataset_dir / video_path.stem / "npz" / ".complete"
        if marker.exists():
            video_path.unlink(missing_ok=True)
            deleted += 1
print(deleted)
PY
}
441
+
442
# Print the total size in bytes of all regular files under $1, recursively;
# prints 0 when the directory does not exist. Relies on GNU find's -printf.
dir_size_bytes() {
  local target="$1"
  if [[ -d "$target" ]]; then
    find "$target" -type f -printf '%s\n' | awk '{ total += $1 } END { print total + 0 }'
  else
    echo 0
  fi
}
450
+
451
# Count manifest rows whose download_status is not "ok". Prefers the
# processed manifest and falls back to the source manifest when the
# processed one does not exist yet.
count_pending_downloads() {
  python - <<PY
import csv, sys
from pathlib import Path

# Subtitle/caption columns can be huge; raise the CSV field cap.
csv.field_size_limit(min(sys.maxsize, 10 * 1024 * 1024))
manifest = Path("$OUTPUT_METADATA_CSV")
if not manifest.exists():
    manifest = Path("$SOURCE_METADATA_CSV")
with manifest.open("r", encoding="utf-8-sig", newline="") as handle:
    reader = csv.DictReader(handle)
    print(sum(1 for row in reader if (row.get("download_status") or "").strip() != "ok"))
PY
}
469
+
470
# Count regular files sitting in the raw-video backlog directory
# (non-recursive); prints 0 when the directory does not exist.
count_pending_process() {
  python - <<PY
from pathlib import Path

raw_dir = Path("$RAW_VIDEO_DIR")
backlog = 0
if raw_dir.exists():
    backlog = sum(1 for entry in raw_dir.iterdir() if entry.is_file())
print(backlog)
PY
}
482
+
483
# Count .claim files whose recorded Slurm job is still queued/running for
# $USER. Stale claims (job gone, file unreadable job id, or squeue
# unavailable) are deleted as a side effect.
count_active_process_claims() {
  python - <<PY
import subprocess
from pathlib import Path

claim_dir = Path("$ROOT_DIR/slurm/state/claims")
claim_dir.mkdir(parents=True, exist_ok=True)
try:
    result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%A"], check=True, capture_output=True, text=True)
    # Array tasks print as JOBID_TASKID; keep only the base job id.
    active_jobs = {line.split("_", 1)[0].strip() for line in result.stdout.splitlines() if line.strip()}
except Exception:
    # squeue missing or failing: treat every claim as stale.
    active_jobs = set()
active = 0
for claim_path in claim_dir.glob("*.claim"):
    try:
        claim_lines = claim_path.read_text(encoding="utf-8").splitlines()
    except OSError:
        continue
    job_id = ""
    for claim_line in claim_lines:
        if claim_line.startswith("job_id="):
            job_id = claim_line.split("=", 1)[1].strip()
            break
    if job_id and job_id in active_jobs:
        active += 1
    else:
        claim_path.unlink(missing_ok=True)
print(active)
PY
}
512
+
513
# Count dataset folders that carry the npz/.complete marker and are not yet
# recorded as uploaded in the progress JSON.
count_complete_pending_upload() {
  python - <<PY
import json
from pathlib import Path

dataset_dir = Path("$DATASET_DIR")
progress_path = Path("$PROGRESS_JSON")
uploaded = set()
if progress_path.exists():
    uploaded = set(json.loads(progress_path.read_text()).get("uploaded_folders", {}).keys())
count = 0
# BUGFIX: guard against a missing dataset dir (iterdir() would raise
# FileNotFoundError), matching the raw-dir guard in count_pending_process.
if dataset_dir.exists():
    for folder_path in dataset_dir.iterdir():
        if not folder_path.is_dir():
            continue
        if folder_path.name in uploaded:
            continue
        if (folder_path / "npz" / ".complete").exists():
            count += 1
print(count)
PY
}
533
+
534
# Sum the byte size of every file inside dataset folders that are complete
# (npz/.complete present) but not yet uploaded per the progress JSON.
bytes_complete_pending_upload() {
  python - <<PY
import json
from pathlib import Path

dataset_dir = Path("$DATASET_DIR")
progress_path = Path("$PROGRESS_JSON")
uploaded = set()
if progress_path.exists():
    uploaded = set(json.loads(progress_path.read_text()).get("uploaded_folders", {}).keys())
total = 0
# BUGFIX: guard against a missing dataset dir (iterdir() would raise
# FileNotFoundError), matching the raw-dir guard in count_pending_process.
if dataset_dir.exists():
    for folder_path in dataset_dir.iterdir():
        if not folder_path.is_dir():
            continue
        if folder_path.name in uploaded:
            continue
        if not (folder_path / "npz" / ".complete").exists():
            continue
        for path in folder_path.rglob("*"):
            if path.is_file():
                total += path.stat().st_size
print(total)
PY
}
557
+
558
# Repeatedly run pipeline01 in batches of DOWNLOAD_BATCH_SIZE, pausing when
# the raw-video backlog (file count or total bytes) hits its limit, until
# nothing remains to download or MAX_ITERATIONS is reached.
download_loop() {
  local iteration=0
  while true; do
    iteration=$((iteration + 1))
    local pruned
    pruned="$(prune_processed_raw_videos)"
    local pending_download pending_process raw_video_bytes
    pending_download="$(count_pending_downloads)"
    pending_process="$(count_pending_process)"
    raw_video_bytes="$(dir_size_bytes "$RAW_VIDEO_DIR")"
    echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned"

    # Termination checks come first.
    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[download] reached max iterations: $MAX_ITERATIONS"
      break
    fi
    if [[ "$pending_download" -eq 0 ]]; then
      echo "[download] nothing left to download"
      break
    fi
    # Backpressure: give the GPU stage time to drain the raw backlog.
    if [[ "$pending_process" -ge "$RAW_BACKLOG_LIMIT" ]]; then
      echo "[download] backpressure: raw backlog $pending_process >= limit $RAW_BACKLOG_LIMIT"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    if [[ "$MAX_RAW_VIDEO_BYTES" -gt 0 && "$raw_video_bytes" -ge "$MAX_RAW_VIDEO_BYTES" ]]; then
      echo "[download] backpressure: raw_video_bytes $raw_video_bytes >= limit $MAX_RAW_VIDEO_BYTES"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi

    if ! run_download_stage "$DOWNLOAD_BATCH_SIZE"; then
      echo "[download] pipeline01 failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    fi
  done
}
595
+
596
# Continuously submit DWPose-extraction batches to Slurm, honouring the
# global LIMIT, the minimum-backlog warm-up, and the Slurm backlog cap,
# until nothing remains to process and no work is in flight.
process_loop() {
  local iteration=0
  local submitted_total=0
  while true; do
    iteration=$((iteration + 1))
    local pruned
    pruned="$(prune_processed_raw_videos)"
    local pending_download pending_process active_process_claims remaining_limit cycle_limit
    pending_download="$(count_pending_downloads)"
    pending_process="$(count_pending_process)"
    active_process_claims="$(count_active_process_claims)"
    echo "[process] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process slurm_backlog=$active_process_claims submitted_total=$submitted_total pruned_raw_videos=$pruned"

    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[process] reached max iterations: $MAX_ITERATIONS"
      break
    fi
    if [[ -n "$LIMIT" ]]; then
      remaining_limit=$((LIMIT - submitted_total))
      if [[ "$remaining_limit" -le 0 ]]; then
        if [[ "$active_process_claims" -eq 0 ]]; then
          echo "[process] reached submission limit: $submitted_total >= $LIMIT"
          break
        fi
        echo "[process] submission limit reached; waiting for in-flight tasks to finish"
        sleep "$IDLE_SLEEP_SECONDS"
        continue
      fi
    else
      remaining_limit=-1  # sentinel: no limit configured
    fi
    if [[ "$pending_process" -eq 0 ]]; then
      if [[ "$pending_download" -eq 0 && "$active_process_claims" -eq 0 ]]; then
        echo "[process] nothing left to process"
        break
      fi
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    # Warm-up: avoid firing tiny arrays while downloads are still trickling in.
    if [[ "$active_process_claims" -eq 0 && "$pending_process" -lt "$MIN_PROCESS_START_BACKLOG" && "$pending_download" -gt 0 ]]; then
      echo "[process] waiting for minimum raw backlog: $pending_process < $MIN_PROCESS_START_BACKLOG"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    if [[ "$active_process_claims" -ge "$MAX_BACKLOG_VIDEOS" ]]; then
      echo "[process] backpressure: slurm backlog $active_process_claims >= limit $MAX_BACKLOG_VIDEOS"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi

    # BUGFIX: when PROCESS_BATCH_SIZE is empty (its default) and a LIMIT is
    # active, the old code passed an empty cycle limit, making
    # run_process_stage fall back to the full LIMIT every cycle and
    # over-submit past the cap. Cap each cycle at the remaining budget.
    cycle_limit="$PROCESS_BATCH_SIZE"
    if [[ "$remaining_limit" -gt 0 ]]; then
      if [[ -z "$cycle_limit" || "$remaining_limit" -lt "$cycle_limit" ]]; then
        cycle_limit="$remaining_limit"
      fi
    fi

    if ! run_process_stage "$cycle_limit"; then
      echo "[process] slurm submit failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    else
      submitted_total=$((submitted_total + RUN_PROCESS_STAGE_SUBMITTED_COUNT))
    fi
    sleep "$IDLE_SLEEP_SECONDS"
  done
}
660
+
661
# Archive and upload completed dataset folders. While upstream stages are
# still producing, wait until TARGET_BYTES worth of complete work has
# accumulated; once upstream is fully drained, flush whatever remains.
upload_loop() {
  local iteration=0
  while true; do
    iteration=$((iteration + 1))
    local pruned
    pruned="$(prune_processed_raw_videos)"
    local pending_download pending_process complete_pending_upload complete_pending_upload_bytes
    pending_download="$(count_pending_downloads)"
    pending_process="$(count_pending_process)"
    complete_pending_upload="$(count_complete_pending_upload)"
    complete_pending_upload_bytes="$(bytes_complete_pending_upload)"
    echo "[upload] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process complete_pending_upload=$complete_pending_upload complete_pending_upload_bytes=$complete_pending_upload_bytes pruned_raw_videos=$pruned"

    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[upload] reached max iterations: $MAX_ITERATIONS"
      break
    fi
    if [[ "$complete_pending_upload" -eq 0 ]]; then
      if [[ "$pending_download" -eq 0 && "$pending_process" -eq 0 ]]; then
        echo "[upload] nothing left to upload"
        break
      fi
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi

    # Upstream still busy and not enough bytes accumulated: keep waiting.
    if [[ "$complete_pending_upload_bytes" -lt "$TARGET_BYTES" && ( "$pending_download" -gt 0 || "$pending_process" -gt 0 ) ]]; then
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi

    # Enforce the byte target only while more work is coming; the final
    # flush uploads whatever is left regardless of size.
    local require_target=1
    if [[ "$pending_download" -eq 0 && "$pending_process" -eq 0 ]]; then
      require_target=0
    fi
    if ! run_upload_stage "$require_target"; then
      echo "[upload] pipeline03 failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    fi
  done
}
702
+
703
# Send TERM to each still-running PID given as an argument. Empty arguments
# and already-dead PIDs are silently skipped; kill failures are ignored.
cleanup_background_jobs() {
  local pid
  for pid in "$@"; do
    if [[ -n "$pid" ]] && kill -0 "$pid" 2>/dev/null; then
      kill "$pid" 2>/dev/null || true
    fi
  done
}
711
+
712
# Stage "all": run the download, process, and upload loops concurrently.
# A trap tears the background loops down on INT/TERM/EXIT; each wait
# propagates a loop's failure status (which, under set -e, aborts the
# script and fires the cleanup trap).
run_all_loop() {
  DOWNLOAD_LOOP_PID=""
  PROCESS_LOOP_PID=""
  UPLOAD_LOOP_PID=""

  download_loop &
  DOWNLOAD_LOOP_PID=$!
  process_loop &
  PROCESS_LOOP_PID=$!
  upload_loop &
  UPLOAD_LOOP_PID=$!

  trap 'cleanup_background_jobs "$DOWNLOAD_LOOP_PID" "$PROCESS_LOOP_PID" "$UPLOAD_LOOP_PID"' INT TERM EXIT

  local loop_pid
  for loop_pid in "$DOWNLOAD_LOOP_PID" "$PROCESS_LOOP_PID" "$UPLOAD_LOOP_PID"; do
    wait "$loop_pid"
  done

  trap - INT TERM EXIT
}
732
+
733
# Dispatch to the requested stage; STAGE was validated earlier, so no
# default arm is needed.
case "$STAGE" in
  download) run_download_stage ;;
  process)  run_process_stage ;;
  upload)   run_upload_stage ;;
  all)      run_all_loop ;;
esac
scripts/pipeline01_download_video_fix_caption.py CHANGED
@@ -29,7 +29,7 @@ DEFAULT_RAW_CAPTION_DIR = REPO_ROOT / "raw_caption"
29
  DEFAULT_RAW_METADATA_DIR = REPO_ROOT / "raw_metadata"
30
  DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
31
  DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
32
- DEFAULT_YT_DLP_EXTRACTOR_ARGS = "youtube:player_client=tv,web_safari,web"
33
  COOKIE_DOMAINS = ("youtube.com", "google.com", "googlevideo.com", "ytimg.com")
34
  TIMESTAMP_LINE_RE = re.compile(
35
  r"^(?P<start>\d{2}:\d{2}:\d{2}\.\d{3})\s+-->\s+(?P<end>\d{2}:\d{2}:\d{2}\.\d{3})"
@@ -51,6 +51,7 @@ DEFAULT_COLUMNS = [
51
  "metadata_status",
52
  "subtitle_status",
53
  "download_status",
 
54
  "error",
55
  "processed_at",
56
  ]
@@ -79,6 +80,7 @@ def parse_args() -> argparse.Namespace:
79
  parser.add_argument("--cookies", type=Path, default=None)
80
  parser.add_argument("--cookies-from-browser", default=None)
81
  parser.add_argument("--extractor-args", default=DEFAULT_YT_DLP_EXTRACTOR_ARGS)
 
82
  return parser.parse_args()
83
 
84
 
@@ -135,11 +137,19 @@ def write_manifest(csv_path: Path, rows: Sequence[Dict[str, str]], fieldnames: S
135
  tmp_path.replace(csv_path)
136
 
137
 
 
 
 
 
 
 
 
 
138
  def build_yt_dlp_base_command(args: argparse.Namespace) -> List[str]:
139
- command = ["yt-dlp", "--newline"]
140
- node_binary = resolve_node_binary()
141
- if node_binary is not None:
142
- command.extend(["--js-runtimes", f"node:{node_binary}"])
143
  if getattr(args, "_effective_cookies", None):
144
  command.extend(["--cookies", str(args._effective_cookies)])
145
  if args.cookies_from_browser:
@@ -149,19 +159,45 @@ def build_yt_dlp_base_command(args: argparse.Namespace) -> List[str]:
149
  return command
150
 
151
 
152
- def resolve_node_binary() -> Path | None:
153
- node_path = shutil.which("node")
154
- if node_path:
155
- return Path(node_path)
156
 
157
  python_path = Path(sys.executable).resolve()
158
  for parent in python_path.parents:
159
- fallback = parent / "bin" / "node"
160
  if fallback.exists():
161
  return fallback
162
  return None
163
 
164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  def sanitize_cookie_file(cookie_path: Path) -> Path:
166
  tmp_handle = tempfile.NamedTemporaryFile(
167
  mode="w",
@@ -308,13 +344,36 @@ def find_video_file(raw_video_dir: Path, video_id: str) -> Path | None:
308
  return sorted(candidates)[0] if candidates else None
309
 
310
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
311
  def download_video(video_id: str, raw_video_dir: Path, args: argparse.Namespace) -> Tuple[str, str]:
312
  raw_video_dir.mkdir(parents=True, exist_ok=True)
 
313
  command = build_yt_dlp_base_command(args)
314
  command.extend(
315
  [
316
  "--output",
317
  str(raw_video_dir / "%(id)s.%(ext)s"),
 
 
 
 
318
  "--merge-output-format",
319
  "mp4",
320
  youtube_url(video_id),
@@ -322,9 +381,10 @@ def download_video(video_id: str, raw_video_dir: Path, args: argparse.Namespace)
322
  )
323
  result = run_command(command)
324
  video_path = find_video_file(raw_video_dir, video_id)
 
325
  if result.returncode != 0 and not video_path:
326
  raise RuntimeError(result.stderr.strip() or result.stdout.strip() or "video download failed")
327
- return str(video_path.relative_to(REPO_ROOT)) if video_path else "", ""
328
 
329
 
330
  def subtitle_file_language(path: Path, video_id: str) -> str:
@@ -488,7 +548,7 @@ def persist_raw_metadata(raw_metadata_dir: Path, video_id: str, metadata: Dict[s
488
  metadata_path = raw_metadata_dir / f"{video_id}.json"
489
  with metadata_path.open("w", encoding="utf-8") as handle:
490
  json.dump(metadata, handle, ensure_ascii=False, indent=2)
491
- return str(metadata_path.relative_to(REPO_ROOT))
492
 
493
 
494
  def iter_target_rows(
@@ -527,15 +587,43 @@ def collect_local_video_ids(args: argparse.Namespace) -> set[str]:
527
  return local_video_ids
528
 
529
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
530
  def row_needs_processing(row: Dict[str, str], args: argparse.Namespace) -> bool:
531
  if args.force_metadata or args.force_subtitles or args.force_download:
532
  return True
 
 
533
  metadata_status = (row.get("metadata_status") or "").strip()
534
  subtitle_status = (row.get("subtitle_status") or "").strip()
535
  download_status = (row.get("download_status") or "").strip()
536
 
537
  needs_metadata = metadata_status != "ok"
538
- needs_subtitles = (not args.skip_subtitles) and subtitle_status != "ok"
 
539
  needs_download = (not args.skip_video_download) and download_status != "ok"
540
  return needs_metadata or needs_subtitles or needs_download
541
 
@@ -586,11 +674,14 @@ def main() -> None:
586
  row["error"] = metadata_error
587
  row["processed_at"] = time.strftime("%Y-%m-%d %H:%M:%S")
588
  stats_record["metadata_status"] = "failed"
589
- stats_record["last_error"] = metadata_error
590
  stats_record["updated_at"] = row["processed_at"]
 
 
591
  write_manifest(args.output_metadata_csv, rows, fieldnames)
592
  save_stats(args.stats_npz, stats)
593
  print(f" metadata failed: {metadata_error}")
 
 
594
  if args.sleep_seconds > 0:
595
  time.sleep(args.sleep_seconds)
596
  continue
@@ -629,7 +720,7 @@ def main() -> None:
629
  row["subtitle_status"] = "partial" if subtitle_payloads else "failed"
630
 
631
  row["subtitle_languages"] = "|".join(sorted(subtitle_payloads))
632
- row["subtitle_dir_path"] = str(subtitle_dir.relative_to(REPO_ROOT)) if subtitle_payloads else ""
633
  subtitle_en, subtitle_en_source = select_english_subtitle(subtitle_payloads)
634
  row["subtitle_en_source"] = subtitle_en_source
635
  if "subtitle_texts_json" in row:
@@ -645,12 +736,12 @@ def main() -> None:
645
  existing_video = find_video_file(args.raw_video_dir, video_id)
646
  if args.skip_video_download:
647
  row["download_status"] = "skipped"
648
- row["raw_video_path"] = str(existing_video.relative_to(REPO_ROOT)) if existing_video else ""
649
  else:
650
  if existing_video is None or args.force_download:
651
  row["raw_video_path"], download_error = download_video(video_id, args.raw_video_dir, args)
652
  else:
653
- row["raw_video_path"] = str(existing_video.relative_to(REPO_ROOT))
654
  row["download_status"] = "ok" if row["raw_video_path"] else "failed"
655
  except Exception as exc:
656
  download_error = str(exc)
@@ -672,6 +763,12 @@ def main() -> None:
672
  stats_record["metadata_status"] = row["metadata_status"]
673
  stats_record["subtitle_status"] = row["subtitle_status"]
674
  stats_record["download_status"] = row["download_status"]
 
 
 
 
 
 
675
  stats_record["last_error"] = row["error"]
676
  stats_record["updated_at"] = row["processed_at"]
677
  write_manifest(args.output_metadata_csv, rows, fieldnames)
@@ -681,6 +778,8 @@ def main() -> None:
681
  print(f" video download failed: {download_error}")
682
  if row["subtitle_status"] in {"failed", "partial"}:
683
  print(f" subtitle status: {row['subtitle_status']} {subtitle_error}")
 
 
684
 
685
  if args.sleep_seconds > 0:
686
  time.sleep(args.sleep_seconds)
 
29
  DEFAULT_RAW_METADATA_DIR = REPO_ROOT / "raw_metadata"
30
  DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
31
  DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
32
+ DEFAULT_YT_DLP_EXTRACTOR_ARGS = "youtube:player_client=web_safari,web"
33
  COOKIE_DOMAINS = ("youtube.com", "google.com", "googlevideo.com", "ytimg.com")
34
  TIMESTAMP_LINE_RE = re.compile(
35
  r"^(?P<start>\d{2}:\d{2}:\d{2}\.\d{3})\s+-->\s+(?P<end>\d{2}:\d{2}:\d{2}\.\d{3})"
 
51
  "metadata_status",
52
  "subtitle_status",
53
  "download_status",
54
+ "failure_count",
55
  "error",
56
  "processed_at",
57
  ]
 
80
  parser.add_argument("--cookies", type=Path, default=None)
81
  parser.add_argument("--cookies-from-browser", default=None)
82
  parser.add_argument("--extractor-args", default=DEFAULT_YT_DLP_EXTRACTOR_ARGS)
83
+ parser.add_argument("--max-failures-before-skip", type=int, default=2)
84
  return parser.parse_args()
85
 
86
 
 
137
  tmp_path.replace(csv_path)
138
 
139
 
140
def repo_relative_or_absolute(path: Path) -> str:
    """Render *path* relative to REPO_ROOT when possible, else absolute.

    Manifest rows prefer repo-relative paths so the CSV stays portable across
    checkouts; anything outside the repository falls back to its resolved
    absolute form.
    """
    absolute = path.resolve()
    try:
        return str(absolute.relative_to(REPO_ROOT.resolve()))
    except ValueError:
        # Not located under the repository root.
        return str(absolute)
146
+
147
+
148
  def build_yt_dlp_base_command(args: argparse.Namespace) -> List[str]:
149
+ command = ["yt-dlp", "--newline", "--remote-components", "ejs:github"]
150
+ js_runtime = resolve_js_runtime()
151
+ if js_runtime is not None:
152
+ command.extend(["--js-runtimes", js_runtime])
153
  if getattr(args, "_effective_cookies", None):
154
  command.extend(["--cookies", str(args._effective_cookies)])
155
  if args.cookies_from_browser:
 
159
  return command
160
 
161
 
162
+ def resolve_runtime_binary(name: str) -> Path | None:
163
+ runtime_path = shutil.which(name)
164
+ if runtime_path:
165
+ return Path(runtime_path)
166
 
167
  python_path = Path(sys.executable).resolve()
168
  for parent in python_path.parents:
169
+ fallback = parent / "bin" / name
170
  if fallback.exists():
171
  return fallback
172
  return None
173
 
174
 
175
+ def node_version_is_supported(node_binary: Path) -> bool:
176
+ try:
177
+ result = subprocess.run([str(node_binary), "--version"], capture_output=True, text=True, check=False)
178
+ except OSError:
179
+ return False
180
+ version_text = (result.stdout or result.stderr).strip().lstrip("v")
181
+ if not version_text:
182
+ return False
183
+ major_text = version_text.split(".", 1)[0]
184
+ try:
185
+ return int(major_text) >= 20
186
+ except ValueError:
187
+ return False
188
+
189
+
190
+ def resolve_js_runtime() -> str | None:
191
+ deno_binary = resolve_runtime_binary("deno")
192
+ if deno_binary is not None:
193
+ return f"deno:{deno_binary}"
194
+
195
+ node_binary = resolve_runtime_binary("node")
196
+ if node_binary is not None and node_version_is_supported(node_binary):
197
+ return f"node:{node_binary}"
198
+ return None
199
+
200
+
201
  def sanitize_cookie_file(cookie_path: Path) -> Path:
202
  tmp_handle = tempfile.NamedTemporaryFile(
203
  mode="w",
 
344
  return sorted(candidates)[0] if candidates else None
345
 
346
 
347
def iter_partial_download_files(raw_video_dir: Path, video_id: str) -> Iterable[Path]:
    """Yield leftover yt-dlp partial-download artifacts for *video_id*.

    Matches files named ``<video_id>.*`` that carry a ``.part`` or ``.ytdl``
    suffix anywhere in their suffix chain (e.g. ``<id>.f137.mp4.part``).
    The glob is anchored on ``<video_id>.`` so an id that happens to be a
    prefix of another id can never match the other video's files.
    Each distinct file is yielded at most once.
    """
    seen: set[Path] = set()
    # Anchor with a trailing dot: video id "abc" must not match "abcd.mp4.part".
    for path in raw_video_dir.glob(f"{video_id}.*"):
        if not path.is_file():
            continue
        if any(suffix in (".part", ".ytdl") for suffix in path.suffixes):
            resolved = path.resolve()
            if resolved not in seen:
                seen.add(resolved)
                yield path


def cleanup_partial_downloads(raw_video_dir: Path, video_id: str) -> None:
    """Delete stale partial-download files so a retried yt-dlp run starts clean."""
    for partial_path in iter_partial_download_files(raw_video_dir, video_id):
        partial_path.unlink(missing_ok=True)
363
+
364
+
365
def download_video(video_id: str, raw_video_dir: Path, args: argparse.Namespace) -> Tuple[str, str]:
    """Download one YouTube video with yt-dlp; return (manifest_path, error).

    Requests the smallest available stream (worst video+audio, format-sorted
    by ascending resolution/size/bitrate/fps) merged into mp4. Stale ``.part``
    / ``.ytdl`` files are cleared both before and after the run so retries
    start from scratch. Raises RuntimeError when yt-dlp fails and no output
    file landed on disk.
    """
    raw_video_dir.mkdir(parents=True, exist_ok=True)
    cleanup_partial_downloads(raw_video_dir, video_id)

    output_template = str(raw_video_dir / "%(id)s.%(ext)s")
    command = build_yt_dlp_base_command(args) + [
        "--output",
        output_template,
        "--format",
        "worstvideo*+worstaudio/worst",
        "--format-sort",
        "+res,+size,+br,+fps",
        "--merge-output-format",
        "mp4",
        youtube_url(video_id),
    ]

    result = run_command(command)
    video_path = find_video_file(raw_video_dir, video_id)
    cleanup_partial_downloads(raw_video_dir, video_id)

    # A nonzero exit is only fatal when nothing usable was produced.
    if result.returncode != 0 and not video_path:
        raise RuntimeError(result.stderr.strip() or result.stdout.strip() or "video download failed")
    return repo_relative_or_absolute(video_path) if video_path else "", ""
388
 
389
 
390
  def subtitle_file_language(path: Path, video_id: str) -> str:
 
548
  metadata_path = raw_metadata_dir / f"{video_id}.json"
549
  with metadata_path.open("w", encoding="utf-8") as handle:
550
  json.dump(metadata, handle, ensure_ascii=False, indent=2)
551
+ return repo_relative_or_absolute(metadata_path)
552
 
553
 
554
  def iter_target_rows(
 
587
  return local_video_ids
588
 
589
 
590
def row_failure_count(row: Dict[str, str]) -> int:
    """Parse the row's failure counter, treating blank or garbage values as zero."""
    raw = (row.get("failure_count") or "0").strip() or "0"
    try:
        return int(raw)
    except ValueError:
        return 0


def record_row_failure(row: Dict[str, str], stats_record: Dict[str, object], error_text: str, max_failures_before_skip: int) -> tuple[int, bool]:
    """Bump the row's failure counter and mark it skipped once the cap is hit.

    Returns ``(new_failure_count, should_skip)``. When the counter reaches
    *max_failures_before_skip*, download_status becomes "skipped" and the
    error message is annotated so the row stops being retried.
    """
    updated = row_failure_count(row) + 1
    row["failure_count"] = str(updated)
    stats_record["failure_count"] = row["failure_count"]

    hit_cap = updated >= max_failures_before_skip
    if hit_cap:
        suffix = f"skipped after {updated} failures"
        row["download_status"] = "skipped"
        row["error"] = f"{error_text} | {suffix}" if error_text else suffix
        stats_record["download_status"] = row["download_status"]
        stats_record["last_error"] = row["error"]
    return updated, hit_cap


def reset_row_failures(row: Dict[str, str], stats_record: Dict[str, object]) -> None:
    """Zero the failure counter after a fully successful pass over the row."""
    row["failure_count"] = "0"
    stats_record["failure_count"] = row["failure_count"]
613
+
614
+
615
def row_needs_processing(row: Dict[str, str], args: argparse.Namespace) -> bool:
    """Decide whether a manifest row still needs any pipeline work.

    Any --force-* flag forces reprocessing. Rows that already hit the failure
    cap are left alone. Otherwise work remains when metadata is not ok, when
    subtitles are neither done nor terminal (missing/skipped count as terminal
    so caption-less videos are not retried forever), or when video download is
    enabled and not yet ok.
    """
    if args.force_metadata or args.force_subtitles or args.force_download:
        return True
    if row_failure_count(row) >= args.max_failures_before_skip:
        return False

    metadata_done = (row.get("metadata_status") or "").strip() == "ok"
    subtitle_state = (row.get("subtitle_status") or "").strip()
    subtitles_done = args.skip_subtitles or subtitle_state in {"ok", "missing", "skipped"}
    download_done = args.skip_video_download or (row.get("download_status") or "").strip() == "ok"
    return not (metadata_done and subtitles_done and download_done)
629
 
 
674
  row["error"] = metadata_error
675
  row["processed_at"] = time.strftime("%Y-%m-%d %H:%M:%S")
676
  stats_record["metadata_status"] = "failed"
 
677
  stats_record["updated_at"] = row["processed_at"]
678
+ failure_count, should_skip = record_row_failure(row, stats_record, metadata_error, args.max_failures_before_skip)
679
+ stats_record["last_error"] = row["error"]
680
  write_manifest(args.output_metadata_csv, rows, fieldnames)
681
  save_stats(args.stats_npz, stats)
682
  print(f" metadata failed: {metadata_error}")
683
+ if should_skip:
684
+ print(f" skipping after {failure_count} failures")
685
  if args.sleep_seconds > 0:
686
  time.sleep(args.sleep_seconds)
687
  continue
 
720
  row["subtitle_status"] = "partial" if subtitle_payloads else "failed"
721
 
722
  row["subtitle_languages"] = "|".join(sorted(subtitle_payloads))
723
+ row["subtitle_dir_path"] = repo_relative_or_absolute(subtitle_dir) if subtitle_payloads else ""
724
  subtitle_en, subtitle_en_source = select_english_subtitle(subtitle_payloads)
725
  row["subtitle_en_source"] = subtitle_en_source
726
  if "subtitle_texts_json" in row:
 
736
  existing_video = find_video_file(args.raw_video_dir, video_id)
737
  if args.skip_video_download:
738
  row["download_status"] = "skipped"
739
+ row["raw_video_path"] = repo_relative_or_absolute(existing_video) if existing_video else ""
740
  else:
741
  if existing_video is None or args.force_download:
742
  row["raw_video_path"], download_error = download_video(video_id, args.raw_video_dir, args)
743
  else:
744
+ row["raw_video_path"] = repo_relative_or_absolute(existing_video)
745
  row["download_status"] = "ok" if row["raw_video_path"] else "failed"
746
  except Exception as exc:
747
  download_error = str(exc)
 
763
  stats_record["metadata_status"] = row["metadata_status"]
764
  stats_record["subtitle_status"] = row["subtitle_status"]
765
  stats_record["download_status"] = row["download_status"]
766
+ if errors:
767
+ failure_count, should_skip = record_row_failure(row, stats_record, row["error"], args.max_failures_before_skip)
768
+ else:
769
+ reset_row_failures(row, stats_record)
770
+ failure_count, should_skip = 0, False
771
+ stats_record["download_status"] = row["download_status"]
772
  stats_record["last_error"] = row["error"]
773
  stats_record["updated_at"] = row["processed_at"]
774
  write_manifest(args.output_metadata_csv, rows, fieldnames)
 
778
  print(f" video download failed: {download_error}")
779
  if row["subtitle_status"] in {"failed", "partial"}:
780
  print(f" subtitle status: {row['subtitle_status']} {subtitle_error}")
781
+ if should_skip:
782
+ print(f" skipping after {failure_count} failures")
783
 
784
  if args.sleep_seconds > 0:
785
  time.sleep(args.sleep_seconds)
scripts/pipeline03_upload_to_huggingface.py CHANGED
File without changes
slurm/process_dwpose_array.slurm ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
#SBATCH --job-name=dwpose
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --gres=gpu:1
#SBATCH --mem=32G
#SBATCH --time=24:00:00
#SBATCH --output=%x_%A_%a.out
#SBATCH --error=%x_%A_%a.err

# One Slurm array task == one video: run the pipeline02 DWPose extraction for
# the video id found at line (SLURM_ARRAY_TASK_ID + 1) of MANIFEST.
set -euo pipefail

# NOTE(review): under sbatch, $0 points at the spooled copy of this script, so
# the dirname fallback is unreliable — the submitter is expected to export
# ROOT_DIR explicitly (submit_dwpose_slurm.sh does).
ROOT_DIR="${ROOT_DIR:-$(cd "$(dirname "$0")/.." && pwd)}"
CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
CONDA_ENV="${CONDA_ENV:-dwpose}"
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$ROOT_DIR/raw_video}"
DATASET_DIR="${DATASET_DIR:-$ROOT_DIR/dataset}"
STATS_NPZ="${STATS_NPZ:-$ROOT_DIR/stats.npz}"
PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
FPS="${FPS:-24}"
TMP_ROOT="${TMP_ROOT:-${SLURM_TMPDIR:-/tmp}}"
FORCE_PROCESS="${FORCE_PROCESS:-0}"
DELETE_SOURCE_ON_SUCCESS="${DELETE_SOURCE_ON_SUCCESS:-0}"
CLAIM_DIR="${CLAIM_DIR:-$ROOT_DIR/slurm/state/claims}"

fail() {
  echo "$1" >&2
  exit 1
}

MANIFEST="${MANIFEST:-${1:-}}"
[[ -n "$MANIFEST" ]] || fail "MANIFEST is required (env var or first positional arg)."
[[ -f "$MANIFEST" ]] || fail "Manifest not found: $MANIFEST"
[[ -n "${SLURM_ARRAY_TASK_ID:-}" ]] || fail "SLURM_ARRAY_TASK_ID is required."
[[ -f "$CONDA_SH" ]] || fail "Missing conda init script: $CONDA_SH"

# Manifest lines are 1-indexed; array task ids are 0-indexed.
VIDEO_ID="$(sed -n "$((SLURM_ARRAY_TASK_ID + 1))p" "$MANIFEST")"
[[ -n "$VIDEO_ID" ]] || fail "No video id found for task index ${SLURM_ARRAY_TASK_ID} in manifest $MANIFEST"

# Drop this task's claim file on every exit path so the submitter can
# re-queue the video if processing did not complete.
CLAIM_PATH="$CLAIM_DIR/${VIDEO_ID}.claim"
cleanup_claim() {
  rm -f "$CLAIM_PATH"
}
trap cleanup_claim EXIT

# Keep BLAS/OpenMP thread pools in line with the allocated CPUs.
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"
export MKL_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"

echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset} video_id=$VIDEO_ID"

# shellcheck disable=SC1090
source "$CONDA_SH"

pipeline_args=(
  --raw-video-dir "$RAW_VIDEO_DIR"
  --dataset-dir "$DATASET_DIR"
  --stats-npz "$STATS_NPZ"
  --fps "$FPS"
  --workers 1
  --tmp-root "$TMP_ROOT"
  --video-ids="$VIDEO_ID"
)
if [[ "$FORCE_PROCESS" == "1" ]]; then
  pipeline_args+=(--force)
fi
if [[ "$DELETE_SOURCE_ON_SUCCESS" == "1" ]]; then
  pipeline_args+=(--delete-source-on-success)
fi

CONDA_NO_PLUGINS=true conda run -n "$CONDA_ENV" python "$PIPELINE02" "${pipeline_args[@]}"
slurm/run_reproduce_independently_slurm.slurm ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
#SBATCH --job-name=sign-dwpose-orch
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=2
#SBATCH --mem=8G
#SBATCH --time=24:00:00
#SBATCH --output=%x_%A.out
#SBATCH --error=%x_%A.err

# Thin Slurm wrapper: run the download/metadata orchestration script on a
# compute node. STAGE is only echoed for log context; the underlying script
# reads its own configuration from the environment.
set -euo pipefail

# NOTE(review): under sbatch, $0 is the spooled script copy; pass ROOT_DIR
# explicitly (sbatch --export=ALL,ROOT_DIR=...) when the default is wrong.
ROOT_DIR="${ROOT_DIR:-$(cd "$(dirname "$0")/.." && pwd)}"
SCRIPT="$ROOT_DIR/reproduce_independently_slurm.sh"

if [[ ! -x "$SCRIPT" ]]; then
  echo "Missing script: $SCRIPT" >&2
  exit 1
fi

echo "[$(date '+%F %T')] host=$(hostname) running orchestration stage=${STAGE:-all}"
exec bash "$SCRIPT" --run-local
slurm/submit_dwpose_slurm.sh ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ set -euo pipefail
3
+
4
+ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
5
+ CONDA_SH="/home/sf895/miniconda3/etc/profile.d/conda.sh"
6
+ CONDA_ENV="signx2"
7
+ RAW_VIDEO_DIR="$ROOT_DIR/raw_video"
8
+ DATASET_DIR="$ROOT_DIR/dataset"
9
+ STATS_NPZ="$ROOT_DIR/stats.npz"
10
+ SLURM_SCRIPT="$ROOT_DIR/slurm/process_dwpose_array.slurm"
11
+ MANIFEST_DIR="$ROOT_DIR/slurm/manifests"
12
+ LOG_DIR="$ROOT_DIR/slurm/logs"
13
+ STATE_DIR="$ROOT_DIR/slurm/state"
14
+ CLAIM_DIR="$STATE_DIR/claims"
15
+ LOCK_FILE="$STATE_DIR/submit.lock"
16
+ PARTITIONS="gpu"
17
+ ACCOUNT=""
18
+ TIME_LIMIT="24:00:00"
19
+ CPUS_PER_TASK="8"
20
+ MEMORY="32G"
21
+ FPS="24"
22
+ LIMIT=""
23
+ ARRAY_PARALLEL=""
24
+ MAX_BACKLOG_VIDEOS="340"
25
+ FORCE_PROCESS=0
26
+ DELETE_SOURCE_ON_SUCCESS=0
27
+
28
# Print CLI help to stdout. The heredoc delimiter is unquoted but the text
# contains no expansions, so the output is fixed.
usage() {
  cat <<USAGE
Usage:
  bash slurm/submit_dwpose_slurm.sh [options]

Options:
  --partitions P1[,P2,...]    Comma-separated partitions. Default: gpu
  --account NAME              Optional Slurm account
  --time HH:MM:SS             Default: 24:00:00
  --cpus-per-task N           Default: 8
  --mem SIZE                  Default: 32G
  --fps N                     Default: 24
  --limit N                   Only submit the first N pending, unclaimed videos this cycle
  --max-backlog-videos N      Max claimed queued/running videos allowed at once. Default: 340
  --array-parallel N          Add a %N cap to each array
  --force-process             Re-run videos even if marked complete
  --delete-source-on-success  Delete raw videos after successful processing
  --help

Behavior:
  - Uses a claim directory to avoid resubmitting videos that are already queued/running.
  - Cleans stale claims whose Slurm jobs are no longer active.
  - Builds a manifest of pending raw videos.
  - Submits one Slurm array per partition.
  - Each array task uses 1 GPU and processes exactly 1 video.

Examples:
  bash slurm/submit_dwpose_slurm.sh
  bash slurm/submit_dwpose_slurm.sh --partitions gpu --array-parallel 32
  bash slurm/submit_dwpose_slurm.sh --partitions gpu --limit 500
USAGE
}
60
+
61
# Parse CLI flags; each option simply overrides one of the defaults above.
# Unknown arguments print usage to stderr and fail.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --partitions) PARTITIONS="$2"; shift 2 ;;
    --account) ACCOUNT="$2"; shift 2 ;;
    --time) TIME_LIMIT="$2"; shift 2 ;;
    --cpus-per-task) CPUS_PER_TASK="$2"; shift 2 ;;
    --mem) MEMORY="$2"; shift 2 ;;
    --fps) FPS="$2"; shift 2 ;;
    --limit) LIMIT="$2"; shift 2 ;;
    --max-backlog-videos) MAX_BACKLOG_VIDEOS="$2"; shift 2 ;;
    --array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
    --force-process) FORCE_PROCESS=1; shift ;;
    --delete-source-on-success) DELETE_SOURCE_ON_SUCCESS=1; shift ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 1
      ;;
  esac
done
118
+
119
mkdir -p "$MANIFEST_DIR" "$LOG_DIR" "$CLAIM_DIR"
# Single-instance guard: FD 9 holds a non-blocking flock for the lifetime of
# the script, so overlapping cron/watcher invocations exit quietly.
exec 9>"$LOCK_FILE"
if ! flock -n 9; then
  echo "Another submit_dwpose_slurm.sh instance is running; skip this cycle."
  exit 0
fi

IFS=',' read -r -a PARTITION_LIST <<< "$PARTITIONS"

TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
BASE_MANIFEST="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.txt"
ACTIVE_JOBS_FILE="$STATE_DIR/active_jobs_${TIMESTAMP}.txt"
# Snapshot this user's active Slurm job ids; array task rows like "123_4"
# are collapsed to the master job id "123".
squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"

# Embedded Python builds the manifest of pending, unclaimed videos, prints
# the selected count on stdout, and garbage-collects claims whose Slurm job
# is no longer active. Selection stops at the free backlog slots and at
# --limit. The heredoc delimiter is quoted, so nothing is expanded by bash.
PENDING_COUNT="$({
  cd "$ROOT_DIR"
  python - "$ROOT_DIR" "$RAW_VIDEO_DIR" "$DATASET_DIR" "$STATS_NPZ" "$LIMIT" "$FORCE_PROCESS" "$BASE_MANIFEST" "$CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$MAX_BACKLOG_VIDEOS" <<'PY'
import sys
from pathlib import Path

root_dir = Path(sys.argv[1])
raw_video_dir = Path(sys.argv[2])
dataset_dir = Path(sys.argv[3])
stats_npz = Path(sys.argv[4])
limit_arg = sys.argv[5]
force = sys.argv[6] == "1"
manifest_path = Path(sys.argv[7])
claim_dir = Path(sys.argv[8])
active_jobs_path = Path(sys.argv[9])
max_backlog = int(sys.argv[10])
limit = int(limit_arg) if limit_arg else None

sys.path.insert(0, str(root_dir))
from utils.stats_npz import load_stats

video_extensions = {".mp4", ".mkv", ".webm", ".mov"}
stats = load_stats(stats_npz)
claim_dir.mkdir(parents=True, exist_ok=True)
active_jobs = set()
if active_jobs_path.exists():
    active_jobs = {line.strip() for line in active_jobs_path.read_text(encoding="utf-8").splitlines() if line.strip()}

active_claims = set()
for claim_path in claim_dir.glob("*.claim"):
    try:
        lines = claim_path.read_text(encoding="utf-8").splitlines()
    except OSError:
        continue
    job_id = ""
    for line in lines:
        if line.startswith("job_id="):
            job_id = line.split("=", 1)[1].strip()
            break
    video_id = claim_path.stem
    if job_id and job_id in active_jobs:
        active_claims.add(video_id)
    else:
        claim_path.unlink(missing_ok=True)

remaining_slots = max(0, max_backlog - len(active_claims))
if remaining_slots == 0:
    manifest_path.write_text("", encoding="utf-8")
    print(0)
    raise SystemExit(0)

selected = []
if raw_video_dir.exists():
    for path in sorted(raw_video_dir.iterdir()):
        if not path.is_file() or path.suffix.lower() not in video_extensions:
            continue
        video_id = path.stem
        if video_id in active_claims:
            continue
        npz_dir = dataset_dir / video_id / "npz"
        complete_marker = npz_dir / ".complete"
        if not force and npz_dir.exists() and complete_marker.exists() and stats.get(video_id, {}).get("process_status") == "ok":
            continue
        selected.append(video_id)
        if len(selected) >= remaining_slots:
            break
        if limit is not None and len(selected) >= limit:
            break
manifest_path.write_text("".join(f"{video_id}\n" for video_id in selected), encoding="utf-8")
print(len(selected))
PY
})"
rm -f "$ACTIVE_JOBS_FILE"

if [[ "$PENDING_COUNT" == "0" ]]; then
  echo "No pending raw videos to process, or max backlog $MAX_BACKLOG_VIDEOS already reached."
  rm -f "$BASE_MANIFEST"
  exit 0
fi

echo "Created manifest: $BASE_MANIFEST"
echo "Pending videos selected this cycle: $PENDING_COUNT"
215
+
216
# Record one claim file per manifest entry so later submit cycles know these
# videos are already owned by Slurm job $2. Blank manifest lines are skipped.
write_claims() {
  local manifest="$1"
  local job_id="$2"
  local video_id
  while IFS= read -r video_id; do
    [[ -z "$video_id" ]] && continue
    printf 'job_id=%s\nvideo_id=%s\nsubmitted_at=%s\n' \
      "$job_id" "$video_id" "$(date '+%F %T')" \
      > "$CLAIM_DIR/${video_id}.claim"
  done < "$manifest"
}
228
+
229
# Submit one job array on partition $1 for the $3 video ids listed in
# manifest $2, then write claim files tagged with the new Slurm job id.
# Returns 1 (without writing claims) when the sbatch job id cannot be parsed.
submit_partition() {
  local partition="$1"
  local manifest="$2"
  local count="$3"
  # Tasks are 0-indexed; task N reads manifest line N+1.
  local array_spec="0-$((count - 1))"
  local job_output job_id
  if [[ -n "$ARRAY_PARALLEL" ]]; then
    # Cap the number of simultaneously running array tasks.
    array_spec+="%${ARRAY_PARALLEL}"
  fi

  # Everything the worker script needs is handed over via --export so it
  # does not depend on $0-relative path discovery on the compute node.
  local -a cmd=(sbatch
    --partition "$partition"
    --array "$array_spec"
    --cpus-per-task "$CPUS_PER_TASK"
    --mem "$MEMORY"
    --time "$TIME_LIMIT"
    --output "$LOG_DIR/dwpose_${partition}_%A_%a.out"
    --error "$LOG_DIR/dwpose_${partition}_%A_%a.err"
    --export "ALL,ROOT_DIR=$ROOT_DIR,CONDA_SH=$CONDA_SH,CONDA_ENV=$CONDA_ENV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,DATASET_DIR=$DATASET_DIR,STATS_NPZ=$STATS_NPZ,FPS=$FPS,FORCE_PROCESS=$FORCE_PROCESS,DELETE_SOURCE_ON_SUCCESS=$DELETE_SOURCE_ON_SUCCESS,MANIFEST=$manifest,CLAIM_DIR=$CLAIM_DIR"
  )
  if [[ -n "$ACCOUNT" ]]; then
    cmd+=(--account "$ACCOUNT")
  fi
  cmd+=("$SLURM_SCRIPT")

  echo "Submitting partition=$partition array=$array_spec manifest=$manifest"
  job_output="$("${cmd[@]}")"
  echo "$job_output"
  # sbatch prints "Submitted batch job <id>"; take the last match.
  job_id="$(awk '/Submitted batch job/ {print $4}' <<< "$job_output" | tail -n 1)"
  if [[ -z "$job_id" ]]; then
    echo "Failed to parse sbatch job id from output: $job_output" >&2
    return 1
  fi
  write_claims "$manifest" "$job_id"
}
264
+
265
# Single partition: submit the whole manifest as one array.
if [[ ${#PARTITION_LIST[@]} -eq 1 ]]; then
  submit_partition "${PARTITION_LIST[0]}" "$BASE_MANIFEST" "$PENDING_COUNT"
  echo "SUBMITTED_VIDEO_COUNT=$PENDING_COUNT"
  exit 0
fi

# Multiple partitions: round-robin the manifest into one shard per partition
# (line i goes to partition i mod N) and submit each non-empty shard.
for idx in "${!PARTITION_LIST[@]}"; do
  shard_manifest="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.part${idx}.txt"
  awk -v mod="${#PARTITION_LIST[@]}" -v rem="$idx" '((NR-1) % mod) == rem { print }' "$BASE_MANIFEST" > "$shard_manifest"
  shard_count="$(wc -l < "$shard_manifest" | tr -d '[:space:]')"
  if [[ "$shard_count" == "0" ]]; then
    rm -f "$shard_manifest"
    continue
  fi
  submit_partition "${PARTITION_LIST[$idx]}" "$shard_manifest" "$shard_count"
done

echo "SUBMITTED_VIDEO_COUNT=$PENDING_COUNT"
slurm/watch_submit_dwpose.slurm ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
#SBATCH --job-name=dwpose-submit
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=2G
#SBATCH --time=24:00:00
#SBATCH --output=%x_%A.out
#SBATCH --error=%x_%A.err

# Long-lived submitter loop: every SCAN_INTERVAL_SECONDS, invoke
# submit_dwpose_slurm.sh to top up the DWPose job-array backlog.
set -euo pipefail

# NOTE(review): under sbatch, $0 is the spooled copy of this script; pass
# ROOT_DIR via --export when the default is wrong.
ROOT_DIR="${ROOT_DIR:-$(cd "$(dirname "$0")/.." && pwd)}"
SUBMIT_SCRIPT="${SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_dwpose_slurm.sh}"
GPU_PARTITIONS="${GPU_PARTITIONS:-gpu}"
GPU_ACCOUNT="${GPU_ACCOUNT:-}"
SCAN_INTERVAL_SECONDS="${SCAN_INTERVAL_SECONDS:-60}"
SUBMIT_LIMIT="${SUBMIT_LIMIT:-200}"
ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-340}"
FPS="${FPS:-24}"
MAX_ITERATIONS="${MAX_ITERATIONS:-0}"   # 0 = loop forever
FORCE_PROCESS="${FORCE_PROCESS:-0}"
DELETE_SOURCE_ON_SUCCESS="${DELETE_SOURCE_ON_SUCCESS:-0}"

iteration=0
while true; do
  iteration=$((iteration + 1))
  echo "[$(date '+%F %T')] submitter iteration=$iteration"

  cmd=(bash "$SUBMIT_SCRIPT"
    --partitions "$GPU_PARTITIONS"
    --limit "$SUBMIT_LIMIT"
    --max-backlog-videos "$MAX_BACKLOG_VIDEOS"
    --fps "$FPS"
  )
  if [[ -n "$GPU_ACCOUNT" ]]; then
    cmd+=(--account "$GPU_ACCOUNT")
  fi
  if [[ -n "$ARRAY_PARALLEL" ]]; then
    cmd+=(--array-parallel "$ARRAY_PARALLEL")
  fi
  if [[ "$FORCE_PROCESS" == "1" ]]; then
    cmd+=(--force-process)
  fi
  if [[ "$DELETE_SOURCE_ON_SUCCESS" == "1" ]]; then
    cmd+=(--delete-source-on-success)
  fi

  # Fix: a transient scheduler error (sbatch/squeue hiccup) used to kill this
  # long-lived watcher via set -e, stalling all submissions until manual
  # restart. Log the failure and retry on the next cycle instead.
  if ! "${cmd[@]}"; then
    echo "[$(date '+%F %T')] submit cycle failed; retrying in ${SCAN_INTERVAL_SECONDS}s" >&2
  fi

  if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -ge "$MAX_ITERATIONS" ]]; then
    echo "Reached max iterations: $MAX_ITERATIONS"
    exit 0
  fi

  sleep "$SCAN_INTERVAL_SECONDS"
done