#!/usr/bin/env bash
#
# Orchestrates the SignVerse-2M reproduction pipeline end-to-end:
# download videos/captions (pipeline01), extract DWPose (pipeline02 via
# Slurm), and upload archives to HuggingFace (pipeline03).
# Every setting below can be overridden from the environment.
set -euo pipefail

# --- Core locations and conda environment ---
ROOT_DIR="${ROOT_DIR:-/home/sf895/SignVerse-2M}"
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
STATE_ROOT="${STATE_ROOT:-/home/sf895/SignVerse-2M-runtime}"
CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
CONDA_ENV="${CONDA_ENV:-signx2}"

# --- Metadata CSVs and data directories ---
SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_ori.csv}"
# Fall back to the repository copy when the runtime CSV does not exist yet.
if [[ ! -f "$SOURCE_METADATA_CSV" && -f "$ROOT_DIR/SignVerse-2M-metadata_ori.csv" ]]; then
  SOURCE_METADATA_CSV="$ROOT_DIR/SignVerse-2M-metadata_ori.csv"
fi
OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv}"
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
ARCHIVE_DIR="${ARCHIVE_DIR:-$RUNTIME_ROOT/archives}"
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
PROGRESS_JSON="${PROGRESS_JSON:-$RUNTIME_ROOT/archive_upload_progress.json}"

# --- Pipeline entry points ---
PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
PIPELINE03="${PIPELINE03:-$ROOT_DIR/scripts/pipeline03_upload_to_huggingface.py}"
PIPELINE_SYNC="${PIPELINE_SYNC:-$ROOT_DIR/scripts/sync_processed_csv_from_runtime.py}"

# --- Stage selection, batching and backpressure ---
STAGE="${STAGE:-all}"
LIMIT="${LIMIT:-}"
VIDEO_IDS=()
FPS="${FPS:-24}"
WORKERS="${WORKERS:-}"
TARGET_BYTES="${TARGET_BYTES:-$((10 * 1024 * 1024 * 1024))}"  # 10 GiB per upload batch
TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
DOWNLOAD_BATCH_SIZE="${DOWNLOAD_BATCH_SIZE:-1}"
DOWNLOAD_WORKERS="${DOWNLOAD_WORKERS:-60}"
USE_SLURM_DOWNLOAD="${USE_SLURM_DOWNLOAD:-1}"
PROCESS_BATCH_SIZE="${PROCESS_BATCH_SIZE:-}"
MIN_PROCESS_START_BACKLOG="${MIN_PROCESS_START_BACKLOG:-4}"
PROCESS_PENDING_TIMEOUT_SECONDS="${PROCESS_PENDING_TIMEOUT_SECONDS:-1800}"
RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-180}"
MAX_RAW_VIDEO_BYTES="${MAX_RAW_VIDEO_BYTES:-0}"  # 0 = no byte cap on raw_video
MAX_ITERATIONS="${MAX_ITERATIONS:-0}"            # 0 = loop until work runs out
IDLE_SLEEP_SECONDS="${IDLE_SLEEP_SECONDS:-5}"

# --- Upload target and yt-dlp authentication ---
REPO_ID="${REPO_ID:-SignerX/SignVerse-2M}"
COOKIES_FILE="${COOKIES_FILE:-$ROOT_DIR/www.youtube.com_cookies (2).txt}"
COOKIES_FROM_BROWSER="${COOKIES_FROM_BROWSER:-}"
EXTRACTOR_ARGS="${EXTRACTOR_ARGS:-}"

# --- Slurm submission knobs ---
SLURM_PROCESS_SUBMIT_SCRIPT="${SLURM_PROCESS_SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_dwpose_slurm.sh}"
SLURM_DOWNLOAD_SUBMIT_SCRIPT="${SLURM_DOWNLOAD_SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_download_slurm.sh}"
# Env-overridable for consistency with every other knob in this script
# (the original hard-coded this one value).
GPU_PARTITIONS="${GPU_PARTITIONS:-gpu,gpu-redhat,cgpu}"
GPU_ACCOUNT="${GPU_ACCOUNT:-}"
ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-180}"
DOWNLOAD_CLAIM_DIR="${DOWNLOAD_CLAIM_DIR:-$STATE_ROOT/slurm/state/download_claims}"
DOWNLOAD_CSV_LOCK_PATH="${DOWNLOAD_CSV_LOCK_PATH:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv.lock}"
DOWNLOAD_PARTITIONS="${DOWNLOAD_PARTITIONS:-main}"
DOWNLOAD_ACCOUNT="${DOWNLOAD_ACCOUNT:-}"
DOWNLOAD_TIME="${DOWNLOAD_TIME:-04:00:00}"
DOWNLOAD_CPUS_PER_TASK="${DOWNLOAD_CPUS_PER_TASK:-1}"
DOWNLOAD_MEM="${DOWNLOAD_MEM:-4G}"
DOWNLOAD_ARRAY_PARALLEL="${DOWNLOAD_ARRAY_PARALLEL:-32}"
DOWNLOAD_MAX_ACTIVE="${DOWNLOAD_MAX_ACTIVE:-60}"
DOWNLOAD_START_STAGGER_MIN="${DOWNLOAD_START_STAGGER_MIN:-1}"
DOWNLOAD_START_STAGGER_MAX="${DOWNLOAD_START_STAGGER_MAX:-3}"
DOWNLOAD_CLAIM_GRACE_SECONDS="${DOWNLOAD_CLAIM_GRACE_SECONDS:-600}"
DOWNLOAD_PARTIAL_TIMEOUT_SECONDS="${DOWNLOAD_PARTIAL_TIMEOUT_SECONDS:-1800}"
ORCHESTRATOR_PARTITION="${ORCHESTRATOR_PARTITION:-main}"
ORCHESTRATOR_ACCOUNT="${ORCHESTRATOR_ACCOUNT:-}"
ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-24:00:00}"
ORCHESTRATOR_CPUS_PER_TASK="${ORCHESTRATOR_CPUS_PER_TASK:-4}"
ORCHESTRATOR_MEM="${ORCHESTRATOR_MEM:-8G}"

# --- Behaviour toggles (0/1 flags) ---
RUN_LOCAL="${RUN_LOCAL:-0}"
FORCE_METADATA="${FORCE_METADATA:-0}"
FORCE_SUBTITLES="${FORCE_SUBTITLES:-0}"
FORCE_DOWNLOAD="${FORCE_DOWNLOAD:-0}"
FORCE_PROCESS="${FORCE_PROCESS:-0}"
SKIP_VIDEO_DOWNLOAD="${SKIP_VIDEO_DOWNLOAD:-0}"
SKIP_SUBTITLES="${SKIP_SUBTITLES:-0}" DRY_RUN_UPLOAD="${DRY_RUN_UPLOAD:-0}" print_usage() { cat <&2 print_usage exit 1 ;; esac done if [[ ! -f "$CONDA_SH" ]]; then echo "Missing conda init script: $CONDA_SH" >&2 exit 1 fi if [[ "$STAGE" != "all" && "$STAGE" != "download" && "$STAGE" != "process" && "$STAGE" != "upload" ]]; then echo "Invalid --stage: $STAGE" >&2 exit 1 fi if [[ -z "${SLURM_JOB_ID:-}" && "$RUN_LOCAL" != "1" ]]; then wrapper="$ROOT_DIR/slurm/run_reproduce_independently_slurm.slurm" if [[ ! -f "$wrapper" ]]; then echo "Missing orchestration wrapper: $wrapper" >&2 exit 1 fi export_args="ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,ARCHIVE_DIR=$ARCHIVE_DIR,STATS_NPZ=$STATS_NPZ,PROGRESS_JSON=$PROGRESS_JSON,STAGE=$STAGE,LIMIT=$LIMIT,FPS=$FPS,WORKERS=$WORKERS,TARGET_BYTES=$TARGET_BYTES,DOWNLOAD_BATCH_SIZE=$DOWNLOAD_BATCH_SIZE,DOWNLOAD_WORKERS=$DOWNLOAD_WORKERS,USE_SLURM_DOWNLOAD=$USE_SLURM_DOWNLOAD,SLURM_DOWNLOAD_SUBMIT_SCRIPT=$SLURM_DOWNLOAD_SUBMIT_SCRIPT,DOWNLOAD_PARTITIONS=$DOWNLOAD_PARTITIONS,DOWNLOAD_ACCOUNT=$DOWNLOAD_ACCOUNT,DOWNLOAD_TIME=$DOWNLOAD_TIME,DOWNLOAD_CPUS_PER_TASK=$DOWNLOAD_CPUS_PER_TASK,DOWNLOAD_MEM=$DOWNLOAD_MEM,DOWNLOAD_ARRAY_PARALLEL=$DOWNLOAD_ARRAY_PARALLEL,DOWNLOAD_MAX_ACTIVE=$DOWNLOAD_MAX_ACTIVE,DOWNLOAD_START_STAGGER_MIN=$DOWNLOAD_START_STAGGER_MIN,DOWNLOAD_START_STAGGER_MAX=$DOWNLOAD_START_STAGGER_MAX,PROCESS_BATCH_SIZE=$PROCESS_BATCH_SIZE,DOWNLOAD_CLAIM_DIR=$DOWNLOAD_CLAIM_DIR,DOWNLOAD_CSV_LOCK_PATH=$DOWNLOAD_CSV_LOCK_PATH,MIN_PROCESS_START_BACKLOG=$MIN_PROCESS_START_BACKLOG,RAW_BACKLOG_LIMIT=$RAW_BACKLOG_LIMIT,MAX_RAW_VIDEO_BYTES=$MAX_RAW_VIDEO_BYTES,MAX_ITERATIONS=$MAX_ITERATIONS,IDLE_SLEEP_SECONDS=$IDLE_SLEEP_SECONDS,REPO_ID=$REPO_ID,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROW
SER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,GPU_PARTITIONS=$GPU_PARTITIONS,GPU_ACCOUNT=$GPU_ACCOUNT,ARRAY_PARALLEL=$ARRAY_PARALLEL,MAX_BACKLOG_VIDEOS=$MAX_BACKLOG_VIDEOS,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,FORCE_PROCESS=$FORCE_PROCESS,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,DRY_RUN_UPLOAD=$DRY_RUN_UPLOAD,RUN_LOCAL=1" if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then export VIDEO_IDS_JOINED VIDEO_IDS_JOINED="${VIDEO_IDS[*]}" export_args+=",VIDEO_IDS_JOINED=$VIDEO_IDS_JOINED" fi cmd=(sbatch --partition "$ORCHESTRATOR_PARTITION" --time "$ORCHESTRATOR_TIME" --cpus-per-task "$ORCHESTRATOR_CPUS_PER_TASK" --mem "$ORCHESTRATOR_MEM" --export "$export_args" ) if [[ -n "$ORCHESTRATOR_ACCOUNT" ]]; then cmd+=(--account "$ORCHESTRATOR_ACCOUNT") fi cmd+=("$wrapper") echo "Submitting full orchestration job on partition=$ORCHESTRATOR_PARTITION stage=$STAGE" "${cmd[@]}" exit 0 fi if [[ -n "${VIDEO_IDS_JOINED:-}" && ${#VIDEO_IDS[@]} -eq 0 ]]; then IFS=' ' read -r -a VIDEO_IDS <<< "$VIDEO_IDS_JOINED" fi mkdir -p "$RAW_VIDEO_DIR" "$RAW_CAPTION_DIR" "$RAW_METADATA_DIR" "$DATASET_DIR" if [[ ! 
-x "$SLURM_PROCESS_SUBMIT_SCRIPT" ]]; then
  echo "Missing Slurm submit script: $SLURM_PROCESS_SUBMIT_SCRIPT" >&2
  exit 1
fi

# Run a command inside the configured conda environment.
run_in_dwpose() {
  # shellcheck disable=SC1090
  source "$CONDA_SH"
  CONDA_NO_PLUGINS=true conda run -n "$CONDA_ENV" "$@"
}

# Download stage: either submit a Slurm download array (USE_SLURM_DOWNLOAD=1)
# or fan out DOWNLOAD_WORKERS local pipeline01 workers.
# $1 (optional): per-invocation item limit; defaults to $LIMIT.
# Returns non-zero if the submit script or any local worker fails.
run_download_stage() {
  local stage_limit="${1:-$LIMIT}"
  if [[ "$USE_SLURM_DOWNLOAD" == "1" ]]; then
    local cmd=(
      bash "$SLURM_DOWNLOAD_SUBMIT_SCRIPT"
      --partitions "$DOWNLOAD_PARTITIONS"
      --runtime-root "$RUNTIME_ROOT"
      --state-root "$STATE_ROOT"
      --time "$DOWNLOAD_TIME"
      --cpus-per-task "$DOWNLOAD_CPUS_PER_TASK"
      --mem "$DOWNLOAD_MEM"
      --max-backlog-videos "$RAW_BACKLOG_LIMIT"
      --workers "$DOWNLOAD_WORKERS"
      --max-active-downloads "$DOWNLOAD_MAX_ACTIVE"
      --claim-dir "$DOWNLOAD_CLAIM_DIR"
      --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
    )
    if [[ -n "$DOWNLOAD_ARRAY_PARALLEL" ]]; then cmd+=(--array-parallel "$DOWNLOAD_ARRAY_PARALLEL"); fi
    if [[ -n "$DOWNLOAD_ACCOUNT" ]]; then cmd+=(--account "$DOWNLOAD_ACCOUNT"); fi
    if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then cmd+=(--video-ids "${VIDEO_IDS[@]}"); fi
    if [[ $FORCE_METADATA -eq 1 ]]; then cmd+=(--force-metadata); fi
    if [[ $FORCE_SUBTITLES -eq 1 ]]; then cmd+=(--force-subtitles); fi
    if [[ $FORCE_DOWNLOAD -eq 1 ]]; then cmd+=(--force-download); fi
    if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then cmd+=(--skip-video-download); fi
    if [[ $SKIP_SUBTITLES -eq 1 ]]; then cmd+=(--skip-subtitles); fi
    if [[ -n "$COOKIES_FROM_BROWSER" ]]; then cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER"); fi
    if [[ -n "$COOKIES_FILE" ]]; then cmd+=(--cookies "$COOKIES_FILE"); fi
    if [[ -n "$EXTRACTOR_ARGS" ]]; then cmd+=(--extractor-args "$EXTRACTOR_ARGS"); fi
    "${cmd[@]}"
    return $?
  fi
  # Local fallback: run DOWNLOAD_WORKERS pipeline01 processes in parallel.
  local worker_count="${DOWNLOAD_WORKERS:-1}"
  local pids=()
  local failed=0
  local i
  for ((i=1; i<=worker_count; i++)); do
    local cmd=(
      python "$PIPELINE01"
      --source-metadata-csv "$SOURCE_METADATA_CSV"
      --output-metadata-csv "$OUTPUT_METADATA_CSV"
      --raw-video-dir "$RAW_VIDEO_DIR"
      --raw-caption-dir "$RAW_CAPTION_DIR"
      --raw-metadata-dir "$RAW_METADATA_DIR"
      --dataset-dir "$DATASET_DIR"
      --stats-npz "$STATS_NPZ"
      --claim-dir "$DOWNLOAD_CLAIM_DIR"
      --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
    )
    if [[ -n "$stage_limit" ]]; then cmd+=(--limit "$stage_limit"); fi
    if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then cmd+=(--video-ids "${VIDEO_IDS[@]}"); fi
    if [[ $FORCE_METADATA -eq 1 ]]; then cmd+=(--force-metadata); fi
    if [[ $FORCE_SUBTITLES -eq 1 ]]; then cmd+=(--force-subtitles); fi
    if [[ $FORCE_DOWNLOAD -eq 1 ]]; then cmd+=(--force-download); fi
    if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then cmd+=(--skip-video-download); fi
    if [[ $SKIP_SUBTITLES -eq 1 ]]; then cmd+=(--skip-subtitles); fi
    if [[ -n "$COOKIES_FROM_BROWSER" ]]; then cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER"); fi
    if [[ -n "$COOKIES_FILE" ]]; then cmd+=(--cookies "$COOKIES_FILE"); fi
    if [[ -n "$EXTRACTOR_ARGS" ]]; then cmd+=(--extractor-args "$EXTRACTOR_ARGS"); fi
    run_in_dwpose "${cmd[@]}" &
    pids+=("$!")
  done
  # Reap every worker; any failure fails the whole stage.
  local pid
  for pid in "${pids[@]}"; do
    if ! wait "$pid"; then
      failed=1
    fi
  done
  return "$failed"
}

# Set by run_process_stage to the number of videos submitted in its last call.
RUN_PROCESS_STAGE_SUBMITTED_COUNT=0

# Process stage: submit a DWPose-extraction Slurm array over the raw backlog.
# $1 (optional): per-invocation item limit; defaults to $LIMIT.
run_process_stage() {
  local stage_limit="${1:-$LIMIT}"
  local cmd=(
    bash "$SLURM_PROCESS_SUBMIT_SCRIPT"
    --partitions "$GPU_PARTITIONS"
    --fps "$FPS"
    --max-backlog-videos "$MAX_BACKLOG_VIDEOS"
    --delete-source-on-success
  )
  local output status submitted_count
  if [[ -n "$stage_limit" ]]; then cmd+=(--limit "$stage_limit"); fi
  if [[ -n "$ARRAY_PARALLEL" ]]; then cmd+=(--array-parallel "$ARRAY_PARALLEL"); fi
  if [[ -n "$GPU_ACCOUNT" ]]; then cmd+=(--account "$GPU_ACCOUNT"); fi
  if [[ $FORCE_PROCESS -eq 1 ]]; then cmd+=(--force-process); fi
  # Capture the exit status explicitly so the function is safe under
  # `set -e` even when not called inside an `if !` guard.
  status=0
  output="$("${cmd[@]}")" || status=$?
printf '%s ' "$output" submitted_count="$(awk -F= '/^SUBMITTED_VIDEO_COUNT=/{print $2}' <<< "$output" | tail -n 1)" RUN_PROCESS_STAGE_SUBMITTED_COUNT="${submitted_count:-0}" return "$status" } run_sync_csv_stage() { python "$PIPELINE_SYNC" \ --source-metadata-csv "$SOURCE_METADATA_CSV" \ --output-metadata-csv "$OUTPUT_METADATA_CSV" \ --raw-video-dir "$RAW_VIDEO_DIR" \ --raw-caption-dir "$RAW_CAPTION_DIR" \ --raw-metadata-dir "$RAW_METADATA_DIR" \ --dataset-dir "$DATASET_DIR" \ --progress-path "$PROGRESS_JSON" \ --status-journal-path "$RUNTIME_ROOT/upload_status_journal.jsonl" } run_upload_stage() { local require_target="${1:-0}" local cmd=(python "$PIPELINE03" --dataset-dir "$DATASET_DIR" --raw-video-dir "$RAW_VIDEO_DIR" --raw-caption-dir "$RAW_CAPTION_DIR" --raw-metadata-dir "$RAW_METADATA_DIR" --archive-dir "$ARCHIVE_DIR" --progress-path "$PROGRESS_JSON" --stats-npz "$STATS_NPZ" --repo-id "$REPO_ID" --target-bytes "$TARGET_BYTES" --target-folders "$TARGET_FOLDERS" ) if [[ "$require_target" == "1" ]]; then cmd+=(--require-target-bytes) fi if [[ $DRY_RUN_UPLOAD -eq 1 ]]; then cmd+=(--dry-run) fi run_in_dwpose "${cmd[@]}" } prune_processed_raw_videos() { python - <[^|]+)\|(?P[^|]+)\|(?P[^|]+)$") array_re = re.compile(r"^(\d+)_\[(.+)\]$") def expand_count(jobid_token: str) -> int: m = array_re.match(jobid_token) if not m: return 1 body = m.group(2) if "%" in body: body = body.split("%", 1)[0] total = 0 for part in body.split(","): part = part.strip() if not part: continue if "-" in part: a, b = part.split("-", 1) try: total += int(b) - int(a) + 1 except ValueError: total += 1 else: total += 1 return max(total, 1) live_download_jobs = 0 try: proc = subprocess.run(["squeue", "-u", "$USER", "-h", "-o", "%i|%j|%T"], check=False, capture_output=True, text=True) for line in (proc.stdout or "").splitlines(): m = line_re.match(line.strip()) if not m: continue if m.group("job") != "download": continue if m.group("state") not in {"RUNNING", "PENDING", "CONFIGURING"}: continue 
live_download_jobs += expand_count(m.group("jobid")) except Exception: live_download_jobs = 0 print(f"{source_rows}|{terminal_rows}|{uploaded_rows}|{live_download_jobs}") PY } cleanup_stale_download_claims() { python - < bool: if not job_id: return False if job_id in base_activity_cache: return base_activity_cache[job_id] alive = False try: proc = subprocess.run(["sacct", "-n", "-X", "-j", job_id, "--format=JobIDRaw,State", "-P"], check=False, capture_output=True, text=True) terminal_tokens = ("COMPLETED", "FAILED", "CANCELLED", "TIMEOUT", "OUT_OF_MEMORY", "NODE_FAIL", "PREEMPTED", "BOOT_FAIL", "DEADLINE", "REVOKED") for row in proc.stdout.splitlines(): if not row.strip() or "|" not in row: continue jid, state = row.split("|", 1) jid = jid.strip() state = state.strip().upper() if not jid.startswith(job_id): continue if state and not any(tok in state for tok in terminal_tokens): alive = True break except Exception: alive = job_id in base_states base_activity_cache[job_id] = alive return alive for claim_path in claim_dir.glob("*.claim"): try: lines = claim_path.read_text(encoding="utf-8").splitlines() except OSError: continue pid = None job_id = "" task_id = "" job_key = "" submitted_at = "" for line in lines: if line.startswith("pid="): try: pid = int(line.split("=", 1)[1].strip()) except ValueError: pid = None elif line.startswith("job_id="): job_id = line.split("=", 1)[1].strip() elif line.startswith("task_id="): task_id = line.split("=", 1)[1].strip() elif line.startswith("job_key="): job_key = line.split("=", 1)[1].strip() elif line.startswith("submitted_at="): submitted_at = line.split("=", 1)[1].strip() alive = False if submitted_at: try: submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S") if (now - submitted_dt) <= timedelta(seconds=int("$DOWNLOAD_CLAIM_GRACE_SECONDS")): alive = True except Exception: pass if (not alive) and job_key: alive = job_key in job_states elif (not alive) and job_id and task_id: alive = f"{job_id}_{task_id}" in 
job_states elif (not alive) and job_id: alive = base_job_alive(job_id) elif (not alive) and pid is not None: try: os.kill(pid, 0) alive = True except OSError: alive = False if not alive: claim_path.unlink(missing_ok=True) removed += 1 print(removed) PY } cleanup_stale_download_partials() { python - < timedelta(seconds=timeout_seconds) except Exception: stale_pending = False if stale_pending: subprocess.run(["scancel", job_id], check=False) claim_path.unlink(missing_ok=True) continue count += 1 print(count) PY } cleanup_stale_process_jobs() { python - < timedelta(seconds=timeout_seconds): if any(tok in reason for tok in timed_pending_reasons) or not reason: should_cancel = True except Exception: pass if should_cancel: subprocess.run(["scancel", job_id], check=False) cancelled_jobs.add(job_id) claim_path.unlink(missing_ok=True) removed_claims += 1 print(f"{len(cancelled_jobs)}|{removed_claims}") PY } cleanup_orphan_pending_process_jobs() { python - < timedelta(seconds=timeout_seconds): should_cancel = True except Exception: pass if should_cancel: subprocess.run(["scancel", job_id.strip()], check=False) cancelled.add(job_id.strip()) except Exception: pass print(len(cancelled)) PY } count_complete_pending_upload() { python - </dev/null || true)" [[ "$raw_video_bytes" =~ ^[0-9]+$ ]] || raw_video_bytes=0 echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned stale_download_claims=$stale_download_claims stale_download_partials=$stale_download_partials stale_process_jobs=$stale_process_jobs stale_process_claims=$stale_process_claims stale_orphan_process_jobs=$stale_orphan_process_jobs" if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then echo "[download] reached max iterations: $MAX_ITERATIONS" break fi if [[ "$pending_download" -eq 0 ]]; then run_sync_csv_stage >/dev/null 2>&1 || true pending_download="$(count_pending_downloads 2>/dev/null || 
true)"
      [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
      if [[ "$pending_download" -eq 0 ]]; then
        # Double-check with the stop guard before exiting: live Slurm
        # download jobs or non-terminal CSV rows mean work may still appear.
        local stop_guard source_rows terminal_rows uploaded_rows live_download_jobs
        stop_guard="$(count_download_stop_guard 2>/dev/null || true)"
        IFS="|" read -r source_rows terminal_rows uploaded_rows live_download_jobs <<< "$stop_guard"
        [[ "$source_rows" =~ ^[0-9]+$ ]] || source_rows=0
        [[ "$terminal_rows" =~ ^[0-9]+$ ]] || terminal_rows=0
        [[ "$uploaded_rows" =~ ^[0-9]+$ ]] || uploaded_rows=0
        [[ "$live_download_jobs" =~ ^[0-9]+$ ]] || live_download_jobs=0
        if [[ "$live_download_jobs" -gt 0 || "$terminal_rows" -lt "$source_rows" ]]; then
          echo "[download] stop guard blocked exit: pending_download=$pending_download source_rows=$source_rows terminal_rows=$terminal_rows uploaded_rows=$uploaded_rows live_download_jobs=$live_download_jobs"
          sleep "$IDLE_SLEEP_SECONDS"
          continue
        fi
        echo "[download] nothing left to download"
        break
      fi
    fi
    # Backpressure: pause downloading while the raw backlog is too deep.
    if [[ "$pending_process" -ge "$RAW_BACKLOG_LIMIT" ]]; then
      echo "[download] backpressure: raw backlog $pending_process >= limit $RAW_BACKLOG_LIMIT"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    # Backpressure: pause while raw_video disk usage exceeds the byte cap.
    if [[ "$MAX_RAW_VIDEO_BYTES" -gt 0 && "$raw_video_bytes" -ge "$MAX_RAW_VIDEO_BYTES" ]]; then
      echo "[download] backpressure: raw_video_bytes $raw_video_bytes >= limit $MAX_RAW_VIDEO_BYTES"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    if ! run_download_stage "$DOWNLOAD_BATCH_SIZE"; then
      echo "[download] pipeline01 failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    else
      run_sync_csv_stage >/dev/null 2>&1 || true
    fi
    # Fallback: if a backlog built up and no process jobs are claimed,
    # kick the process stage from here as well.
    pending_process="$(count_pending_process 2>/dev/null || true)"
    [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
    local active_process_claims
    active_process_claims="$(count_active_process_claims 2>/dev/null || true)"
    [[ "$active_process_claims" =~ ^[0-9]+$ ]] || active_process_claims=0
    if [[ "$pending_process" -ge "$MIN_PROCESS_START_BACKLOG" && "$active_process_claims" -eq 0 ]]; then
      if ! \
run_process_stage "$PROCESS_BATCH_SIZE"; then
        echo "[download] fallback process submit failed"
      else
        echo "[download] fallback process submit succeeded count=$RUN_PROCESS_STAGE_SUBMITTED_COUNT"
      fi
    fi
  done
}

# Process loop: repeatedly submit DWPose Slurm batches while respecting the
# global LIMIT, the minimum-backlog gate, and the Slurm-backlog cap.
process_loop() {
  local iteration=0
  local submitted_total=0
  echo "[process] loop started"
  while true; do
    iteration=$((iteration + 1))
    # Housekeeping: prune consumed raw videos and clear stale Slurm state.
    local pruned stale_process_result stale_process_jobs stale_process_claims stale_orphan_process_jobs
    pruned="$(prune_processed_raw_videos 2>/dev/null || true)"
    [[ "$pruned" =~ ^[0-9]+$ ]] || pruned=0
    stale_process_result="$(cleanup_stale_process_jobs 2>/dev/null || true)"
    IFS="|" read -r stale_process_jobs stale_process_claims <<< "$stale_process_result"
    [[ "$stale_process_jobs" =~ ^[0-9]+$ ]] || stale_process_jobs=0
    [[ "$stale_process_claims" =~ ^[0-9]+$ ]] || stale_process_claims=0
    stale_orphan_process_jobs="$(cleanup_orphan_pending_process_jobs 2>/dev/null || true)"
    [[ "$stale_orphan_process_jobs" =~ ^[0-9]+$ ]] || stale_orphan_process_jobs=0
    # Gather queue depths; every counter is sanitized to a non-negative int.
    local pending_download pending_process active_process_claims remaining_limit cycle_limit
    pending_download="$(count_pending_downloads 2>/dev/null || true)"
    [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
    pending_process="$(count_pending_process 2>/dev/null || true)"
    [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
    active_process_claims="$(count_active_process_claims 2>/dev/null || true)"
    [[ "$active_process_claims" =~ ^[0-9]+$ ]] || active_process_claims=0
    echo "[process] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process slurm_backlog=$active_process_claims submitted_total=$submitted_total pruned_raw_videos=$pruned stale_process_jobs=$stale_process_jobs stale_process_claims=$stale_process_claims stale_orphan_process_jobs=$stale_orphan_process_jobs"
    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[process] reached max iterations: $MAX_ITERATIONS"
      break
    fi
    # Honor the global submission limit; wait for in-flight tasks to drain
    # before exiting once the limit is hit.
    if [[ -n "$LIMIT" ]]; then
      remaining_limit=$((LIMIT - submitted_total))
      if [[ "$remaining_limit" -le 0 ]]; then
        if [[ "$active_process_claims" -eq 0 ]]; then
          echo "[process] reached submission limit: $submitted_total >= $LIMIT"
          break
        fi
        echo "[process] submission limit reached; waiting for in-flight tasks to finish"
        sleep "$IDLE_SLEEP_SECONDS"
        continue
      fi
    else
      remaining_limit=-1
    fi
    if [[ "$pending_process" -eq 0 ]]; then
      # Only exit when downloads are finished and no Slurm work is in flight.
      if [[ "$pending_download" -eq 0 && "$active_process_claims" -eq 0 ]]; then
        echo "[process] nothing left to process"
        break
      fi
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    # Batch efficiency: wait for a minimum backlog while downloads continue.
    if [[ "$active_process_claims" -eq 0 && "$pending_process" -lt "$MIN_PROCESS_START_BACKLOG" && "$pending_download" -gt 0 ]]; then
      echo "[process] waiting for minimum raw backlog: $pending_process < $MIN_PROCESS_START_BACKLOG"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    if [[ "$active_process_claims" -ge "$MAX_BACKLOG_VIDEOS" ]]; then
      echo "[process] backpressure: slurm backlog $active_process_claims >= limit $MAX_BACKLOG_VIDEOS"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    # NOTE(review): PROCESS_BATCH_SIZE may be empty; `[[ -lt ]]` then treats
    # it as 0 — confirm this matches submit-script expectations.
    cycle_limit="$PROCESS_BATCH_SIZE"
    if [[ "$remaining_limit" -gt 0 && "$remaining_limit" -lt "$cycle_limit" ]]; then
      cycle_limit="$remaining_limit"
    fi
    if ! \
run_process_stage "$cycle_limit"; then
      echo "[process] slurm submit failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    else
      submitted_total=$((submitted_total + RUN_PROCESS_STAGE_SUBMITTED_COUNT))
      run_sync_csv_stage >/dev/null 2>&1 || true
    fi
    sleep "$IDLE_SLEEP_SECONDS"
  done
}

# Upload loop: batch completed folders and push them to HuggingFace once the
# size/count thresholds are met (or immediately once upstream stages finish).
upload_loop() {
  local iteration=0
  while true; do
    iteration=$((iteration + 1))
    local pruned
    pruned="$(prune_processed_raw_videos)"
    local pending_download pending_process complete_pending_upload complete_pending_upload_bytes
    pending_download="$(count_pending_downloads)"
    pending_process="$(count_pending_process)"
    complete_pending_upload="$(count_complete_pending_upload)"
    complete_pending_upload_bytes="$(bytes_complete_pending_upload)"
    echo "[upload] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process complete_pending_upload=$complete_pending_upload complete_pending_upload_bytes=$complete_pending_upload_bytes pruned_raw_videos=$pruned"
    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[upload] reached max iterations: $MAX_ITERATIONS"
      break
    fi
    if [[ "$complete_pending_upload" -eq 0 ]]; then
      # Only exit once upstream stages have fully drained.
      if [[ "$pending_download" -eq 0 && "$pending_process" -eq 0 ]]; then
        echo "[upload] nothing left to upload"
        break
      fi
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    # Below both thresholds while upstream still produces work: keep waiting
    # so uploads happen in efficiently-sized batches.
    if [[ "$complete_pending_upload_bytes" -lt "$TARGET_BYTES" && "$complete_pending_upload" -lt "$TARGET_FOLDERS" && ( "$pending_download" -gt 0 || "$pending_process" -gt 0 ) ]]; then
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    # Final flush: once nothing is pending upstream, upload regardless of size.
    local require_target=1
    if [[ "$pending_download" -eq 0 && "$pending_process" -eq 0 ]]; then
      require_target=0
    fi
    if ! run_upload_stage "$require_target"; then
      echo "[upload] pipeline03 failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    else
      run_sync_csv_stage >/dev/null 2>&1 || true
    fi
  done
}

# Terminate any still-running background loop PIDs passed as arguments.
cleanup_background_jobs() {
  local jobs_to_kill=("$@")
  local job_pid  # keep the loop variable out of the global scope
  for job_pid in "${jobs_to_kill[@]}"; do
    if [[ -n "$job_pid" ]] && kill -0 "$job_pid" 2>/dev/null; then
      kill "$job_pid" 2>/dev/null || true
    fi
  done
}

# Run download, process, and upload loops concurrently; a trap kills the
# siblings if any loop fails (or on INT/TERM) so the script never leaks
# background work.
run_all_loop() {
  DOWNLOAD_LOOP_PID=""
  PROCESS_LOOP_PID=""
  UPLOAD_LOOP_PID=""
  download_loop & DOWNLOAD_LOOP_PID=$!
  process_loop & PROCESS_LOOP_PID=$!
  upload_loop & UPLOAD_LOOP_PID=$!
  trap 'cleanup_background_jobs "$DOWNLOAD_LOOP_PID" "$PROCESS_LOOP_PID" "$UPLOAD_LOOP_PID"' INT TERM EXIT
  wait "$DOWNLOAD_LOOP_PID"
  wait "$PROCESS_LOOP_PID"
  wait "$UPLOAD_LOOP_PID"
  trap - INT TERM EXIT
}

# Stage dispatch; STAGE was validated against this set earlier in the script.
case "$STAGE" in
  download) run_download_stage ;;
  process) run_process_stage ;;
  upload) run_upload_stage ;;
  all) run_all_loop ;;
esac