#!/usr/bin/env bash
# Orchestrates the SignVerse-2M reproduction pipeline on a Slurm cluster:
# stage 1 downloads raw video/caption/metadata, stage 2 extracts DWPose,
# stage 3 archives and uploads to HuggingFace.
set -euo pipefail

# --- Install / conda locations (every knob is environment-overridable) ---
ROOT_DIR="${ROOT_DIR:-/home/sf895/SignVerse-2M}"
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
STATE_ROOT="${STATE_ROOT:-/home/sf895/SignVerse-2M-runtime}"
CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
CONDA_ENV="${CONDA_ENV:-signx2}"

# --- Data locations ---
SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_ori.csv}"
# Fall back to the repo copy when the runtime copy does not exist yet.
if [[ ! -f "$SOURCE_METADATA_CSV" && -f "$ROOT_DIR/SignVerse-2M-metadata_ori.csv" ]]; then
  SOURCE_METADATA_CSV="$ROOT_DIR/SignVerse-2M-metadata_ori.csv"
fi
OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv}"
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
ARCHIVE_DIR="${ARCHIVE_DIR:-$RUNTIME_ROOT/archives}"
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
PROGRESS_JSON="${PROGRESS_JSON:-$RUNTIME_ROOT/archive_upload_progress.json}"

# --- Pipeline entry points ---
PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
PIPELINE03="${PIPELINE03:-$ROOT_DIR/scripts/pipeline03_upload_to_huggingface.py}"
PIPELINE_SYNC="${PIPELINE_SYNC:-$ROOT_DIR/scripts/sync_processed_csv_from_runtime.py}"

# --- Run parameters (defaults; CLI flags parsed below override these) ---
STAGE="${STAGE:-all}"
LIMIT="${LIMIT:-}"
VIDEO_IDS=()
FPS="${FPS:-24}"
WORKERS="${WORKERS:-}"
TARGET_BYTES="${TARGET_BYTES:-$((10 * 1024 * 1024 * 1024))}"  # 10 GiB
TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
DOWNLOAD_BATCH_SIZE="${DOWNLOAD_BATCH_SIZE:-1}"
DOWNLOAD_WORKERS="${DOWNLOAD_WORKERS:-60}"
USE_SLURM_DOWNLOAD="${USE_SLURM_DOWNLOAD:-1}"
PROCESS_BATCH_SIZE="${PROCESS_BATCH_SIZE:-}"
MIN_PROCESS_START_BACKLOG="${MIN_PROCESS_START_BACKLOG:-4}"
PROCESS_PENDING_TIMEOUT_SECONDS="${PROCESS_PENDING_TIMEOUT_SECONDS:-1800}"
RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-180}"
MAX_RAW_VIDEO_BYTES="${MAX_RAW_VIDEO_BYTES:-0}"  # 0 = unlimited
MAX_ITERATIONS="${MAX_ITERATIONS:-0}"            # 0 = unlimited
IDLE_SLEEP_SECONDS="${IDLE_SLEEP_SECONDS:-5}"
REPO_ID="${REPO_ID:-SignerX/SignVerse-2M}"
COOKIES_FILE="${COOKIES_FILE:-$ROOT_DIR/www.youtube.com_cookies (2).txt}"
COOKIES_FROM_BROWSER="${COOKIES_FROM_BROWSER:-}"
EXTRACTOR_ARGS="${EXTRACTOR_ARGS:-}"
SLURM_PROCESS_SUBMIT_SCRIPT="${SLURM_PROCESS_SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_dwpose_slurm.sh}"
SLURM_DOWNLOAD_SUBMIT_SCRIPT="${SLURM_DOWNLOAD_SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_download_slurm.sh}"
# Fix: honor an environment override like every other knob. This was the only
# hard-coded assignment, so a --gpu-partitions value (exported as
# GPU_PARTITIONS through sbatch --export below) was silently discarded when
# the script re-invoked itself inside the Slurm job.
GPU_PARTITIONS="${GPU_PARTITIONS:-gpu,gpu-redhat,cgpu}"
GPU_ACCOUNT="${GPU_ACCOUNT:-}"
ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-180}"
DOWNLOAD_CLAIM_DIR="${DOWNLOAD_CLAIM_DIR:-$STATE_ROOT/slurm/state/download_claims}"
DOWNLOAD_CSV_LOCK_PATH="${DOWNLOAD_CSV_LOCK_PATH:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv.lock}"
DOWNLOAD_PARTITIONS="${DOWNLOAD_PARTITIONS:-main}"
DOWNLOAD_ACCOUNT="${DOWNLOAD_ACCOUNT:-}"
DOWNLOAD_TIME="${DOWNLOAD_TIME:-04:00:00}"
DOWNLOAD_CPUS_PER_TASK="${DOWNLOAD_CPUS_PER_TASK:-1}"
DOWNLOAD_MEM="${DOWNLOAD_MEM:-4G}"
DOWNLOAD_ARRAY_PARALLEL="${DOWNLOAD_ARRAY_PARALLEL:-32}"
DOWNLOAD_MAX_ACTIVE="${DOWNLOAD_MAX_ACTIVE:-60}"
DOWNLOAD_START_STAGGER_MIN="${DOWNLOAD_START_STAGGER_MIN:-1}"
DOWNLOAD_START_STAGGER_MAX="${DOWNLOAD_START_STAGGER_MAX:-3}"
DOWNLOAD_CLAIM_GRACE_SECONDS="${DOWNLOAD_CLAIM_GRACE_SECONDS:-600}"
DOWNLOAD_PARTIAL_TIMEOUT_SECONDS="${DOWNLOAD_PARTIAL_TIMEOUT_SECONDS:-1800}"
ORCHESTRATOR_PARTITION="${ORCHESTRATOR_PARTITION:-main}"
ORCHESTRATOR_ACCOUNT="${ORCHESTRATOR_ACCOUNT:-}"
ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-24:00:00}"
ORCHESTRATOR_CPUS_PER_TASK="${ORCHESTRATOR_CPUS_PER_TASK:-4}"
ORCHESTRATOR_MEM="${ORCHESTRATOR_MEM:-8G}"
RUN_LOCAL="${RUN_LOCAL:-0}"

# --- Behavior toggles (1 = on) ---
FORCE_METADATA="${FORCE_METADATA:-0}"
FORCE_SUBTITLES="${FORCE_SUBTITLES:-0}"
FORCE_DOWNLOAD="${FORCE_DOWNLOAD:-0}"
FORCE_PROCESS="${FORCE_PROCESS:-0}"
SKIP_VIDEO_DOWNLOAD="${SKIP_VIDEO_DOWNLOAD:-0}"
SKIP_SUBTITLES="${SKIP_SUBTITLES:-0}"
DRY_RUN_UPLOAD="${DRY_RUN_UPLOAD:-0}"
|
#######################################
# Print CLI usage to stdout.
# Fix: documents --download-workers, which the parser accepts but the old
# usage text omitted.
#######################################
print_usage() {
cat <<EOF
Usage:
  bash reproduce_independently_slurm.sh [options]

Options:
  --stage {all,download,process,upload}
  --limit N For stage=process/all, cap total videos submitted in this run
  --video-id ID
  --video-ids "ID1 ID2 ..."
  --fps N
  --workers N
  --runtime-root DIR
  --state-root DIR
  --gpu-partitions P1[,P2,...]
  --gpu-account NAME
  --array-parallel N
  --max-backlog-videos N
  --min-process-start-backlog N
  --orchestrator-partition NAME
  --orchestrator-account NAME
  --orchestrator-time HH:MM:SS
  --orchestrator-cpus-per-task N
  --orchestrator-mem SIZE
  --run-local
  --target-bytes N
  --target-folders N
  --download-batch-size N
  --download-workers N
  --process-batch-size N
  --use-slurm-download {0,1}
  --download-partitions P1[,P2,...]
  --download-account NAME
  --download-time HH:MM:SS
  --download-cpus-per-task N
  --download-mem SIZE
  --download-array-parallel N
  --raw-backlog-limit N
  --max-raw-video-bytes N
  --max-iterations N
  --idle-sleep-seconds N
  --repo-id REPO
  --cookies FILE
  --cookies-from-browser BROWSER
  --extractor-args VALUE
  --force-metadata
  --force-subtitles
  --force-download
  --force-process
  --skip-video-download
  --skip-subtitles
  --dry-run-upload
  --help

Examples:
  bash reproduce_independently_slurm.sh --stage download --limit 10 --skip-video-download
  bash reproduce_independently_slurm.sh --stage process --video-id Bdj5MUf_3Hc
  bash reproduce_independently_slurm.sh --stage upload --target-bytes 500000000
  bash reproduce_independently_slurm.sh
  bash reproduce_independently_slurm.sh --stage all --run-local
EOF
}
|
|
# ---------------------------------------------------------------------------
# Command-line parsing. Flags override the environment-variable defaults set
# above; unknown flags print usage and exit non-zero.
# Fix: adds the --target-folders case, which the usage text documents but the
# parser previously rejected as an unknown argument.
# ---------------------------------------------------------------------------
while [[ $# -gt 0 ]]; do
  case "$1" in
    --stage)
      STAGE="$2"
      shift 2
      ;;
    --limit)
      LIMIT="$2"
      shift 2
      ;;
    --video-id)
      VIDEO_IDS+=("$2")
      shift 2
      ;;
    --video-ids)
      # A single argument holding a space-separated list of IDs.
      IFS=' ' read -r -a EXTRA_IDS <<< "$2"
      VIDEO_IDS+=("${EXTRA_IDS[@]}")
      shift 2
      ;;
    --fps)
      FPS="$2"
      shift 2
      ;;
    --workers)
      WORKERS="$2"
      shift 2
      ;;
    --runtime-root)
      RUNTIME_ROOT="$2"
      shift 2
      ;;
    --state-root)
      STATE_ROOT="$2"
      shift 2
      ;;
    --gpu-partitions)
      GPU_PARTITIONS="$2"
      shift 2
      ;;
    --gpu-account)
      GPU_ACCOUNT="$2"
      shift 2
      ;;
    --array-parallel)
      ARRAY_PARALLEL="$2"
      shift 2
      ;;
    --max-backlog-videos)
      MAX_BACKLOG_VIDEOS="$2"
      shift 2
      ;;
    --min-process-start-backlog)
      MIN_PROCESS_START_BACKLOG="$2"
      shift 2
      ;;
    --orchestrator-partition)
      ORCHESTRATOR_PARTITION="$2"
      shift 2
      ;;
    --orchestrator-account)
      ORCHESTRATOR_ACCOUNT="$2"
      shift 2
      ;;
    --orchestrator-time)
      ORCHESTRATOR_TIME="$2"
      shift 2
      ;;
    --orchestrator-cpus-per-task)
      ORCHESTRATOR_CPUS_PER_TASK="$2"
      shift 2
      ;;
    --orchestrator-mem)
      ORCHESTRATOR_MEM="$2"
      shift 2
      ;;
    --run-local)
      RUN_LOCAL=1
      shift
      ;;
    --target-bytes)
      TARGET_BYTES="$2"
      shift 2
      ;;
    --target-folders)
      # Fix: documented in usage but previously missing from the parser.
      TARGET_FOLDERS="$2"
      shift 2
      ;;
    --download-batch-size)
      DOWNLOAD_BATCH_SIZE="$2"
      shift 2
      ;;
    --download-workers)
      DOWNLOAD_WORKERS="$2"
      shift 2
      ;;
    --use-slurm-download)
      USE_SLURM_DOWNLOAD="$2"
      shift 2
      ;;
    --download-partitions)
      DOWNLOAD_PARTITIONS="$2"
      shift 2
      ;;
    --download-account)
      DOWNLOAD_ACCOUNT="$2"
      shift 2
      ;;
    --download-time)
      DOWNLOAD_TIME="$2"
      shift 2
      ;;
    --download-cpus-per-task)
      DOWNLOAD_CPUS_PER_TASK="$2"
      shift 2
      ;;
    --download-mem)
      DOWNLOAD_MEM="$2"
      shift 2
      ;;
    --download-array-parallel)
      DOWNLOAD_ARRAY_PARALLEL="$2"
      shift 2
      ;;
    --process-batch-size)
      PROCESS_BATCH_SIZE="$2"
      shift 2
      ;;
    --raw-backlog-limit)
      RAW_BACKLOG_LIMIT="$2"
      shift 2
      ;;
    --max-raw-video-bytes)
      MAX_RAW_VIDEO_BYTES="$2"
      shift 2
      ;;
    --max-iterations)
      MAX_ITERATIONS="$2"
      shift 2
      ;;
    --idle-sleep-seconds)
      IDLE_SLEEP_SECONDS="$2"
      shift 2
      ;;
    --repo-id)
      REPO_ID="$2"
      shift 2
      ;;
    --cookies)
      COOKIES_FILE="$2"
      shift 2
      ;;
    --cookies-from-browser)
      COOKIES_FROM_BROWSER="$2"
      shift 2
      ;;
    --extractor-args)
      EXTRACTOR_ARGS="$2"
      shift 2
      ;;
    --force-metadata)
      FORCE_METADATA=1
      shift
      ;;
    --force-subtitles)
      FORCE_SUBTITLES=1
      shift
      ;;
    --force-download)
      FORCE_DOWNLOAD=1
      shift
      ;;
    --force-process)
      FORCE_PROCESS=1
      shift
      ;;
    --skip-video-download)
      SKIP_VIDEO_DOWNLOAD=1
      shift
      ;;
    --skip-subtitles)
      SKIP_SUBTITLES=1
      shift
      ;;
    --dry-run-upload)
      DRY_RUN_UPLOAD=1
      shift
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      print_usage
      exit 1
      ;;
  esac
done
|
|
# Abort early if the conda bootstrap script cannot be found.
[[ -f "$CONDA_SH" ]] || {
  echo "Missing conda init script: $CONDA_SH" >&2
  exit 1
}

# Reject any stage name outside the supported set.
case "$STAGE" in
  all|download|process|upload) ;;
  *)
    echo "Invalid --stage: $STAGE" >&2
    exit 1
    ;;
esac
|
|
# Self-submission: when invoked outside a Slurm job (and not forced local),
# re-submit this script as a Slurm batch job via the wrapper and exit.
# The export list sets RUN_LOCAL=1 so the re-invocation skips this branch.
if [[ -z "${SLURM_JOB_ID:-}" && "$RUN_LOCAL" != "1" ]]; then
  wrapper="$ROOT_DIR/slurm/run_reproduce_independently_slurm.slurm"
  if [[ ! -f "$wrapper" ]]; then
    echo "Missing orchestration wrapper: $wrapper" >&2
    exit 1
  fi
  # Forward the effective configuration through sbatch --export.
  # NOTE(review): sbatch --export splits this string on commas, so any value
  # containing a comma would corrupt the list; the defaults contain none, but
  # overridden paths/accounts should be checked.
  export_args="ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,ARCHIVE_DIR=$ARCHIVE_DIR,STATS_NPZ=$STATS_NPZ,PROGRESS_JSON=$PROGRESS_JSON,STAGE=$STAGE,LIMIT=$LIMIT,FPS=$FPS,WORKERS=$WORKERS,TARGET_BYTES=$TARGET_BYTES,DOWNLOAD_BATCH_SIZE=$DOWNLOAD_BATCH_SIZE,DOWNLOAD_WORKERS=$DOWNLOAD_WORKERS,USE_SLURM_DOWNLOAD=$USE_SLURM_DOWNLOAD,SLURM_DOWNLOAD_SUBMIT_SCRIPT=$SLURM_DOWNLOAD_SUBMIT_SCRIPT,DOWNLOAD_PARTITIONS=$DOWNLOAD_PARTITIONS,DOWNLOAD_ACCOUNT=$DOWNLOAD_ACCOUNT,DOWNLOAD_TIME=$DOWNLOAD_TIME,DOWNLOAD_CPUS_PER_TASK=$DOWNLOAD_CPUS_PER_TASK,DOWNLOAD_MEM=$DOWNLOAD_MEM,DOWNLOAD_ARRAY_PARALLEL=$DOWNLOAD_ARRAY_PARALLEL,DOWNLOAD_MAX_ACTIVE=$DOWNLOAD_MAX_ACTIVE,DOWNLOAD_START_STAGGER_MIN=$DOWNLOAD_START_STAGGER_MIN,DOWNLOAD_START_STAGGER_MAX=$DOWNLOAD_START_STAGGER_MAX,PROCESS_BATCH_SIZE=$PROCESS_BATCH_SIZE,DOWNLOAD_CLAIM_DIR=$DOWNLOAD_CLAIM_DIR,DOWNLOAD_CSV_LOCK_PATH=$DOWNLOAD_CSV_LOCK_PATH,MIN_PROCESS_START_BACKLOG=$MIN_PROCESS_START_BACKLOG,RAW_BACKLOG_LIMIT=$RAW_BACKLOG_LIMIT,MAX_RAW_VIDEO_BYTES=$MAX_RAW_VIDEO_BYTES,MAX_ITERATIONS=$MAX_ITERATIONS,IDLE_SLEEP_SECONDS=$IDLE_SLEEP_SECONDS,REPO_ID=$REPO_ID,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,GPU_PARTITIONS=$GPU_PARTITIONS,GPU_ACCOUNT=$GPU_ACCOUNT,ARRAY_PARALLEL=$ARRAY_PARALLEL,MAX_BACKLOG_VIDEOS=$MAX_BACKLOG_VIDEOS,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,FORCE_PROCESS=$FORCE_PROCESS,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,DRY_RUN_UPLOAD=$DRY_RUN_UPLOAD,RUN_LOCAL=1"
  if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
    # Arrays cannot cross the sbatch boundary; pass the IDs as one
    # space-joined string and rebuild the array inside the job (below).
    export VIDEO_IDS_JOINED
    VIDEO_IDS_JOINED="${VIDEO_IDS[*]}"
    export_args+=",VIDEO_IDS_JOINED=$VIDEO_IDS_JOINED"
  fi
  cmd=(sbatch
    --partition "$ORCHESTRATOR_PARTITION"
    --time "$ORCHESTRATOR_TIME"
    --cpus-per-task "$ORCHESTRATOR_CPUS_PER_TASK"
    --mem "$ORCHESTRATOR_MEM"
    --export "$export_args"
  )
  if [[ -n "$ORCHESTRATOR_ACCOUNT" ]]; then
    cmd+=(--account "$ORCHESTRATOR_ACCOUNT")
  fi
  cmd+=("$wrapper")
  echo "Submitting full orchestration job on partition=$ORCHESTRATOR_PARTITION stage=$STAGE"
  "${cmd[@]}"
  exit 0
fi

# Inside the Slurm job: restore VIDEO_IDS from the joined string exported by
# the submitting invocation (only if the array was not set some other way).
if [[ -n "${VIDEO_IDS_JOINED:-}" && ${#VIDEO_IDS[@]} -eq 0 ]]; then
  IFS=' ' read -r -a VIDEO_IDS <<< "$VIDEO_IDS_JOINED"
fi
|
|
# Create all working directories up front so later stages can assume them.
mkdir -p "$RAW_VIDEO_DIR" "$RAW_CAPTION_DIR" "$RAW_METADATA_DIR" "$DATASET_DIR"

# The dwpose Slurm submit helper must exist and be executable.
[[ -x "$SLURM_PROCESS_SUBMIT_SCRIPT" ]] || {
  echo "Missing Slurm submit script: $SLURM_PROCESS_SUBMIT_SCRIPT" >&2
  exit 1
}
|
|
run_in_dwpose() {
# Execute the given command inside the CONDA_ENV conda environment.
# conda.sh is sourced on every call so the function also works when launched
# in a background subshell (see the download worker fan-out).
# CONDA_NO_PLUGINS=true is scoped to the single `conda run` invocation.
source "$CONDA_SH"
CONDA_NO_PLUGINS=true conda run -n "$CONDA_ENV" "$@"
}
|
|
#######################################
# Stage 1: fetch raw videos, captions and metadata (pipeline01).
# Globals (read): USE_SLURM_DOWNLOAD, the DOWNLOAD_* knobs, VIDEO_IDS,
#   FORCE_*/SKIP_* toggles, COOKIES_*, EXTRACTOR_ARGS, pipeline paths.
# Arguments:
#   $1 - optional per-run limit; defaults to LIMIT.
# Returns: 0 on success; the submitter's status (Slurm path) or 1 if any
#   local worker failed (local path).
#######################################
run_download_stage() {
  local stage_limit="${1:-$LIMIT}"

  if [[ "$USE_SLURM_DOWNLOAD" == "1" ]]; then
    # Slurm path: delegate to the download array submitter, which enforces
    # backlog limits and per-video claims itself.
    # NOTE(review): stage_limit is not forwarded on this path — presumably
    # the submit script manages batching on its own; confirm intended.
    local cmd=(bash "$SLURM_DOWNLOAD_SUBMIT_SCRIPT"
      --partitions "$DOWNLOAD_PARTITIONS"
      --runtime-root "$RUNTIME_ROOT"
      --state-root "$STATE_ROOT"
      --time "$DOWNLOAD_TIME"
      --cpus-per-task "$DOWNLOAD_CPUS_PER_TASK"
      --mem "$DOWNLOAD_MEM"
      --max-backlog-videos "$RAW_BACKLOG_LIMIT"
      --workers "$DOWNLOAD_WORKERS"
      --max-active-downloads "$DOWNLOAD_MAX_ACTIVE"
      --claim-dir "$DOWNLOAD_CLAIM_DIR"
      --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
    )

    if [[ -n "$DOWNLOAD_ARRAY_PARALLEL" ]]; then
      cmd+=(--array-parallel "$DOWNLOAD_ARRAY_PARALLEL")
    fi
    if [[ -n "$DOWNLOAD_ACCOUNT" ]]; then
      cmd+=(--account "$DOWNLOAD_ACCOUNT")
    fi
    if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
      cmd+=(--video-ids "${VIDEO_IDS[@]}")
    fi
    if [[ $FORCE_METADATA -eq 1 ]]; then
      cmd+=(--force-metadata)
    fi
    if [[ $FORCE_SUBTITLES -eq 1 ]]; then
      cmd+=(--force-subtitles)
    fi
    if [[ $FORCE_DOWNLOAD -eq 1 ]]; then
      cmd+=(--force-download)
    fi
    if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then
      cmd+=(--skip-video-download)
    fi
    if [[ $SKIP_SUBTITLES -eq 1 ]]; then
      cmd+=(--skip-subtitles)
    fi
    if [[ -n "$COOKIES_FROM_BROWSER" ]]; then
      cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER")
    fi
    if [[ -n "$COOKIES_FILE" ]]; then
      cmd+=(--cookies "$COOKIES_FILE")
    fi
    if [[ -n "$EXTRACTOR_ARGS" ]]; then
      cmd+=(--extractor-args "$EXTRACTOR_ARGS")
    fi

    # Fix: capture the status explicitly. Under `set -e` a bare failing
    # command aborts the whole script before `return $?` runs, so the old
    # `"${cmd[@]}"; return $?` could never report a failure to a caller that
    # checks the function's status.
    local rc=0
    "${cmd[@]}" || rc=$?
    return "$rc"
  fi

  # Local path: fan out DOWNLOAD_WORKERS identical pipeline01 workers; they
  # coordinate through the shared claim dir and the CSV lock file.
  local worker_count="${DOWNLOAD_WORKERS:-1}"
  local pids=()
  local failed=0
  local i

  for ((i = 1; i <= worker_count; i++)); do
    local cmd=(python "$PIPELINE01"
      --source-metadata-csv "$SOURCE_METADATA_CSV"
      --output-metadata-csv "$OUTPUT_METADATA_CSV"
      --raw-video-dir "$RAW_VIDEO_DIR"
      --raw-caption-dir "$RAW_CAPTION_DIR"
      --raw-metadata-dir "$RAW_METADATA_DIR"
      --dataset-dir "$DATASET_DIR"
      --stats-npz "$STATS_NPZ"
      --claim-dir "$DOWNLOAD_CLAIM_DIR"
      --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
    )

    if [[ -n "$stage_limit" ]]; then
      cmd+=(--limit "$stage_limit")
    fi
    if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
      cmd+=(--video-ids "${VIDEO_IDS[@]}")
    fi
    if [[ $FORCE_METADATA -eq 1 ]]; then
      cmd+=(--force-metadata)
    fi
    if [[ $FORCE_SUBTITLES -eq 1 ]]; then
      cmd+=(--force-subtitles)
    fi
    if [[ $FORCE_DOWNLOAD -eq 1 ]]; then
      cmd+=(--force-download)
    fi
    if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then
      cmd+=(--skip-video-download)
    fi
    if [[ $SKIP_SUBTITLES -eq 1 ]]; then
      cmd+=(--skip-subtitles)
    fi
    if [[ -n "$COOKIES_FROM_BROWSER" ]]; then
      cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER")
    fi
    if [[ -n "$COOKIES_FILE" ]]; then
      cmd+=(--cookies "$COOKIES_FILE")
    fi
    if [[ -n "$EXTRACTOR_ARGS" ]]; then
      cmd+=(--extractor-args "$EXTRACTOR_ARGS")
    fi

    run_in_dwpose "${cmd[@]}" &
    pids+=("$!")
  done

  # Barrier: wait for every worker and record whether any failed.
  for pid in "${pids[@]}"; do
    if ! wait "$pid"; then
      failed=1
    fi
  done

  return "$failed"
}
|
|
# Out-parameter of run_process_stage: number of videos submitted by its most
# recent invocation (parsed from the submitter's SUBMITTED_VIDEO_COUNT line).
RUN_PROCESS_STAGE_SUBMITTED_COUNT=0
|
|
#######################################
# Stage 2: submit dwpose-extraction jobs via the Slurm submit script.
# Arguments:
#   $1 - optional per-run limit; defaults to LIMIT.
# Side effects: echoes the submitter's output and sets
#   RUN_PROCESS_STAGE_SUBMITTED_COUNT from its last SUBMITTED_VIDEO_COUNT=N
#   line (0 when absent).
# Returns: the submitter's exit status.
#######################################
run_process_stage() {
  local stage_limit="${1:-$LIMIT}"
  local cmd=(bash "$SLURM_PROCESS_SUBMIT_SCRIPT"
    --partitions "$GPU_PARTITIONS"
    --fps "$FPS"
    --max-backlog-videos "$MAX_BACKLOG_VIDEOS"
    --delete-source-on-success
  )
  local output status submitted_count

  if [[ -n "$stage_limit" ]]; then
    cmd+=(--limit "$stage_limit")
  fi
  if [[ -n "$ARRAY_PARALLEL" ]]; then
    cmd+=(--array-parallel "$ARRAY_PARALLEL")
  fi
  if [[ -n "$GPU_ACCOUNT" ]]; then
    cmd+=(--account "$GPU_ACCOUNT")
  fi
  if [[ $FORCE_PROCESS -eq 1 ]]; then
    cmd+=(--force-process)
  fi

  # Fix: run the submitter inside a condition so a failure neither aborts the
  # script under `set -e` nor loses its exit status. The previous
  # `output=$(...)` followed by `status=$?` never reached the status capture
  # on failure when errexit was active.
  if output="$("${cmd[@]}")"; then
    status=0
  else
    status=$?
  fi
  printf '%s\n' "$output"
  submitted_count="$(awk -F= '/^SUBMITTED_VIDEO_COUNT=/{print $2}' <<< "$output" | tail -n 1)"
  RUN_PROCESS_STAGE_SUBMITTED_COUNT="${submitted_count:-0}"
  return "$status"
}
|
|
#######################################
# Reconcile the processed-metadata CSV with the on-disk runtime state by
# invoking the sync pipeline script.
#######################################
run_sync_csv_stage() {
  local sync_args=(
    --source-metadata-csv "$SOURCE_METADATA_CSV"
    --output-metadata-csv "$OUTPUT_METADATA_CSV"
    --raw-video-dir "$RAW_VIDEO_DIR"
    --raw-caption-dir "$RAW_CAPTION_DIR"
    --raw-metadata-dir "$RAW_METADATA_DIR"
    --dataset-dir "$DATASET_DIR"
    --progress-path "$PROGRESS_JSON"
    --status-journal-path "$RUNTIME_ROOT/upload_status_journal.jsonl"
  )
  python "$PIPELINE_SYNC" "${sync_args[@]}"
}
|
|
#######################################
# Stage 3: archive completed dataset folders and upload them (pipeline03).
# Arguments:
#   $1 - optional; "1" makes the uploader require the byte target first.
#######################################
run_upload_stage() {
  local require_target="${1:-0}"
  local upload_cmd=(python "$PIPELINE03"
    --dataset-dir "$DATASET_DIR"
    --raw-video-dir "$RAW_VIDEO_DIR"
    --raw-caption-dir "$RAW_CAPTION_DIR"
    --raw-metadata-dir "$RAW_METADATA_DIR"
    --archive-dir "$ARCHIVE_DIR"
    --progress-path "$PROGRESS_JSON"
    --stats-npz "$STATS_NPZ"
    --repo-id "$REPO_ID"
    --target-bytes "$TARGET_BYTES"
    --target-folders "$TARGET_FOLDERS"
  )

  [[ "$require_target" != "1" ]] || upload_cmd+=(--require-target-bytes)
  [[ $DRY_RUN_UPLOAD -ne 1 ]] || upload_cmd+=(--dry-run)

  run_in_dwpose "${upload_cmd[@]}"
}
|
|
prune_processed_raw_videos() {
# Delete raw videos whose dataset folder already holds a completed npz
# export; prints the number of files removed.
python - <<PY
from pathlib import Path

source_dir = Path("$RAW_VIDEO_DIR")
processed_dir = Path("$DATASET_DIR")
allowed_suffixes = {".mp4", ".mkv", ".webm", ".mov"}
deleted = 0
if source_dir.exists():
    for candidate in source_dir.iterdir():
        if candidate.is_file() and candidate.suffix.lower() in allowed_suffixes:
            marker = processed_dir / candidate.stem / "npz" / ".complete"
            if marker.exists():
                candidate.unlink(missing_ok=True)
                deleted += 1
print(deleted)
PY
}
|
|
count_pending_downloads() {
# Print how many rows of the processed-metadata CSV have not reached a
# terminal download state (anything other than "ok" or "skipped").
python - <<PY
import csv
from pathlib import Path

metadata_path = Path("$OUTPUT_METADATA_CSV")
remaining = 0
if metadata_path.exists():
    with metadata_path.open("r", encoding="utf-8", newline="") as fh:
        for record in csv.DictReader(fh):
            status = (record.get("download_status") or "").strip()
            if status not in {"ok", "skipped"}:
                remaining += 1
print(remaining)
PY
}
|
|
count_pending_process() {
# Print how many raw video files are on disk awaiting dwpose processing.
python - <<PY
from pathlib import Path

source_dir = Path("$RAW_VIDEO_DIR")
allowed_suffixes = {".mp4", ".mkv", ".webm", ".mov"}
backlog = 0
if source_dir.exists():
    backlog = sum(
        1
        for entry in source_dir.iterdir()
        if entry.is_file() and entry.suffix.lower() in allowed_suffixes
    )
print(backlog)
PY
}
|
|
count_download_stop_guard() {
# Report overall pipeline progress as a single pipe-separated line:
#   source_rows|terminal_rows|uploaded_rows|live_download_jobs
# so the caller can decide when to stop scheduling download jobs.
python - <<PY
import csv
import json
import re
import subprocess
from pathlib import Path
source_csv = Path("$SOURCE_METADATA_CSV")
processed_csv = Path("$OUTPUT_METADATA_CSV")
progress_json = Path("$PROGRESS_JSON")
source_rows = 0
terminal_rows = 0
uploaded_rows = 0
# Candidate total: every source-CSV row with a non-empty first column.
# NOTE(review): the header row is counted as well; confirm the consumer
# compensates for that off-by-one.
if source_csv.exists():
    with source_csv.open("r", encoding="utf-8-sig", newline="") as handle:
        for row in csv.reader(handle):
            if row and (row[0] or "").strip():
                source_rows += 1
# Rows whose download reached a terminal state (ok or skipped).
if processed_csv.exists():
    with processed_csv.open("r", encoding="utf-8-sig", newline="") as handle:
        reader = csv.DictReader(handle)
        for row in reader:
            if (row.get("download_status") or "").strip() in {"ok", "skipped"}:
                terminal_rows += 1
# Folders recorded as uploaded in the progress journal (best effort).
if progress_json.exists():
    try:
        uploaded_rows = len(json.loads(progress_json.read_text()).get("uploaded_folders", {}))
    except Exception:
        uploaded_rows = 0
# squeue output format requested below: jobid|jobname|state.
line_re = re.compile(r"^(?P<jobid>[^|]+)\|(?P<job>[^|]+)\|(?P<state>[^|]+)$")
# Array-job ids look like 123_[0-4,7] with an optional concurrency suffix.
array_re = re.compile(r"^(\d+)_\[(.+)\]$")
def expand_count(jobid_token: str) -> int:
    # Expand an array-job id token into the number of tasks it represents;
    # a plain job id counts as one.
    m = array_re.match(jobid_token)
    if not m:
        return 1
    body = m.group(2)
    if "%" in body:
        # Drop the concurrency-limit suffix before counting tasks.
        body = body.split("%", 1)[0]
    total = 0
    for part in body.split(","):
        part = part.strip()
        if not part:
            continue
        if "-" in part:
            a, b = part.split("-", 1)
            try:
                total += int(b) - int(a) + 1
            except ValueError:
                total += 1
        else:
            total += 1
    return max(total, 1)
# Live download jobs for this user: RUNNING/PENDING/CONFIGURING jobs whose
# name is exactly "download". Best effort: errors yield zero.
live_download_jobs = 0
try:
    proc = subprocess.run(["squeue", "-u", "$USER", "-h", "-o", "%i|%j|%T"], check=False, capture_output=True, text=True)
    for line in (proc.stdout or "").splitlines():
        m = line_re.match(line.strip())
        if not m:
            continue
        if m.group("job") != "download":
            continue
        if m.group("state") not in {"RUNNING", "PENDING", "CONFIGURING"}:
            continue
        live_download_jobs += expand_count(m.group("jobid"))
except Exception:
    live_download_jobs = 0
print(f"{source_rows}|{terminal_rows}|{uploaded_rows}|{live_download_jobs}")
PY
}
|
|
cleanup_stale_download_claims() {
# Delete download claim files whose owning worker is no longer alive.
# A claim survives when any of these holds (checked in priority order):
# it is younger than the grace window; its job_key is still in the queue;
# its job_id+task_id is still in the queue; sacct reports the base job as
# non-terminal; or (local workers) its recorded pid is alive.
# Prints the number of claims removed.
python - <<PY
import os
import subprocess
from datetime import datetime, timedelta
from pathlib import Path
claim_dir = Path("$DOWNLOAD_CLAIM_DIR")
claim_dir.mkdir(parents=True, exist_ok=True)
removed = 0
now = datetime.now()
# Snapshot of this user's queue: exact key (jobid or jobid_task) to state,
# plus a second map keyed by the base job id alone.
job_states = {}
base_states = {}
try:
    result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%i|%T"], check=True, capture_output=True, text=True)
    for line in result.stdout.splitlines():
        if not line.strip() or "|" not in line:
            continue
        job_key, state = line.split("|", 1)
        job_key = job_key.strip()
        state = state.strip().upper()
        job_states[job_key] = state
        base_states[job_key.split("_", 1)[0]] = state
except Exception:
    # squeue unavailable: continue with empty maps; claims may then only be
    # kept alive by the grace window, sacct, or the pid check.
    job_states = {}
    base_states = {}
base_activity_cache = {}
def base_job_alive(job_id: str) -> bool:
    # Ask sacct whether any step of job_id is still in a non-terminal state;
    # the answer is cached per job id. Falls back to the squeue snapshot when
    # sacct itself fails.
    if not job_id:
        return False
    if job_id in base_activity_cache:
        return base_activity_cache[job_id]
    alive = False
    try:
        proc = subprocess.run(["sacct", "-n", "-X", "-j", job_id, "--format=JobIDRaw,State", "-P"], check=False, capture_output=True, text=True)
        terminal_tokens = ("COMPLETED", "FAILED", "CANCELLED", "TIMEOUT", "OUT_OF_MEMORY", "NODE_FAIL", "PREEMPTED", "BOOT_FAIL", "DEADLINE", "REVOKED")
        for row in proc.stdout.splitlines():
            if not row.strip() or "|" not in row:
                continue
            jid, state = row.split("|", 1)
            jid = jid.strip()
            state = state.strip().upper()
            if not jid.startswith(job_id):
                continue
            if state and not any(tok in state for tok in terminal_tokens):
                alive = True
                break
    except Exception:
        alive = job_id in base_states
    base_activity_cache[job_id] = alive
    return alive
for claim_path in claim_dir.glob("*.claim"):
    try:
        lines = claim_path.read_text(encoding="utf-8").splitlines()
    except OSError:
        continue
    # Claim files are key=value lines written by the download workers.
    pid = None
    job_id = ""
    task_id = ""
    job_key = ""
    submitted_at = ""
    for line in lines:
        if line.startswith("pid="):
            try:
                pid = int(line.split("=", 1)[1].strip())
            except ValueError:
                pid = None
        elif line.startswith("job_id="):
            job_id = line.split("=", 1)[1].strip()
        elif line.startswith("task_id="):
            task_id = line.split("=", 1)[1].strip()
        elif line.startswith("job_key="):
            job_key = line.split("=", 1)[1].strip()
        elif line.startswith("submitted_at="):
            submitted_at = line.split("=", 1)[1].strip()
    alive = False
    # Grace window: never reap a claim younger than
    # DOWNLOAD_CLAIM_GRACE_SECONDS, even if its job is not visible yet.
    if submitted_at:
        try:
            submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S")
            if (now - submitted_dt) <= timedelta(seconds=int("$DOWNLOAD_CLAIM_GRACE_SECONDS")):
                alive = True
        except Exception:
            pass
    # NOTE(review): this is an elif chain, so only the first matching
    # identifier kind is consulted -- a claim whose job_key is absent from
    # squeue is reaped without trying the sacct or pid fallbacks; confirm
    # that is intended.
    if (not alive) and job_key:
        alive = job_key in job_states
    elif (not alive) and job_id and task_id:
        alive = f"{job_id}_{task_id}" in job_states
    elif (not alive) and job_id:
        alive = base_job_alive(job_id)
    elif (not alive) and pid is not None:
        # Local (non-Slurm) worker: signal 0 probes process existence.
        try:
            os.kill(pid, 0)
            alive = True
        except OSError:
            alive = False
    if not alive:
        claim_path.unlink(missing_ok=True)
        removed += 1
print(removed)
PY
}
|
|
cleanup_stale_download_partials() {
# Remove orphaned downloader partial files (.part / .ytdl) in the raw-video
# dir that are older than the timeout and whose video id has no live claim.
# Prints the number of files removed.
python - <<PY
import time
from pathlib import Path

claims_root = Path("$DOWNLOAD_CLAIM_DIR")
videos_root = Path("$RAW_VIDEO_DIR")
stale_after = int("$DOWNLOAD_PARTIAL_TIMEOUT_SECONDS")
started = time.time()

claimed_ids = set()
try:
    claims_root.mkdir(parents=True, exist_ok=True)
    for claim_file in claims_root.glob("*.claim"):
        claimed_ids.add(claim_file.stem)
except Exception:
    pass

removed_total = 0
if videos_root.exists():
    for entry in videos_root.iterdir():
        if not entry.is_file():
            continue
        fname = entry.name
        is_partial = fname.endswith(".part") or fname.endswith(".ytdl")
        if not is_partial:
            is_partial = any(
                token in fname
                for token in (
                    ".mp4.part", ".mp4.ytdl",
                    ".webm.part", ".webm.ytdl",
                    ".mkv.part", ".mkv.ytdl",
                    ".mov.part", ".mov.ytdl",
                )
            )
        if not is_partial:
            continue
        # Recover the video id by stripping the longest known partial suffix.
        stem = fname
        for ending in (
            ".mp4.part", ".webm.part", ".mkv.part", ".mov.part",
            ".mp4.ytdl", ".webm.ytdl", ".mkv.ytdl", ".mov.ytdl",
            ".part", ".ytdl",
        ):
            if stem.endswith(ending):
                stem = stem[:-len(ending)]
                break
        if stem in claimed_ids:
            continue
        try:
            age = started - entry.stat().st_mtime
        except OSError:
            continue
        if age < stale_after:
            continue
        try:
            entry.unlink()
            removed_total += 1
        except OSError:
            pass
print(removed_total)
PY
}
|
|
dir_size_bytes() {
# Print the total size in bytes of all regular files under directory $1
# (recursively); prints 0 when the directory does not exist.
python - <<PY
from pathlib import Path

base = Path("$1")
size_sum = 0
if base.exists():
    for entry in base.rglob("*"):
        if entry.is_file():
            try:
                size_sum += entry.stat().st_size
            except OSError:
                pass
print(size_sum)
PY
}
|
|
count_active_process_claims() {
# Count process claim files that map to a live (queued or running) Slurm job.
# Side effects: removes claims whose job is gone or unidentifiable, and
# cancels plus unclaims jobs that have sat PENDING for longer than
# PROCESS_PENDING_TIMEOUT_SECONDS. Prints the surviving claim count.
# Fix: drops a duplicated datetime import present in the original.
python - <<PY
import subprocess
from datetime import datetime, timedelta
from pathlib import Path
claim_dir = Path("$STATE_ROOT/slurm/state/claims")
claim_dir.mkdir(parents=True, exist_ok=True)
timeout_seconds = int("$PROCESS_PENDING_TIMEOUT_SECONDS")
now = datetime.now()
# Snapshot of this user's queue: exact key (jobid or jobid_task) to state,
# plus a second map keyed by the base job id alone.
job_states = {}
base_states = {}
try:
    result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%i|%T"], check=True, capture_output=True, text=True)
    for line in result.stdout.splitlines():
        if not line.strip() or "|" not in line:
            continue
        job_key, state = line.split("|", 1)
        job_key = job_key.strip()
        state = state.strip().upper()
        job_states[job_key] = state
        base_states[job_key.split("_", 1)[0]] = state
except Exception:
    # squeue unavailable: with empty maps every claim is treated as dead.
    job_states = {}
    base_states = {}
count = 0
for claim_path in claim_dir.glob("*.claim"):
    try:
        lines = claim_path.read_text(encoding="utf-8").splitlines()
    except OSError:
        continue
    # Claim files are simple key=value lines.
    meta = {}
    for line in lines:
        if "=" in line:
            k, v = line.split("=", 1)
            meta[k.strip()] = v.strip()
    job_id = meta.get("job_id", "")
    task_id = meta.get("task_id", "")
    job_key = meta.get("job_key", "")
    if not job_id:
        # Unidentifiable claim: drop it.
        claim_path.unlink(missing_ok=True)
        continue
    state = None
    if job_key:
        state = job_states.get(job_key)
    elif task_id:
        state = job_states.get(f"{job_id}_{task_id}")
    else:
        state = base_states.get(job_id)
    if not state:
        # Job no longer in the queue: the claim is stale.
        claim_path.unlink(missing_ok=True)
        continue
    # Cancel jobs stuck in PENDING longer than the configured timeout.
    submitted_at = meta.get("submitted_at", "")
    stale_pending = False
    if state == "PENDING" and submitted_at:
        try:
            submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S")
            stale_pending = (now - submitted_dt) > timedelta(seconds=timeout_seconds)
        except Exception:
            stale_pending = False
    if stale_pending:
        subprocess.run(["scancel", job_id], check=False)
        claim_path.unlink(missing_ok=True)
        continue
    count += 1
print(count)
PY
}
|
|
cleanup_stale_process_jobs() {
# Reap claim-tracked dwpose jobs that can no longer make progress:
#   - claims without a derivable job key, or whose job vanished from squeue,
#     are removed;
#   - PENDING jobs blocked on dead node/partition reasons are cancelled now;
#   - PENDING jobs on ordinary queue-wait reasons (or no reason) are
#     cancelled after PROCESS_PENDING_TIMEOUT_SECONDS.
# Prints "cancelled_jobs|removed_claims".
python - <<PY
import subprocess
from datetime import datetime, timedelta
from pathlib import Path

claim_dir = Path("$STATE_ROOT/slurm/state/claims")
claim_dir.mkdir(parents=True, exist_ok=True)
timeout_seconds = int("$PROCESS_PENDING_TIMEOUT_SECONDS")
now = datetime.now()

# Pending reasons that mean the job can never start as currently queued.
bad_reason_tokens = (
    "ReqNodeNotAvail",
    "UnavailableNodes",
    "NodeDown",
    "PartitionDown",
    "PartitionInactive",
    "ReservationDeleted",
)
# Ordinary queue-wait reasons, tolerated until the timeout elapses.
timed_pending_reasons = ("Priority", "Resources", "QOS")

# Snapshot of this user's dwpose-named jobs: key to (state, pending reason).
squeue_rows = {}
try:
    proc = subprocess.run(
        ["squeue", "-h", "-u", "$USER", "-n", "dwpose", "-o", "%i|%T|%R"],
        check=False,
        capture_output=True,
        text=True,
    )
    for line in proc.stdout.splitlines():
        if not line.strip() or "|" not in line:
            continue
        job_key, state, reason = line.split("|", 2)
        squeue_rows[job_key.strip()] = (state.strip().upper(), reason.strip())
except Exception:
    # squeue unavailable: with an empty map every claim below is removed.
    squeue_rows = {}

cancelled_jobs = set()
removed_claims = 0

for claim_path in claim_dir.glob("*.claim"):
    try:
        lines = claim_path.read_text(encoding="utf-8").splitlines()
    except OSError:
        continue
    # Claim files are simple key=value lines.
    meta = {}
    for line in lines:
        if "=" in line:
            k, v = line.split("=", 1)
            meta[k.strip()] = v.strip()
    job_id = meta.get("job_id", "")
    task_id = meta.get("task_id", "")
    job_key = meta.get("job_key", "") or (f"{job_id}_{task_id}" if job_id and task_id else "")
    submitted_at = meta.get("submitted_at", "")
    if not job_key:
        # Unidentifiable claim: drop it.
        claim_path.unlink(missing_ok=True)
        removed_claims += 1
        continue
    row = squeue_rows.get(job_key)
    if not row:
        # Job no longer visible in the queue: the claim is stale.
        claim_path.unlink(missing_ok=True)
        removed_claims += 1
        continue
    state, reason = row
    should_cancel = False
    if state == "PENDING":
        if any(tok in reason for tok in bad_reason_tokens):
            should_cancel = True
        elif submitted_at:
            try:
                submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S")
                if (now - submitted_dt) > timedelta(seconds=timeout_seconds):
                    if any(tok in reason for tok in timed_pending_reasons) or not reason:
                        should_cancel = True
            except Exception:
                pass
    if should_cancel:
        # NOTE(review): scancel receives the base job id; for array jobs this
        # cancels every task of the array -- confirm that is intended.
        subprocess.run(["scancel", job_id], check=False)
        cancelled_jobs.add(job_id)
        claim_path.unlink(missing_ok=True)
        removed_claims += 1

print(f"{len(cancelled_jobs)}|{removed_claims}")
PY
}
|
|
|
|
cleanup_orphan_pending_process_jobs() {
# Cancel PENDING dwpose jobs straight from squeue output, with no claim file
# required: jobs blocked on dead node/partition reasons are cancelled
# immediately, while jobs pending on Priority/Resources/QOS are cancelled
# only once older than PROCESS_PENDING_TIMEOUT_SECONDS (based on the squeue
# submit time). Prints the number of jobs cancelled.
python - <<PY
import subprocess
from datetime import datetime, timedelta

# Pending reasons that mean the job can never start as currently queued.
bad_reason_tokens = (
    "ReqNodeNotAvail",
    "UnavailableNodes",
    "NodeDown",
    "PartitionDown",
    "PartitionInactive",
    "ReservationDeleted",
)
# Ordinary queue-wait reasons, tolerated until the timeout elapses.
timed_pending_reasons = ("Priority", "Resources", "QOS")
timeout_seconds = int("$PROCESS_PENDING_TIMEOUT_SECONDS")
now = datetime.now()

cancelled = set()
try:
    # %A = base job id, %T = state, %R = reason, %V = submit time.
    proc = subprocess.run(
        ["squeue", "-h", "-u", "$USER", "-n", "dwpose", "-o", "%A|%T|%R|%V"],
        check=False,
        capture_output=True,
        text=True,
    )
    for line in proc.stdout.splitlines():
        if not line.strip() or "|" not in line:
            continue
        job_id, state, reason, submit_time = line.split("|", 3)
        state = state.strip().upper()
        reason = reason.strip()
        if state != "PENDING":
            continue
        should_cancel = False
        if any(tok in reason for tok in bad_reason_tokens):
            should_cancel = True
        elif any(tok in reason for tok in timed_pending_reasons):
            submit_time = submit_time.strip()
            if submit_time and submit_time != "N/A":
                try:
                    submitted_dt = datetime.strptime(submit_time, "%Y-%m-%dT%H:%M:%S")
                    if (now - submitted_dt) > timedelta(seconds=timeout_seconds):
                        should_cancel = True
                except Exception:
                    pass
        if should_cancel:
            subprocess.run(["scancel", job_id.strip()], check=False)
            cancelled.add(job_id.strip())
except Exception:
    # Best effort: any squeue/scancel failure leaves the queue untouched.
    pass
print(len(cancelled))
PY
}
|
|
count_complete_pending_upload() {
  # Count dataset folders that are fully processed (have an npz/.complete
  # marker) but are not yet recorded as uploaded in PROGRESS_JSON.
  # Outputs: a single integer on stdout.
  python - <<PY
import json
from pathlib import Path

dataset_dir = Path("$DATASET_DIR")
progress_path = Path("$PROGRESS_JSON")

uploaded = set()
if progress_path.exists():
    try:
        uploaded = set(json.loads(progress_path.read_text()).get("uploaded_folders", {}).keys())
    except (OSError, ValueError):
        # Corrupt or half-written progress file: treat as "nothing uploaded
        # yet" so the orchestration loop keeps running instead of crashing.
        uploaded = set()

count = 0
if dataset_dir.exists():
    for folder_path in dataset_dir.iterdir():
        if not folder_path.is_dir():
            continue
        if folder_path.name in uploaded:
            continue
        if (folder_path / "npz" / ".complete").exists():
            count += 1
print(count)
PY
}
|
|
bytes_complete_pending_upload() {
  # Sum the on-disk size (bytes, recursive) of dataset folders that are
  # complete (npz/.complete marker) but not yet uploaded per PROGRESS_JSON.
  # Outputs: a single integer on stdout.
  python - <<PY
import json
from pathlib import Path

dataset_dir = Path("$DATASET_DIR")
progress_path = Path("$PROGRESS_JSON")

uploaded = set()
if progress_path.exists():
    try:
        uploaded = set(json.loads(progress_path.read_text()).get("uploaded_folders", {}).keys())
    except (OSError, ValueError):
        # Corrupt or half-written progress file: treat as "nothing uploaded
        # yet" so the orchestration loop keeps running instead of crashing.
        uploaded = set()

total = 0
if dataset_dir.exists():
    for folder_path in dataset_dir.iterdir():
        if not folder_path.is_dir():
            continue
        if folder_path.name in uploaded:
            continue
        if not (folder_path / "npz" / ".complete").exists():
            continue
        for path in folder_path.rglob("*"):
            if path.is_file():
                try:
                    total += path.stat().st_size
                except OSError:
                    # File vanished mid-scan (concurrent prune/upload); skip.
                    pass
print(total)
PY
}
|
|
download_loop() {
  # Download-stage driver: repeatedly runs pipeline01 download batches while
  # applying backpressure from the raw-video processing backlog and disk
  # usage, and opportunistically submits process work when none is in flight.
  # Runs until nothing is left to download or MAX_ITERATIONS is exceeded.
  local iteration=0
  while true; do
    iteration=$((iteration + 1))
    # Housekeeping: each cleanup helper prints a count; failures and
    # non-numeric output are coerced to 0 so a helper hiccup never kills
    # this loop (it runs as a background job under `set -e`).
    local pruned stale_download_claims stale_download_partials stale_process_result stale_process_jobs stale_process_claims stale_orphan_process_jobs
    pruned="$(prune_processed_raw_videos 2>/dev/null || true)"
    [[ "$pruned" =~ ^[0-9]+$ ]] || pruned=0
    stale_download_claims="$(cleanup_stale_download_claims 2>/dev/null || true)"
    [[ "$stale_download_claims" =~ ^[0-9]+$ ]] || stale_download_claims=0
    stale_download_partials="$(cleanup_stale_download_partials 2>/dev/null || true)"
    [[ "$stale_download_partials" =~ ^[0-9]+$ ]] || stale_download_partials=0
    stale_process_result="$(cleanup_stale_process_jobs 2>/dev/null || true)"
    # Helper emits "jobs|claims"; split and sanitize both counters.
    IFS="|" read -r stale_process_jobs stale_process_claims <<< "$stale_process_result"
    [[ "$stale_process_jobs" =~ ^[0-9]+$ ]] || stale_process_jobs=0
    [[ "$stale_process_claims" =~ ^[0-9]+$ ]] || stale_process_claims=0
    stale_orphan_process_jobs="$(cleanup_orphan_pending_process_jobs 2>/dev/null || true)"
    [[ "$stale_orphan_process_jobs" =~ ^[0-9]+$ ]] || stale_orphan_process_jobs=0
    # Snapshot the queue sizes that drive the control decisions below.
    local pending_download pending_process raw_video_bytes
    pending_download="$(count_pending_downloads 2>/dev/null || true)"
    [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
    pending_process="$(count_pending_process 2>/dev/null || true)"
    [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
    raw_video_bytes="$(dir_size_bytes "$RAW_VIDEO_DIR" 2>/dev/null || true)"
    [[ "$raw_video_bytes" =~ ^[0-9]+$ ]] || raw_video_bytes=0
    echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned stale_download_claims=$stale_download_claims stale_download_partials=$stale_download_partials stale_process_jobs=$stale_process_jobs stale_process_claims=$stale_process_claims stale_orphan_process_jobs=$stale_orphan_process_jobs"

    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[download] reached max iterations: $MAX_ITERATIONS"
      break
    fi
    # Possible completion: re-sync the metadata CSV and re-count before
    # deciding to exit.
    if [[ "$pending_download" -eq 0 ]]; then
      run_sync_csv_stage >/dev/null 2>&1 || true
      pending_download="$(count_pending_downloads 2>/dev/null || true)"
      [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
      if [[ "$pending_download" -eq 0 ]]; then
        # Stop guard: refuse to exit while download jobs are still alive or
        # the metadata has fewer terminal rows than source rows (i.e. work
        # may still appear once in-flight jobs land).
        local stop_guard source_rows terminal_rows uploaded_rows live_download_jobs
        stop_guard="$(count_download_stop_guard 2>/dev/null || true)"
        IFS="|" read -r source_rows terminal_rows uploaded_rows live_download_jobs <<< "$stop_guard"
        [[ "$source_rows" =~ ^[0-9]+$ ]] || source_rows=0
        [[ "$terminal_rows" =~ ^[0-9]+$ ]] || terminal_rows=0
        [[ "$uploaded_rows" =~ ^[0-9]+$ ]] || uploaded_rows=0
        [[ "$live_download_jobs" =~ ^[0-9]+$ ]] || live_download_jobs=0
        if [[ "$live_download_jobs" -gt 0 || "$terminal_rows" -lt "$source_rows" ]]; then
          echo "[download] stop guard blocked exit: pending_download=$pending_download source_rows=$source_rows terminal_rows=$terminal_rows uploaded_rows=$uploaded_rows live_download_jobs=$live_download_jobs"
          sleep "$IDLE_SLEEP_SECONDS"
          continue
        fi
        echo "[download] nothing left to download"
        break
      fi
    fi
    # Backpressure: pause downloading when the processing backlog or the
    # raw-video disk footprint is over its limit.
    if [[ "$pending_process" -ge "$RAW_BACKLOG_LIMIT" ]]; then
      echo "[download] backpressure: raw backlog $pending_process >= limit $RAW_BACKLOG_LIMIT"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    if [[ "$MAX_RAW_VIDEO_BYTES" -gt 0 && "$raw_video_bytes" -ge "$MAX_RAW_VIDEO_BYTES" ]]; then
      echo "[download] backpressure: raw_video_bytes $raw_video_bytes >= limit $MAX_RAW_VIDEO_BYTES"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi

    # Run one download batch; sync the CSV on success, back off on failure.
    if ! run_download_stage "$DOWNLOAD_BATCH_SIZE"; then
      echo "[download] pipeline01 failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    else
      run_sync_csv_stage >/dev/null 2>&1 || true
    fi

    # Fallback: if enough raw backlog accumulated and no process claim is
    # active, kick off processing from here (covers a stalled process loop).
    pending_process="$(count_pending_process 2>/dev/null || true)"
    [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
    local active_process_claims
    active_process_claims="$(count_active_process_claims 2>/dev/null || true)"
    [[ "$active_process_claims" =~ ^[0-9]+$ ]] || active_process_claims=0
    if [[ "$pending_process" -ge "$MIN_PROCESS_START_BACKLOG" && "$active_process_claims" -eq 0 ]]; then
      if ! run_process_stage "$PROCESS_BATCH_SIZE"; then
        echo "[download] fallback process submit failed"
      else
        echo "[download] fallback process submit succeeded count=$RUN_PROCESS_STAGE_SUBMITTED_COUNT"
      fi
    fi
  done
}
|
|
process_loop() {
  # Process-stage driver: drains the raw-video backlog by submitting DWPose
  # extraction batches to Slurm, honoring the optional LIMIT on total
  # submissions, minimum-backlog batching, and Slurm backlog backpressure.
  local iteration=0
  local submitted_total=0
  echo "[process] loop started"
  while true; do
    iteration=$((iteration + 1))
    # Housekeeping: prune already-processed raw videos and clear stale or
    # orphaned process jobs/claims before measuring the backlog.  Helper
    # failures and non-numeric output are coerced to 0 so this background
    # loop never dies on a transient helper error under `set -e`.
    local pruned stale_process_result stale_process_jobs stale_process_claims stale_orphan_process_jobs
    pruned="$(prune_processed_raw_videos 2>/dev/null || true)"
    [[ "$pruned" =~ ^[0-9]+$ ]] || pruned=0
    stale_process_result="$(cleanup_stale_process_jobs 2>/dev/null || true)"
    # Helper emits "jobs|claims"; split and sanitize both counters.
    IFS="|" read -r stale_process_jobs stale_process_claims <<< "$stale_process_result"
    [[ "$stale_process_jobs" =~ ^[0-9]+$ ]] || stale_process_jobs=0
    [[ "$stale_process_claims" =~ ^[0-9]+$ ]] || stale_process_claims=0
    stale_orphan_process_jobs="$(cleanup_orphan_pending_process_jobs 2>/dev/null || true)"
    [[ "$stale_orphan_process_jobs" =~ ^[0-9]+$ ]] || stale_orphan_process_jobs=0
    # Snapshot the queue sizes that drive the control decisions below.
    local pending_download pending_process active_process_claims remaining_limit cycle_limit
    pending_download="$(count_pending_downloads 2>/dev/null || true)"
    [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
    pending_process="$(count_pending_process 2>/dev/null || true)"
    [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
    active_process_claims="$(count_active_process_claims 2>/dev/null || true)"
    [[ "$active_process_claims" =~ ^[0-9]+$ ]] || active_process_claims=0
    echo "[process] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process slurm_backlog=$active_process_claims submitted_total=$submitted_total pruned_raw_videos=$pruned stale_process_jobs=$stale_process_jobs stale_process_claims=$stale_process_claims stale_orphan_process_jobs=$stale_orphan_process_jobs"

    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[process] reached max iterations: $MAX_ITERATIONS"
      break
    fi
    # LIMIT handling: once the submission budget is spent, exit only after
    # all in-flight claims finish; remaining_limit=-1 means "unlimited".
    if [[ -n "$LIMIT" ]]; then
      remaining_limit=$((LIMIT - submitted_total))
      if [[ "$remaining_limit" -le 0 ]]; then
        if [[ "$active_process_claims" -eq 0 ]]; then
          echo "[process] reached submission limit: $submitted_total >= $LIMIT"
          break
        fi
        echo "[process] submission limit reached; waiting for in-flight tasks to finish"
        sleep "$IDLE_SLEEP_SECONDS"
        continue
      fi
    else
      remaining_limit=-1
    fi
    # Empty backlog: exit only once downloads and in-flight claims are also
    # drained; otherwise idle and re-check.
    if [[ "$pending_process" -eq 0 ]]; then
      if [[ "$pending_download" -eq 0 && "$active_process_claims" -eq 0 ]]; then
        echo "[process] nothing left to process"
        break
      fi
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    # Batch efficiency: while downloads are still producing, wait for at
    # least MIN_PROCESS_START_BACKLOG videos before the first submission.
    if [[ "$active_process_claims" -eq 0 && "$pending_process" -lt "$MIN_PROCESS_START_BACKLOG" && "$pending_download" -gt 0 ]]; then
      echo "[process] waiting for minimum raw backlog: $pending_process < $MIN_PROCESS_START_BACKLOG"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi
    # Backpressure: cap the number of outstanding Slurm claims.
    if [[ "$active_process_claims" -ge "$MAX_BACKLOG_VIDEOS" ]]; then
      echo "[process] backpressure: slurm backlog $active_process_claims >= limit $MAX_BACKLOG_VIDEOS"
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi

    # Clamp this cycle's batch to whatever remains of LIMIT.
    cycle_limit="$PROCESS_BATCH_SIZE"
    if [[ "$remaining_limit" -gt 0 && "$remaining_limit" -lt "$cycle_limit" ]]; then
      cycle_limit="$remaining_limit"
    fi

    # Submit one batch; on success count the submissions and sync the CSV.
    if ! run_process_stage "$cycle_limit"; then
      echo "[process] slurm submit failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    else
      submitted_total=$((submitted_total + RUN_PROCESS_STAGE_SUBMITTED_COUNT))
      run_sync_csv_stage >/dev/null 2>&1 || true
    fi
    sleep "$IDLE_SLEEP_SECONDS"
  done
}
|
|
upload_loop() {
  # Upload-stage driver: batches completed dataset folders and runs
  # pipeline03 once TARGET_BYTES or TARGET_FOLDERS is reached, or uploads
  # whatever remains once downloading and processing are fully drained.
  local iteration=0
  while true; do
    iteration=$((iteration + 1))
    # Guard every helper exactly like download_loop/process_loop: this loop
    # runs as a background job under `set -e`, so a transient helper failure
    # (e.g. a python hiccup or corrupt progress file) must be coerced to 0
    # instead of aborting the loop.
    local pruned
    pruned="$(prune_processed_raw_videos 2>/dev/null || true)"
    [[ "$pruned" =~ ^[0-9]+$ ]] || pruned=0
    local pending_download pending_process complete_pending_upload complete_pending_upload_bytes
    pending_download="$(count_pending_downloads 2>/dev/null || true)"
    [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
    pending_process="$(count_pending_process 2>/dev/null || true)"
    [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
    complete_pending_upload="$(count_complete_pending_upload 2>/dev/null || true)"
    [[ "$complete_pending_upload" =~ ^[0-9]+$ ]] || complete_pending_upload=0
    complete_pending_upload_bytes="$(bytes_complete_pending_upload 2>/dev/null || true)"
    [[ "$complete_pending_upload_bytes" =~ ^[0-9]+$ ]] || complete_pending_upload_bytes=0
    echo "[upload] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process complete_pending_upload=$complete_pending_upload complete_pending_upload_bytes=$complete_pending_upload_bytes pruned_raw_videos=$pruned"

    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[upload] reached max iterations: $MAX_ITERATIONS"
      break
    fi
    # Nothing complete: exit only when the whole pipeline is drained.
    if [[ "$complete_pending_upload" -eq 0 ]]; then
      if [[ "$pending_download" -eq 0 && "$pending_process" -eq 0 ]]; then
        echo "[upload] nothing left to upload"
        break
      fi
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi

    # Under both batch targets while upstream is still producing: wait for
    # a fuller batch before spending an upload cycle.
    if [[ "$complete_pending_upload_bytes" -lt "$TARGET_BYTES" && "$complete_pending_upload" -lt "$TARGET_FOLDERS" && ( "$pending_download" -gt 0 || "$pending_process" -gt 0 ) ]]; then
      sleep "$IDLE_SLEEP_SECONDS"
      continue
    fi

    # Once upstream is drained, upload even a partial batch (require_target=0).
    local require_target=1
    if [[ "$pending_download" -eq 0 && "$pending_process" -eq 0 ]]; then
      require_target=0
    fi
    if ! run_upload_stage "$require_target"; then
      echo "[upload] pipeline03 failed; retry after sleep"
      sleep "$IDLE_SLEEP_SECONDS"
    else
      run_sync_csv_stage >/dev/null 2>&1 || true
    fi
  done
}
|
|
cleanup_background_jobs() {
  # Best-effort SIGTERM of the given background loop PIDs.
  # Arguments: zero or more PIDs; empty strings are skipped.
  # Returns: always 0 (cleanup must never fail the caller).
  local jobs_to_kill=("$@")
  local job_pid  # was leaking into global scope without `local`
  for job_pid in "${jobs_to_kill[@]}"; do
    # kill -0 probes existence without signalling; skip blank/dead entries.
    if [[ -n "$job_pid" ]] && kill -0 "$job_pid" 2>/dev/null; then
      kill "$job_pid" 2>/dev/null || true
    fi
  done
}
|
|
run_all_loop() {
  # Run the download, process, and upload loops concurrently and wait for
  # all three; on interrupt or failure the trap tears down the survivors.
  DOWNLOAD_LOOP_PID=""
  PROCESS_LOOP_PID=""
  UPLOAD_LOOP_PID=""

  download_loop &
  DOWNLOAD_LOOP_PID=$!
  process_loop &
  PROCESS_LOOP_PID=$!
  upload_loop &
  UPLOAD_LOOP_PID=$!

  # Every exit path (signal, or `set -e` tripping on a failed wait below)
  # kills whichever loops are still running.
  trap 'cleanup_background_jobs "$DOWNLOAD_LOOP_PID" "$PROCESS_LOOP_PID" "$UPLOAD_LOOP_PID"' INT TERM EXIT

  local loop_pid
  for loop_pid in "$DOWNLOAD_LOOP_PID" "$PROCESS_LOOP_PID" "$UPLOAD_LOOP_PID"; do
    wait "$loop_pid"
  done

  # All loops finished cleanly; drop the teardown trap.
  trap - INT TERM EXIT
}
|
|
# Dispatch the requested stage.  STAGE defaults to "all" (see header), so an
# unmatched value can only come from operator error; fail loudly instead of
# silently doing nothing (the original case had no default arm).
case "$STAGE" in
  download)
    run_download_stage
    ;;
  process)
    run_process_stage
    ;;
  upload)
    run_upload_stage
    ;;
  all)
    run_all_loop
    ;;
  *)
    echo "[error] unknown STAGE: $STAGE (expected download|process|upload|all)" >&2
    exit 2
    ;;
esac
|
|