FangSen9000 commited on
Commit ·
2b36601
1
Parent(s): 40e6906
Add reproducible Sign-DWPose-2M pipeline and visualization tools
Browse files- .gitignore +1 -0
- Sign-DWPose-2M-metadata_ori.csv +0 -0
- reproduce_independently.sh +573 -0
- scripts/__pycache__/pipeline01_download_video_fix_caption.cpython-312.pyc +0 -0
- scripts/__pycache__/pipeline02_extract_dwpose_from_video.cpython-312.pyc +0 -0
- scripts/__pycache__/pipeline03_upload_to_huggingface.cpython-312.pyc +0 -0
- scripts/__pycache__/visualize_dwpose_npz.cpython-312.pyc +0 -0
- scripts/pipeline01_download_video_fix_caption.py +693 -0
- scripts/pipeline02_extract_dwpose_from_video.py +247 -0
- scripts/pipeline03_upload_to_huggingface.py +266 -0
- scripts/visualize_dwpose_npz.py +527 -0
- utils/__pycache__/draw_dw_lib.cpython-310.pyc +0 -0
- utils/__pycache__/draw_dw_lib.cpython-38.pyc +0 -0
- utils/__pycache__/preprocess_video.cpython-310.pyc +0 -0
- utils/__pycache__/preprocess_video.cpython-38.pyc +0 -0
- utils/__pycache__/preprocess_video_improve.cpython-310.pyc +0 -0
- utils/__pycache__/stats_npz.cpython-310.pyc +0 -0
- utils/__pycache__/stats_npz.cpython-312.pyc +0 -0
- utils/__pycache__/util.cpython-310.pyc +0 -0
- utils/__pycache__/util.cpython-38.pyc +0 -0
- utils/draw_dw_lib.py +294 -0
- utils/preprocess_video.py +259 -0
- utils/preprocess_video_improve.py +258 -0
- utils/stats_npz.py +95 -0
- utils/util.py +128 -0
.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
*.txt
|
Sign-DWPose-2M-metadata_ori.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
reproduce_independently.sh
ADDED
|
@@ -0,0 +1,573 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Orchestrates the Sign-DWPose-2M reproduction pipeline:
#   pipeline01 (download) -> pipeline02 (DWPose extraction) -> pipeline03 (HF upload)
set -euo pipefail

# --- Environment -------------------------------------------------------------
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): machine-specific conda path — adjust per host.
CONDA_SH="/research/cbim/vast/sf895/miniforge3/etc/profile.d/conda.sh"
CONDA_ENV="dwpose"

# --- Paths (all relative to the repo root) -----------------------------------
SOURCE_METADATA_CSV="$ROOT_DIR/Sign-DWPose-2M-metadata_ori.csv"
OUTPUT_METADATA_CSV="$ROOT_DIR/Sign-DWPose-2M-metadata_processed.csv"
RAW_VIDEO_DIR="$ROOT_DIR/raw_video"
RAW_CAPTION_DIR="$ROOT_DIR/raw_caption"
RAW_METADATA_DIR="$ROOT_DIR/raw_metadata"
DATASET_DIR="$ROOT_DIR/dataset"
ARCHIVE_DIR="$ROOT_DIR/archives"
STATS_NPZ="$ROOT_DIR/stats.npz"
PROGRESS_JSON="$ROOT_DIR/archive_upload_progress.json"

PIPELINE01="$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py"
PIPELINE02="$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py"
PIPELINE03="$ROOT_DIR/scripts/pipeline03_upload_to_huggingface.py"

# --- Tunables (overridable via the CLI flags parsed below) -------------------
STAGE="all"
LIMIT=""
VIDEO_IDS=()
FPS="24"
WORKERS=""
TARGET_BYTES="$((14 * 1024 * 1024 * 1024))"  # 14 GiB per upload batch
DOWNLOAD_BATCH_SIZE="1"
PROCESS_BATCH_SIZE=""
RAW_BACKLOG_LIMIT="340"      # pause downloads once this many raw videos await processing
MAX_RAW_VIDEO_BYTES="0"      # 0 = no raw-video disk cap
MAX_ITERATIONS="0"           # 0 = run loops until drained
IDLE_SLEEP_SECONDS="5"
REPO_ID="SignerX/Sign-DWPose-2M"
COOKIES_FILE=""
COOKIES_FROM_BROWSER=""
EXTRACTOR_ARGS=""

# Boolean flags (0 = off, 1 = on)
FORCE_METADATA=0
FORCE_SUBTITLES=0
FORCE_DOWNLOAD=0
FORCE_PROCESS=0
SKIP_VIDEO_DOWNLOAD=0
SKIP_SUBTITLES=0
DRY_RUN_UPLOAD=0
|
| 46 |
+
|
| 47 |
+
# Print CLI usage (option list and examples) to stdout.
print_usage() {
    cat <<EOF
Usage:
  bash reproduce_independently.sh [options]

Options:
  --stage {all,download,process,upload}
  --limit N
  --video-id ID
  --video-ids "ID1 ID2 ..."
  --fps N
  --workers N
  --target-bytes N
  --download-batch-size N
  --process-batch-size N
  --raw-backlog-limit N
  --max-raw-video-bytes N
  --max-iterations N
  --idle-sleep-seconds N
  --repo-id REPO
  --cookies FILE
  --cookies-from-browser BROWSER
  --extractor-args VALUE
  --force-metadata
  --force-subtitles
  --force-download
  --force-process
  --skip-video-download
  --skip-subtitles
  --dry-run-upload
  --help

Examples:
  bash reproduce_independently.sh --stage download --limit 10 --skip-video-download
  bash reproduce_independently.sh --stage process --video-id Bdj5MUf_3Hc --workers 1
  bash reproduce_independently.sh --stage upload --target-bytes 500000000
  bash reproduce_independently.sh --stage all --workers 8 --download-batch-size 1 --raw-backlog-limit 340
EOF
}
|
| 86 |
+
|
| 87 |
+
# Parse command-line options into the globals declared above.
while [[ $# -gt 0 ]]; do
    case "$1" in
        --stage) STAGE="$2"; shift 2 ;;
        --limit) LIMIT="$2"; shift 2 ;;
        --video-id) VIDEO_IDS+=("$2"); shift 2 ;;
        --video-ids)
            # Accept a single space-separated string of IDs.
            IFS=' ' read -r -a EXTRA_IDS <<< "$2"
            VIDEO_IDS+=("${EXTRA_IDS[@]}")
            shift 2
            ;;
        --fps) FPS="$2"; shift 2 ;;
        --workers) WORKERS="$2"; shift 2 ;;
        --target-bytes) TARGET_BYTES="$2"; shift 2 ;;
        --download-batch-size) DOWNLOAD_BATCH_SIZE="$2"; shift 2 ;;
        --process-batch-size) PROCESS_BATCH_SIZE="$2"; shift 2 ;;
        --raw-backlog-limit) RAW_BACKLOG_LIMIT="$2"; shift 2 ;;
        --max-raw-video-bytes) MAX_RAW_VIDEO_BYTES="$2"; shift 2 ;;
        --max-iterations) MAX_ITERATIONS="$2"; shift 2 ;;
        --idle-sleep-seconds) IDLE_SLEEP_SECONDS="$2"; shift 2 ;;
        --repo-id) REPO_ID="$2"; shift 2 ;;
        --cookies) COOKIES_FILE="$2"; shift 2 ;;
        --cookies-from-browser) COOKIES_FROM_BROWSER="$2"; shift 2 ;;
        --extractor-args) EXTRACTOR_ARGS="$2"; shift 2 ;;
        --force-metadata) FORCE_METADATA=1; shift ;;
        --force-subtitles) FORCE_SUBTITLES=1; shift ;;
        --force-download) FORCE_DOWNLOAD=1; shift ;;
        --force-process) FORCE_PROCESS=1; shift ;;
        --skip-video-download) SKIP_VIDEO_DOWNLOAD=1; shift ;;
        --skip-subtitles) SKIP_SUBTITLES=1; shift ;;
        --dry-run-upload) DRY_RUN_UPLOAD=1; shift ;;
        -h|--help)
            print_usage
            exit 0
            ;;
        *)
            echo "Unknown argument: $1" >&2
            print_usage
            exit 1
            ;;
    esac
done
|
| 197 |
+
|
| 198 |
+
# Fail fast on a missing conda bootstrap or an unknown stage name, then
# create every working directory the pipelines expect.
if [[ ! -f "$CONDA_SH" ]]; then
    echo "Missing conda init script: $CONDA_SH" >&2
    exit 1
fi

case "$STAGE" in
    all|download|process|upload) ;;
    *)
        echo "Invalid --stage: $STAGE" >&2
        exit 1
        ;;
esac

mkdir -p "$RAW_VIDEO_DIR" "$RAW_CAPTION_DIR" "$RAW_METADATA_DIR" "$DATASET_DIR"
|
| 209 |
+
|
| 210 |
+
# Run a command inside the "$CONDA_ENV" conda environment.
# CONDA_NO_PLUGINS=true disables conda plugins for the `conda run` call.
run_in_dwpose() {
    # shellcheck disable=SC1090
    source "$CONDA_SH"
    CONDA_NO_PLUGINS=true conda run -n "$CONDA_ENV" "$@"
}
|
| 215 |
+
|
| 216 |
+
# Append "--video-ids <ids...>" to the array whose NAME is passed as $1
# (bash nameref; requires bash >= 4.3) when explicit IDs were requested.
append_video_ids() {
    local -n target_ref=$1
    if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
        target_ref+=(--video-ids "${VIDEO_IDS[@]}")
    fi
}
|
| 222 |
+
|
| 223 |
+
# Assemble and run the pipeline01 (metadata + subtitles + video download)
# command inside the conda env. $1 optionally overrides the global LIMIT.
run_download_stage() {
    local stage_limit="${1:-$LIMIT}"
    local cmd=(python "$PIPELINE01"
        --source-metadata-csv "$SOURCE_METADATA_CSV"
        --output-metadata-csv "$OUTPUT_METADATA_CSV"
        --raw-video-dir "$RAW_VIDEO_DIR"
        --raw-caption-dir "$RAW_CAPTION_DIR"
        --raw-metadata-dir "$RAW_METADATA_DIR"
        --dataset-dir "$DATASET_DIR"
        --stats-npz "$STATS_NPZ"
    )

    # Optional flags are only passed through when set by the caller.
    if [[ -n "$stage_limit" ]]; then
        cmd+=(--limit "$stage_limit")
    fi
    append_video_ids cmd
    if [[ $FORCE_METADATA -eq 1 ]]; then
        cmd+=(--force-metadata)
    fi
    if [[ $FORCE_SUBTITLES -eq 1 ]]; then
        cmd+=(--force-subtitles)
    fi
    if [[ $FORCE_DOWNLOAD -eq 1 ]]; then
        cmd+=(--force-download)
    fi
    if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then
        cmd+=(--skip-video-download)
    fi
    if [[ $SKIP_SUBTITLES -eq 1 ]]; then
        cmd+=(--skip-subtitles)
    fi
    if [[ -n "$COOKIES_FROM_BROWSER" ]]; then
        cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER")
    fi
    if [[ -n "$COOKIES_FILE" ]]; then
        cmd+=(--cookies "$COOKIES_FILE")
    fi
    if [[ -n "$EXTRACTOR_ARGS" ]]; then
        cmd+=(--extractor-args "$EXTRACTOR_ARGS")
    fi

    run_in_dwpose "${cmd[@]}"
}
|
| 266 |
+
|
| 267 |
+
# Assemble and run the pipeline02 (DWPose extraction) command inside the
# conda env. Deletes each source video on success (frees raw-video disk).
# $1 optionally overrides the global LIMIT.
run_process_stage() {
    local stage_limit="${1:-$LIMIT}"
    local cmd=(python "$PIPELINE02"
        --raw-video-dir "$RAW_VIDEO_DIR"
        --dataset-dir "$DATASET_DIR"
        --stats-npz "$STATS_NPZ"
        --fps "$FPS"
        --delete-source-on-success
    )

    if [[ -n "$stage_limit" ]]; then
        cmd+=(--limit "$stage_limit")
    fi
    append_video_ids cmd
    if [[ -n "$WORKERS" ]]; then
        cmd+=(--workers "$WORKERS")
    fi
    if [[ $FORCE_PROCESS -eq 1 ]]; then
        cmd+=(--force)
    fi

    run_in_dwpose "${cmd[@]}"
}
|
| 290 |
+
|
| 291 |
+
# Assemble and run the pipeline03 (HuggingFace upload) command inside the
# conda env. $1 == "1" makes the uploader require at least TARGET_BYTES of
# data before shipping a batch.
run_upload_stage() {
    local require_target="${1:-0}"
    local cmd=(python "$PIPELINE03"
        --dataset-dir "$DATASET_DIR"
        --raw-video-dir "$RAW_VIDEO_DIR"
        --raw-caption-dir "$RAW_CAPTION_DIR"
        --raw-metadata-dir "$RAW_METADATA_DIR"
        --archive-dir "$ARCHIVE_DIR"
        --progress-path "$PROGRESS_JSON"
        --stats-npz "$STATS_NPZ"
        --repo-id "$REPO_ID"
        --target-bytes "$TARGET_BYTES"
    )

    if [[ "$require_target" == "1" ]]; then
        cmd+=(--require-target-bytes)
    fi
    if [[ $DRY_RUN_UPLOAD -eq 1 ]]; then
        cmd+=(--dry-run)
    fi

    run_in_dwpose "${cmd[@]}"
}
|
| 314 |
+
|
| 315 |
+
# Delete raw videos whose dataset folder already carries an npz/.complete
# marker; print the number of files removed.
prune_processed_raw_videos() {
    python - <<PY
from pathlib import Path

raw_dir = Path("$RAW_VIDEO_DIR")
dataset_dir = Path("$DATASET_DIR")

removed = 0
if raw_dir.exists():
    for candidate in raw_dir.iterdir():
        if not candidate.is_file():
            continue
        # The marker is written by pipeline02 once extraction succeeds.
        if (dataset_dir / candidate.stem / "npz" / ".complete").exists():
            candidate.unlink(missing_ok=True)
            removed += 1
print(removed)
PY
}
|
| 332 |
+
|
| 333 |
+
# Print the total byte size of regular files under $1; 0 if it is missing.
dir_size_bytes() {
    local target="$1"
    [[ -d "$target" ]] || { echo 0; return; }
    find "$target" -type f -printf '%s\n' | awk '{total += $1} END {print total + 0}'
}
|
| 341 |
+
|
| 342 |
+
# Print how many manifest rows still lack download_status == "ok".
# Prefers the processed CSV; falls back to the original manifest.
count_pending_downloads() {
    python - <<PY
import csv
import sys
from pathlib import Path

# Subtitle/caption columns can be very large; raise the csv field cap.
csv.field_size_limit(min(sys.maxsize, 10 * 1024 * 1024))

manifest = Path("$OUTPUT_METADATA_CSV")
if not manifest.exists():
    manifest = Path("$SOURCE_METADATA_CSV")

with manifest.open("r", encoding="utf-8-sig", newline="") as handle:
    remaining = sum(
        1
        for row in csv.DictReader(handle)
        if (row.get("download_status") or "").strip() != "ok"
    )
print(remaining)
PY
}
|
| 360 |
+
|
| 361 |
+
# Print the number of raw video files waiting for DWPose extraction.
count_pending_process() {
    python - <<PY
from pathlib import Path

raw_dir = Path("$RAW_VIDEO_DIR")
backlog = 0
if raw_dir.exists():
    backlog = sum(1 for entry in raw_dir.iterdir() if entry.is_file())
print(backlog)
PY
}
|
| 373 |
+
|
| 374 |
+
# Print how many completed dataset folders have not been uploaded yet.
# A folder counts when it has an npz/.complete marker and is absent from the
# "uploaded_folders" map in $PROGRESS_JSON.
count_complete_pending_upload() {
    python - <<PY
import json
from pathlib import Path

dataset_dir = Path("$DATASET_DIR")
progress_path = Path("$PROGRESS_JSON")

uploaded = set()
if progress_path.exists():
    uploaded = set(json.loads(progress_path.read_text()).get("uploaded_folders", {}).keys())

count = 0
# Guard against a missing dataset dir so this helper never crashes the
# calling loop (mirrors the exists() check in count_pending_process).
if dataset_dir.exists():
    for folder_path in dataset_dir.iterdir():
        if not folder_path.is_dir():
            continue
        if folder_path.name in uploaded:
            continue
        if (folder_path / "npz" / ".complete").exists():
            count += 1
print(count)
PY
}
|
| 394 |
+
|
| 395 |
+
# Print the total on-disk size in bytes of completed, not-yet-uploaded
# dataset folders (same selection criteria as count_complete_pending_upload).
bytes_complete_pending_upload() {
    python - <<PY
import json
from pathlib import Path

dataset_dir = Path("$DATASET_DIR")
progress_path = Path("$PROGRESS_JSON")

uploaded = set()
if progress_path.exists():
    uploaded = set(json.loads(progress_path.read_text()).get("uploaded_folders", {}).keys())

total = 0
# Guard against a missing dataset dir so this helper never crashes the
# calling loop (consistent with the other counter helpers).
if dataset_dir.exists():
    for folder_path in dataset_dir.iterdir():
        if not folder_path.is_dir():
            continue
        if folder_path.name in uploaded:
            continue
        if not (folder_path / "npz" / ".complete").exists():
            continue
        for path in folder_path.rglob("*"):
            if path.is_file():
                total += path.stat().st_size
print(total)
PY
}
|
| 418 |
+
|
| 419 |
+
# Continuously download pending videos, throttled by the processing backlog
# and (optionally) by total raw-video disk usage. Exits when the manifest
# has nothing left to download or MAX_ITERATIONS is reached.
download_loop() {
    local iteration=0
    while true; do
        iteration=$((iteration + 1))
        local pruned
        pruned="$(prune_processed_raw_videos)"
        local pending_download pending_process raw_video_bytes
        pending_download="$(count_pending_downloads)"
        pending_process="$(count_pending_process)"
        raw_video_bytes="$(dir_size_bytes "$RAW_VIDEO_DIR")"
        echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned"

        if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
            echo "[download] reached max iterations: $MAX_ITERATIONS"
            break
        fi
        if [[ "$pending_download" -eq 0 ]]; then
            echo "[download] nothing left to download"
            break
        fi
        # Backpressure: let pipeline02 drain the raw backlog before fetching more.
        if [[ "$pending_process" -ge "$RAW_BACKLOG_LIMIT" ]]; then
            echo "[download] backpressure: raw backlog $pending_process >= limit $RAW_BACKLOG_LIMIT"
            sleep "$IDLE_SLEEP_SECONDS"
            continue
        fi
        # Backpressure: optional cap on raw-video disk usage (0 disables).
        if [[ "$MAX_RAW_VIDEO_BYTES" -gt 0 && "$raw_video_bytes" -ge "$MAX_RAW_VIDEO_BYTES" ]]; then
            echo "[download] backpressure: raw_video_bytes $raw_video_bytes >= limit $MAX_RAW_VIDEO_BYTES"
            sleep "$IDLE_SLEEP_SECONDS"
            continue
        fi

        # A transient pipeline failure only delays the next attempt.
        if ! run_download_stage "$DOWNLOAD_BATCH_SIZE"; then
            echo "[download] pipeline01 failed; retry after sleep"
            sleep "$IDLE_SLEEP_SECONDS"
        fi
    done
}
|
| 456 |
+
|
| 457 |
+
# Continuously extract DWPose from downloaded raw videos. Exits only when
# both the raw backlog and the download queue are empty (or MAX_ITERATIONS
# is reached); otherwise idles while waiting for new downloads.
process_loop() {
    local iteration=0
    while true; do
        iteration=$((iteration + 1))
        local pruned
        pruned="$(prune_processed_raw_videos)"
        local pending_download pending_process
        pending_download="$(count_pending_downloads)"
        pending_process="$(count_pending_process)"
        echo "[process] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process pruned_raw_videos=$pruned"

        if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
            echo "[process] reached max iterations: $MAX_ITERATIONS"
            break
        fi
        if [[ "$pending_process" -eq 0 ]]; then
            # No raw videos yet; stop only once downloads are also done.
            if [[ "$pending_download" -eq 0 ]]; then
                echo "[process] nothing left to process"
                break
            fi
            sleep "$IDLE_SLEEP_SECONDS"
            continue
        fi

        # A transient pipeline failure only delays the next attempt.
        if ! run_process_stage "$PROCESS_BATCH_SIZE"; then
            echo "[process] pipeline02 failed; retry after sleep"
            sleep "$IDLE_SLEEP_SECONDS"
        fi
    done
}
|
| 487 |
+
|
| 488 |
+
# Continuously upload completed dataset folders. While upstream stages are
# still producing, waits until at least TARGET_BYTES of completed data has
# accumulated; once downloads and processing are drained, flushes whatever
# remains even if smaller than the target.
upload_loop() {
    local iteration=0
    while true; do
        iteration=$((iteration + 1))
        local pruned
        pruned="$(prune_processed_raw_videos)"
        local pending_download pending_process complete_pending_upload complete_pending_upload_bytes
        pending_download="$(count_pending_downloads)"
        pending_process="$(count_pending_process)"
        complete_pending_upload="$(count_complete_pending_upload)"
        complete_pending_upload_bytes="$(bytes_complete_pending_upload)"
        echo "[upload] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process complete_pending_upload=$complete_pending_upload complete_pending_upload_bytes=$complete_pending_upload_bytes pruned_raw_videos=$pruned"

        if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
            echo "[upload] reached max iterations: $MAX_ITERATIONS"
            break
        fi
        if [[ "$complete_pending_upload" -eq 0 ]]; then
            # Nothing ready; stop only once the whole pipeline is drained.
            if [[ "$pending_download" -eq 0 && "$pending_process" -eq 0 ]]; then
                echo "[upload] nothing left to upload"
                break
            fi
            sleep "$IDLE_SLEEP_SECONDS"
            continue
        fi

        # Keep accumulating toward the target batch size while upstream is
        # still producing.
        if [[ "$complete_pending_upload_bytes" -lt "$TARGET_BYTES" && ( "$pending_download" -gt 0 || "$pending_process" -gt 0 ) ]]; then
            sleep "$IDLE_SLEEP_SECONDS"
            continue
        fi

        # Final flush (upstream drained) may ship a smaller-than-target batch.
        local require_target=1
        if [[ "$pending_download" -eq 0 && "$pending_process" -eq 0 ]]; then
            require_target=0
        fi
        if ! run_upload_stage "$require_target"; then
            echo "[upload] pipeline03 failed; retry after sleep"
            sleep "$IDLE_SLEEP_SECONDS"
        fi
    done
}
|
| 529 |
+
|
| 530 |
+
# Best-effort terminate any still-running background loop whose PID is
# passed as an argument. Empty arguments are skipped.
# Fix: iterate "$@" directly instead of copying into a local array —
# expanding an empty array with "${arr[@]}" errors under `set -u` on
# bash < 4.4, which would abort the EXIT trap if ever called with no PIDs.
cleanup_background_jobs() {
    local job_pid
    for job_pid in "$@"; do
        if [[ -n "$job_pid" ]] && kill -0 "$job_pid" 2>/dev/null; then
            kill "$job_pid" 2>/dev/null || true
        fi
    done
}
|
| 538 |
+
|
| 539 |
+
# Run the three stage loops concurrently as background jobs. A trap on
# INT/TERM/EXIT terminates any surviving loop so the script never leaves
# orphaned children; the trap is cleared once all loops finish cleanly.
run_all_loop() {
    DOWNLOAD_LOOP_PID=""
    PROCESS_LOOP_PID=""
    UPLOAD_LOOP_PID=""

    download_loop &
    DOWNLOAD_LOOP_PID=$!
    process_loop &
    PROCESS_LOOP_PID=$!
    upload_loop &
    UPLOAD_LOOP_PID=$!

    trap 'cleanup_background_jobs "$DOWNLOAD_LOOP_PID" "$PROCESS_LOOP_PID" "$UPLOAD_LOOP_PID"' INT TERM EXIT

    # NOTE: under `set -e`, a loop exiting nonzero aborts here and the EXIT
    # trap then kills the remaining loops.
    wait "$DOWNLOAD_LOOP_PID"
    wait "$PROCESS_LOOP_PID"
    wait "$UPLOAD_LOOP_PID"

    trap - INT TERM EXIT
}
|
| 559 |
+
|
| 560 |
+
# Dispatch: run a single stage once, or all three as concurrent loops.
# $STAGE was validated above, so no default arm is needed.
case "$STAGE" in
    download)
        run_download_stage
        ;;
    process)
        run_process_stage
        ;;
    upload)
        run_upload_stage
        ;;
    all)
        run_all_loop
        ;;
esac
|
scripts/__pycache__/pipeline01_download_video_fix_caption.cpython-312.pyc
ADDED
|
Binary file (35.9 kB). View file
|
|
|
scripts/__pycache__/pipeline02_extract_dwpose_from_video.cpython-312.pyc
ADDED
|
Binary file (13.1 kB). View file
|
|
|
scripts/__pycache__/pipeline03_upload_to_huggingface.cpython-312.pyc
ADDED
|
Binary file (13.8 kB). View file
|
|
|
scripts/__pycache__/visualize_dwpose_npz.cpython-312.pyc
ADDED
|
Binary file (29.6 kB). View file
|
|
|
scripts/pipeline01_download_video_fix_caption.py
ADDED
|
@@ -0,0 +1,693 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import csv
|
| 5 |
+
import html
|
| 6 |
+
import json
|
| 7 |
+
import re
|
| 8 |
+
import shutil
|
| 9 |
+
import subprocess
|
| 10 |
+
import sys
|
| 11 |
+
import tempfile
|
| 12 |
+
import time
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from typing import Dict, Iterable, List, Sequence, Tuple
|
| 15 |
+
from urllib.parse import parse_qs, urlparse
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Make the repository root importable so `utils.*` resolves when this script
# is executed directly (not as an installed package).
REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

from utils.stats_npz import ensure_record, load_stats, save_stats


# Default file locations, all relative to the repository root.
DEFAULT_SOURCE_METADATA_CSV = REPO_ROOT / "Sign-DWPose-2M-metadata_ori.csv"
DEFAULT_OUTPUT_METADATA_CSV = REPO_ROOT / "Sign-DWPose-2M-metadata_processed.csv"
DEFAULT_RAW_VIDEO_DIR = REPO_ROOT / "raw_video"
DEFAULT_RAW_CAPTION_DIR = REPO_ROOT / "raw_caption"
DEFAULT_RAW_METADATA_DIR = REPO_ROOT / "raw_metadata"
DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
# yt-dlp extractor arguments: YouTube player clients to try, in order.
DEFAULT_YT_DLP_EXTRACTOR_ARGS = "youtube:player_client=tv,web_safari,web"
COOKIE_DOMAINS = ("youtube.com", "google.com", "googlevideo.com", "ytimg.com")
# Matches a subtitle cue timing line such as "00:01:02.345 --> 00:01:04.000"
# (WebVTT-style HH:MM:SS.mmm timestamps), capturing "start" and "end".
TIMESTAMP_LINE_RE = re.compile(
    r"^(?P<start>\d{2}:\d{2}:\d{2}\.\d{3})\s+-->\s+(?P<end>\d{2}:\d{2}:\d{2}\.\d{3})"
)
TAG_RE = re.compile(r"<[^>]+>")  # strips inline <...> markup tags
ZERO_WIDTH_RE = re.compile(r"[\u200b\u200c\u200d\ufeff]+")  # zero-width chars / BOM
# Column order for the processed metadata CSV written by this pipeline.
DEFAULT_COLUMNS = [
    "video_id",
    "sign_language",
    "title",
    "duration_sec",
    "start_sec",
    "end_sec",
    "subtitle_languages",
    "subtitle_dir_path",
    "subtitle_en_source",
    "raw_video_path",
    "raw_metadata_path",
    "metadata_status",
    "subtitle_status",
    "download_status",
    "error",
    "processed_at",
]
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def parse_args() -> argparse.Namespace:
    """Parse CLI options for pipeline step 01 (metadata, captions, video download)."""
    parser = argparse.ArgumentParser(
        description="Download raw videos and enrich the Sign-DWPose-2M metadata CSV."
    )
    # Input/output locations (all default to paths under the repository root).
    parser.add_argument("--source-metadata-csv", type=Path, default=DEFAULT_SOURCE_METADATA_CSV)
    parser.add_argument("--output-metadata-csv", type=Path, default=DEFAULT_OUTPUT_METADATA_CSV)
    parser.add_argument("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
    parser.add_argument("--raw-caption-dir", type=Path, default=DEFAULT_RAW_CAPTION_DIR)
    parser.add_argument("--raw-metadata-dir", type=Path, default=DEFAULT_RAW_METADATA_DIR)
    parser.add_argument("--dataset-dir", type=Path, default=DEFAULT_DATASET_DIR)
    parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
    # Row selection.
    parser.add_argument("--limit", type=int, default=None)
    parser.add_argument("--video-ids", nargs="*", default=None)
    # Force flags re-run a stage even when it previously succeeded.
    parser.add_argument("--force-metadata", action="store_true")
    parser.add_argument("--force-subtitles", action="store_true")
    parser.add_argument("--force-download", action="store_true")
    # Skip flags disable a stage entirely.
    parser.add_argument("--skip-video-download", action="store_true")
    parser.add_argument("--skip-subtitles", action="store_true")
    # Only process videos that already have local artifacts on disk.
    parser.add_argument("--local-only", action="store_true")
    # Optional pause between rows (rate limiting against YouTube).
    parser.add_argument("--sleep-seconds", type=float, default=0.0)
    # yt-dlp authentication / extractor tuning.
    parser.add_argument("--cookies", type=Path, default=None)
    parser.add_argument("--cookies-from-browser", default=None)
    parser.add_argument("--extractor-args", default=DEFAULT_YT_DLP_EXTRACTOR_ARGS)
    return parser.parse_args()
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def read_manifest(csv_path: Path) -> Tuple[List[Dict[str, str]], List[str]]:
    """Load the metadata CSV into row dicts plus an ordered column list.

    Supports two layouts: a processed CSV whose header starts with
    "video_id", and the original headerless CSV whose first two columns are
    the video id and sign language. Every returned row is padded so it has a
    value (possibly "") for every column in the returned fieldname list.

    Raises:
        FileNotFoundError: if *csv_path* does not exist.
    """
    if not csv_path.exists():
        raise FileNotFoundError(f"Metadata CSV not found: {csv_path}")

    # Caption-derived cells can be very large; raise the csv parser's field
    # limit (capped at sys.maxsize) before reading.
    csv.field_size_limit(min(sys.maxsize, max(csv.field_size_limit(), 10 * 1024 * 1024)))

    with csv_path.open("r", encoding="utf-8-sig", newline="") as handle:
        rows = list(csv.reader(handle))

    if not rows:
        return [], DEFAULT_COLUMNS.copy()

    first = rows[0]
    if first and first[0] == "video_id":
        # Headered (already processed) manifest: re-read with DictReader.
        with csv_path.open("r", encoding="utf-8-sig", newline="") as handle:
            reader = csv.DictReader(handle)
            manifest_rows = [dict(row) for row in reader]
            fieldnames = list(reader.fieldnames or [])
    else:
        # Headerless source manifest: only the first two columns are defined.
        manifest_rows = []
        for row in rows:
            if not row:
                continue
            manifest_rows.append(
                {
                    "video_id": row[0].strip(),
                    "sign_language": row[1].strip() if len(row) > 1 else "",
                }
            )
        fieldnames = []

    # Canonical columns first, then any extra columns found in the file.
    ordered_fieldnames = []
    for column in DEFAULT_COLUMNS + fieldnames:
        if column and column not in ordered_fieldnames:
            ordered_fieldnames.append(column)

    for row in manifest_rows:
        for column in ordered_fieldnames:
            row.setdefault(column, "")

    return manifest_rows, ordered_fieldnames
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def write_manifest(csv_path: Path, rows: Sequence[Dict[str, str]], fieldnames: Sequence[str]) -> None:
    """Atomically rewrite the manifest CSV: write to a sibling .tmp file, then rename."""
    staging_path = csv_path.with_suffix(csv_path.suffix + ".tmp")
    with staging_path.open("w", encoding="utf-8", newline="") as out:
        writer = csv.DictWriter(out, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(
            {column: row.get(column, "") for column in fieldnames} for row in rows
        )
    # Atomic on POSIX: readers never observe a half-written manifest.
    staging_path.replace(csv_path)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def build_yt_dlp_base_command(args: argparse.Namespace) -> List[str]:
    """Assemble the shared yt-dlp argv prefix (JS runtime, cookies, extractor args)."""
    base: List[str] = ["yt-dlp", "--newline"]
    node_binary = resolve_node_binary()
    if node_binary is not None:
        base += ["--js-runtimes", f"node:{node_binary}"]
    # `_effective_cookies` is the sanitized temp cookie file set by main().
    effective_cookies = getattr(args, "_effective_cookies", None)
    if effective_cookies:
        base += ["--cookies", str(effective_cookies)]
    if args.cookies_from_browser:
        base += ["--cookies-from-browser", args.cookies_from_browser]
    if args.extractor_args:
        base += ["--extractor-args", args.extractor_args]
    return base
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def resolve_node_binary() -> Path | None:
|
| 153 |
+
node_path = shutil.which("node")
|
| 154 |
+
if node_path:
|
| 155 |
+
return Path(node_path)
|
| 156 |
+
|
| 157 |
+
python_path = Path(sys.executable).resolve()
|
| 158 |
+
for parent in python_path.parents:
|
| 159 |
+
fallback = parent / "bin" / "node"
|
| 160 |
+
if fallback.exists():
|
| 161 |
+
return fallback
|
| 162 |
+
return None
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def sanitize_cookie_file(cookie_path: Path) -> Path:
    """Copy *cookie_path* into a temp Netscape-format cookie file for yt-dlp.

    Keeps only cookie rows whose domain matches COOKIE_DOMAINS, normalizes the
    include-subdomains flag from the leading-dot convention, and always writes
    the mandatory "# Netscape HTTP Cookie File" magic header — Python's
    http.cookiejar (used by yt-dlp) refuses to load a cookie file without it.
    The caller is responsible for deleting the returned temp file.
    """
    tmp_handle = tempfile.NamedTemporaryFile(
        mode="w",
        encoding="utf-8",
        suffix=".txt",
        prefix="sign_dwpose_cookies_",
        delete=False,
    )
    with cookie_path.open("r", encoding="utf-8", errors="ignore") as src, tmp_handle as dst:
        # Bug fix: the magic header was previously only written when the
        # source file happened to contain a comment line; emit it always.
        dst.write("# Netscape HTTP Cookie File\n")
        for raw_line in src:
            line = raw_line.rstrip("\n")
            if not line or line.startswith("#"):
                continue

            parts = line.split("\t")
            if len(parts) < 7:
                continue  # not a valid 7-field Netscape cookie row
            domain = parts[0].strip()
            if not any(token in domain for token in COOKIE_DOMAINS):
                continue
            # Field 2 (include-subdomains) is exported inconsistently by some
            # browsers; derive it from the leading-dot convention instead.
            parts[1] = "TRUE" if domain.startswith(".") else "FALSE"
            dst.write("\t".join(parts[:7]) + "\n")
    return Path(tmp_handle.name)
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def run_command(command: Sequence[str]) -> subprocess.CompletedProcess[str]:
    """Execute *command*, capturing decoded stdout/stderr; never raises on nonzero exit."""
    completed = subprocess.run(command, capture_output=True, text=True)
    return completed
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def youtube_url(video_id: str) -> str:
    """Return the canonical YouTube watch URL for *video_id*."""
    return "https://www.youtube.com/watch?v=" + video_id
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def fetch_metadata(video_id: str, args: argparse.Namespace) -> Tuple[Dict[str, object], str]:
    """Fetch the full yt-dlp info JSON for *video_id* without downloading media.

    Returns (metadata_dict, error_string); the error string is currently
    always "" because failures raise instead.

    Raises:
        RuntimeError: if yt-dlp exits nonzero (stderr/stdout used as message).
    """
    command = build_yt_dlp_base_command(args)
    # -J prints one JSON document for the video; --skip-download avoids media.
    command.extend(["-J", "--skip-download", youtube_url(video_id)])
    result = run_command(command)
    if result.returncode != 0:
        raise RuntimeError(result.stderr.strip() or result.stdout.strip() or "yt-dlp metadata failed")
    return json.loads(result.stdout), ""
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def caption_entry_has_translation(entry: Dict[str, object]) -> bool:
    """True when a caption entry's URL carries a `tlang` (auto-translation) parameter."""
    url = str(entry.get("url") or "")
    if not url:
        return False
    query_params = parse_qs(urlparse(url).query)
    return "tlang" in query_params
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def filter_caption_languages(metadata: Dict[str, object]) -> Tuple[List[str], List[str], str]:
    """Classify the caption tracks advertised in yt-dlp metadata.

    Returns (manual_langs, native_automatic_langs, english_translation_lang):
    manually-authored subtitle languages, ASR-generated languages that are not
    machine translations, and the single preferred auto-translated English
    track name (e.g. "en-fr") — or "" when no translation is available or
    needed. "live_chat" pseudo-tracks are ignored throughout.
    """
    manual = sorted(
        lang
        for lang in (metadata.get("subtitles") or {}).keys()
        if lang and lang != "live_chat"
    )
    native_automatic: List[str] = []
    english_translations: List[str] = []
    for lang, entries in sorted((metadata.get("automatic_captions") or {}).items()):
        if not lang or lang == "live_chat" or lang in manual:
            continue
        entry_list = entries if isinstance(entries, list) else []
        # Tracks whose URL carries `tlang` are machine translations; keep only
        # the English ones as translation candidates.
        if any(isinstance(entry, dict) and caption_entry_has_translation(entry) for entry in entry_list):
            if lang.startswith("en-"):
                english_translations.append(lang)
            continue
        native_automatic.append(lang)

    english_translation = ""
    if english_translations:
        # Prefer a translation derived from a manual track, then native ASR.
        preferred_sources = manual + native_automatic
        for source_lang in preferred_sources:
            candidate = f"en-{source_lang}"
            if candidate in english_translations:
                english_translation = candidate
                break
        if not english_translation:
            english_translation = sorted(english_translations)[0]
    elif manual or native_automatic:
        # Nothing advertised: request a translation from the first source.
        english_translation = f"en-{(manual + native_automatic)[0]}"
    return manual, native_automatic, english_translation
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def download_subtitles(
    video_id: str,
    subtitle_dir: Path,
    manual_langs: Sequence[str],
    native_automatic_langs: Sequence[str],
    english_translation_lang: str,
    args: argparse.Namespace,
) -> str:
    """Download all requested caption tracks for *video_id* as .vtt files.

    Returns "" on success (or when nothing was requested), or yt-dlp's error
    text when the command failed but at least one .vtt was still produced
    (treated by the caller as a partial success).

    Raises:
        RuntimeError: if the command failed and no .vtt file was written.
    """
    subtitle_dir.mkdir(parents=True, exist_ok=True)
    automatic_langs = list(native_automatic_langs)
    if english_translation_lang:
        automatic_langs.append(english_translation_lang)
    requested_langs = list(manual_langs) + automatic_langs
    if not requested_langs:
        return ""

    command = build_yt_dlp_base_command(args)
    command.extend(
        [
            "--skip-download",
            "--sub-format",
            "vtt",
            "--convert-subs",
            "vtt",
            "--output",
            str(subtitle_dir / "%(id)s.%(ext)s"),
            "--sub-langs",
            ",".join(requested_langs),
        ]
    )
    # --write-subs fetches manual tracks, --write-auto-subs fetches ASR/translated ones.
    if manual_langs:
        command.append("--write-subs")
    if automatic_langs:
        command.append("--write-auto-subs")
    command.append(youtube_url(video_id))

    result = run_command(command)
    if result.returncode != 0:
        stderr = result.stderr.strip()
        stdout = result.stdout.strip()
        # Some tracks may have downloaded before the failure; keep them.
        if any(subtitle_dir.glob(f"{video_id}.*.vtt")):
            return stderr or stdout
        raise RuntimeError(stderr or stdout or "subtitle download failed")
    return ""
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def subtitle_dir_for_video(dataset_dir: Path, video_id: str) -> Path:
    """Per-video caption directory inside the dataset tree."""
    return dataset_dir.joinpath(video_id, "captions")
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def find_video_file(raw_video_dir: Path, video_id: str) -> Path | None:
|
| 304 |
+
candidates = []
|
| 305 |
+
for path in raw_video_dir.glob(f"{video_id}.*"):
|
| 306 |
+
if path.suffix in {".mp4", ".mkv", ".webm", ".mov"}:
|
| 307 |
+
candidates.append(path)
|
| 308 |
+
return sorted(candidates)[0] if candidates else None
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def download_video(video_id: str, raw_video_dir: Path, args: argparse.Namespace) -> Tuple[str, str]:
    """Download the video with yt-dlp into *raw_video_dir*, merged to mp4.

    Returns (repo_relative_video_path_or_"", error_string); the error string
    is currently always "" because hard failures raise instead.

    Raises:
        RuntimeError: if yt-dlp failed AND no video file ended up on disk.
    """
    raw_video_dir.mkdir(parents=True, exist_ok=True)
    command = build_yt_dlp_base_command(args)
    command.extend(
        [
            "--output",
            str(raw_video_dir / "%(id)s.%(ext)s"),
            "--merge-output-format",
            "mp4",
            youtube_url(video_id),
        ]
    )
    result = run_command(command)
    # A nonzero exit is tolerated when a file is present on disk anyway
    # (e.g. the file already existed from an earlier run).
    video_path = find_video_file(raw_video_dir, video_id)
    if result.returncode != 0 and not video_path:
        raise RuntimeError(result.stderr.strip() or result.stdout.strip() or "video download failed")
    return str(video_path.relative_to(REPO_ROOT)) if video_path else "", ""
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def subtitle_file_language(path: Path, video_id: str) -> str:
    """Extract the language tag from a "<video_id>.<lang>.vtt" filename.

    Returns "" when the filename does not belong to *video_id*.
    """
    prefix = f"{video_id}."
    if not path.name.startswith(prefix):
        return ""
    return path.name[len(prefix):].removesuffix(".vtt")
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def clean_vtt_to_text(path: Path) -> str:
    """Flatten a .vtt file into a single normalized text string."""
    cue_lines = extract_vtt_text_lines(path)
    return normalize_subtitle_lines(cue_lines)
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def extract_vtt_text_lines(path: Path) -> List[str]:
    """Return the cleaned cue text lines of a .vtt file, in file order.

    Drops the WEBVTT header, NOTE/Kind/Language lines, cue timing lines,
    bare numeric cue identifiers, inline tags, and zero-width characters;
    HTML entities are unescaped. The file is read tolerantly (errors ignored).
    """
    lines: List[str] = []
    with path.open("r", encoding="utf-8", errors="ignore") as handle:
        for raw_line in handle:
            line = raw_line.strip()
            if not line:
                continue
            if line == "WEBVTT" or line.startswith("NOTE") or line.startswith("Kind:") or line.startswith("Language:"):
                continue
            if TIMESTAMP_LINE_RE.match(line) or "-->" in line:
                continue
            if line.isdigit():
                # Bare numeric lines are cue sequence numbers, not text.
                continue
            line = TAG_RE.sub("", line)
            line = ZERO_WIDTH_RE.sub("", html.unescape(line)).strip()
            if not line:
                continue
            lines.append(line)
    return lines
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def normalize_subtitle_lines(lines: Iterable[str]) -> str:
    """Merge cleaned subtitle lines into one normalized text string.

    Consecutive duplicate lines are dropped, lines beginning with closing
    punctuation are glued to the previous line, and continuation lines after
    a trailing hyphen/dash/slash or ellipsis are joined with a space. The
    merged text then has whitespace collapsed and punctuation spacing fixed.

    Accepts any iterable of strings — callers pass generators, so the
    parameter is annotated ``Iterable`` rather than ``Sequence``.
    """
    normalized_lines: List[str] = []
    for line in lines:
        if not line:
            continue
        if normalized_lines and normalized_lines[-1] == line:
            # Consecutive duplicates (common in auto-captions) are dropped.
            continue
        if normalized_lines and line[:1] in {",", ".", "!", "?", ";", ":", "%", ")", "]", "}"}:
            # Leading punctuation belongs to the previous line.
            normalized_lines[-1] = normalized_lines[-1].rstrip() + line
            continue
        if normalized_lines and normalized_lines[-1].endswith(("-", "–", "—", "/")):
            line = line.lstrip("-–—").lstrip()
            normalized_lines[-1] = normalized_lines[-1].rstrip() + " " + line
            continue
        if normalized_lines and normalized_lines[-1].endswith("..."):
            line = line.lstrip(".").lstrip()
            normalized_lines[-1] = normalized_lines[-1].rstrip() + " " + line
            continue
        normalized_lines.append(line)

    text = " ".join(normalized_lines)
    text = re.sub(r"\s+", " ", text).strip()
    text = re.sub(r"\s*-\s*-\s*", " - ", text)
    text = re.sub(r"\.{4,}", "...", text)
    text = re.sub(r"\s*\.\.\.\s*", " ... ", text)
    text = re.sub(r"\s+", " ", text).strip()
    # No space before closing punctuation, none just inside brackets.
    text = re.sub(r"\s+([,.;:!?%])", r"\1", text)
    text = re.sub(r"([(\[{])\s+", r"\1", text)
    text = re.sub(r"\s+([)\]}])", r"\1", text)
    return text
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def timestamp_to_seconds(value: str) -> float:
    """Convert an "HH:MM:SS.mmm" timestamp into seconds as a float."""
    hours, minutes, seconds = value.split(":")
    whole_minutes = 3600 * int(hours) + 60 * int(minutes)
    return whole_minutes + float(seconds)
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def parse_vtt_segments(path: Path) -> List[Dict[str, object]]:
    """Parse a .vtt file into [{"start_sec", "end_sec", "text"}, ...] cues.

    Cue text is cleaned the same way as extract_vtt_text_lines (header/NOTE
    lines, tags, zero-width characters removed; entities unescaped) and then
    flattened via normalize_subtitle_lines; cues whose text normalizes to ""
    are dropped. Times are rounded to milliseconds.
    """
    segments: List[Dict[str, object]] = []
    current_start = ""
    current_end = ""
    current_lines: List[str] = []

    def flush_segment() -> None:
        # Emit the cue accumulated so far, if it has both timing and text.
        nonlocal current_start, current_end, current_lines
        if not current_start or not current_end:
            current_lines = []
            return
        text = normalize_subtitle_lines(current_lines)
        if text:
            segments.append(
                {
                    "start_sec": round(timestamp_to_seconds(current_start), 3),
                    "end_sec": round(timestamp_to_seconds(current_end), 3),
                    "text": text,
                }
            )
        current_start = ""
        current_end = ""
        current_lines = []

    with path.open("r", encoding="utf-8", errors="ignore") as handle:
        for raw_line in handle:
            line = raw_line.strip()
            if not line:
                # A blank line terminates the current cue block.
                flush_segment()
                continue
            if line == "WEBVTT" or line.startswith("NOTE") or line.startswith("Kind:") or line.startswith("Language:"):
                continue
            match = TIMESTAMP_LINE_RE.match(line)
            if match:
                # A new timing line implicitly closes the previous cue.
                flush_segment()
                current_start = match.group("start")
                current_end = match.group("end")
                continue
            if line.isdigit():
                continue  # cue sequence number
            line = TAG_RE.sub("", line)
            line = ZERO_WIDTH_RE.sub("", html.unescape(line)).strip()
            if not line:
                continue
            current_lines.append(line)
    flush_segment()
    return segments
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def load_subtitle_payloads(subtitle_dir: Path, video_id: str) -> Dict[str, Dict[str, object]]:
    """Read every <video_id>.<lang>.vtt in *subtitle_dir* into per-language payloads.

    Each payload holds the flattened "text", the timed "segments" list, and the
    source "vtt_path" filename. Languages whose text normalizes to "" are omitted.
    """
    subtitle_payloads: Dict[str, Dict[str, object]] = {}
    for path in sorted(subtitle_dir.glob(f"{video_id}.*.vtt")):
        lang = subtitle_file_language(path, video_id)
        if not lang:
            continue
        segments = parse_vtt_segments(path)
        # Re-normalize across cue boundaries so duplicated/continued cue text merges.
        text = normalize_subtitle_lines(segment["text"] for segment in segments)
        if text:
            subtitle_payloads[lang] = {
                "text": text,
                "segments": segments,
                "vtt_path": path.name,
            }
    return subtitle_payloads
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
def select_english_subtitle(subtitle_payloads: Dict[str, Dict[str, object]]) -> Tuple[str, str]:
    """Pick the best English transcript and describe its provenance.

    Preference order: a plain "en" track, then auto-translated "en-<src>"
    variants (alphabetically first), then "<src>-en" style tracks. Returns
    ("", "") when no English text is available.
    """
    direct = subtitle_payloads.get("en")
    if direct is not None:
        return str(direct["text"]), "manual_or_auto_en"

    en_prefixed = [
        lang
        for lang in subtitle_payloads
        if lang.startswith("en-") or lang.lower().startswith("en_")
    ]
    if en_prefixed:
        best = min(en_prefixed)
        return str(subtitle_payloads[best]["text"]), f"translated_from_{best[3:]}"

    en_suffixed = [lang for lang in subtitle_payloads if lang.endswith("-en")]
    if en_suffixed:
        best = min(en_suffixed)
        return str(subtitle_payloads[best]["text"]), f"translated_from_{best[:-3]}"

    return "", ""
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
def persist_raw_metadata(raw_metadata_dir: Path, video_id: str, metadata: Dict[str, object]) -> str:
    """Write the raw yt-dlp metadata JSON and return its repo-relative path."""
    raw_metadata_dir.mkdir(parents=True, exist_ok=True)
    destination = raw_metadata_dir / f"{video_id}.json"
    destination.write_text(
        json.dumps(metadata, ensure_ascii=False, indent=2), encoding="utf-8"
    )
    return str(destination.relative_to(REPO_ROOT))
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def iter_target_rows(
|
| 495 |
+
rows: Sequence[Dict[str, str]],
|
| 496 |
+
video_ids: Iterable[str] | None,
|
| 497 |
+
limit: int | None,
|
| 498 |
+
local_video_ids: set[str] | None = None,
|
| 499 |
+
args: argparse.Namespace | None = None,
|
| 500 |
+
) -> List[Dict[str, str]]:
|
| 501 |
+
video_id_filter = set(video_ids or [])
|
| 502 |
+
selected = []
|
| 503 |
+
for row in rows:
|
| 504 |
+
video_id = row["video_id"]
|
| 505 |
+
if video_id_filter and video_id not in video_id_filter:
|
| 506 |
+
continue
|
| 507 |
+
if local_video_ids is not None and video_id not in local_video_ids:
|
| 508 |
+
continue
|
| 509 |
+
if args is not None and not row_needs_processing(row, args):
|
| 510 |
+
continue
|
| 511 |
+
selected.append(row)
|
| 512 |
+
if limit is not None and len(selected) >= limit:
|
| 513 |
+
break
|
| 514 |
+
return selected
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
def collect_local_video_ids(args: argparse.Namespace) -> set[str]:
    """Ids already present locally: dataset/<id>/captions dirs plus raw metadata JSONs."""
    found: set[str] = set()
    if args.dataset_dir.exists():
        found.update(
            entry.name
            for entry in args.dataset_dir.iterdir()
            if entry.is_dir() and (entry / "captions").exists()
        )
    if args.raw_metadata_dir.exists():
        found.update(json_path.stem for json_path in args.raw_metadata_dir.glob("*.json"))
    return found
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
def row_needs_processing(row: Dict[str, str], args: argparse.Namespace) -> bool:
    """Decide whether a manifest row still needs any pipeline work.

    Any --force-* flag forces reprocessing; otherwise a row is pending when
    any enabled stage (metadata, subtitles, download) is not yet "ok".
    """
    if args.force_metadata or args.force_subtitles or args.force_download:
        return True

    def status(column: str) -> str:
        return (row.get(column) or "").strip()

    if status("metadata_status") != "ok":
        return True
    if not args.skip_subtitles and status("subtitle_status") != "ok":
        return True
    return not args.skip_video_download and status("download_status") != "ok"
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
def main() -> None:
    """Pipeline step 01 driver: fetch metadata, captions, and raw videos.

    State (manifest CSV + stats.npz) is persisted after every row so an
    interrupted run can be resumed from where it stopped.
    """
    args = parse_args()
    # Sanitize the cookie export into a temp Netscape file; removed on exit.
    temp_cookie_path: Path | None = None
    if args.cookies:
        temp_cookie_path = sanitize_cookie_file(args.cookies)
        args._effective_cookies = temp_cookie_path
    else:
        args._effective_cookies = None
    # Resume from the processed manifest when it exists, else start from source.
    manifest_input_path = args.output_metadata_csv if args.output_metadata_csv.exists() else args.source_metadata_csv
    rows, fieldnames = read_manifest(manifest_input_path)
    local_video_ids = collect_local_video_ids(args) if args.local_only else None
    selected_rows = iter_target_rows(rows, args.video_ids, args.limit, local_video_ids, args)
    stats = load_stats(args.stats_npz)

    try:
        args.raw_video_dir.mkdir(parents=True, exist_ok=True)
        args.raw_metadata_dir.mkdir(parents=True, exist_ok=True)
        args.dataset_dir.mkdir(parents=True, exist_ok=True)

        for index, row in enumerate(selected_rows, start=1):
            video_id = row["video_id"].strip()
            if not video_id:
                continue
            stats_record = ensure_record(stats, video_id)

            print(f"[{index}/{len(selected_rows)}] Processing {video_id}")
            metadata_error = ""
            subtitle_error = ""
            download_error = ""
            metadata: Dict[str, object] | None = None

            # --- Stage 1: metadata (cached on disk as raw_metadata/<id>.json) ---
            try:
                metadata_path = args.raw_metadata_dir / f"{video_id}.json"
                if metadata_path.exists() and not args.force_metadata:
                    with metadata_path.open("r", encoding="utf-8") as handle:
                        metadata = json.load(handle)
                else:
                    metadata, metadata_error = fetch_metadata(video_id, args)
                    row["raw_metadata_path"] = persist_raw_metadata(args.raw_metadata_dir, video_id, metadata)
                row["metadata_status"] = "ok"
            except Exception as exc:
                # Without metadata the later stages cannot run: record the
                # failure, persist state, and move on to the next row.
                metadata_error = str(exc)
                row["metadata_status"] = "failed"
                row["error"] = metadata_error
                row["processed_at"] = time.strftime("%Y-%m-%d %H:%M:%S")
                stats_record["metadata_status"] = "failed"
                stats_record["last_error"] = metadata_error
                stats_record["updated_at"] = row["processed_at"]
                write_manifest(args.output_metadata_csv, rows, fieldnames)
                save_stats(args.stats_npz, stats)
                print(f" metadata failed: {metadata_error}")
                if args.sleep_seconds > 0:
                    time.sleep(args.sleep_seconds)
                continue

            assert metadata is not None
            # Backfill the path for the cached-metadata branch above.
            row["raw_metadata_path"] = row.get("raw_metadata_path") or persist_raw_metadata(args.raw_metadata_dir, video_id, metadata)
            row["title"] = str(metadata.get("title") or "")
            duration = metadata.get("duration")
            row["duration_sec"] = str(duration or "")
            row["start_sec"] = "0"
            row["end_sec"] = str(duration or "")

            # --- Stage 2: captions ---
            manual_langs, native_automatic_langs, english_translation_lang = filter_caption_languages(metadata)
            subtitle_dir = subtitle_dir_for_video(args.dataset_dir, video_id)
            subtitle_payloads: Dict[str, Dict[str, object]] = {}

            if args.skip_subtitles:
                row["subtitle_status"] = "skipped"
            else:
                try:
                    need_subtitles = args.force_subtitles or not any(subtitle_dir.glob(f"{video_id}.*.vtt"))
                    if need_subtitles:
                        subtitle_error = download_subtitles(
                            video_id,
                            subtitle_dir,
                            manual_langs,
                            native_automatic_langs,
                            english_translation_lang,
                            args,
                        )
                    subtitle_payloads = load_subtitle_payloads(subtitle_dir, video_id)
                    row["subtitle_status"] = "ok" if subtitle_payloads else "missing"
                except Exception as exc:
                    # A failed download may still have produced usable tracks.
                    subtitle_error = str(exc)
                    subtitle_payloads = load_subtitle_payloads(subtitle_dir, video_id)
                    row["subtitle_status"] = "partial" if subtitle_payloads else "failed"

            row["subtitle_languages"] = "|".join(sorted(subtitle_payloads))
            row["subtitle_dir_path"] = str(subtitle_dir.relative_to(REPO_ROOT)) if subtitle_payloads else ""
            subtitle_en, subtitle_en_source = select_english_subtitle(subtitle_payloads)
            row["subtitle_en_source"] = subtitle_en_source
            # Blank legacy columns kept only for backward CSV compatibility.
            if "subtitle_texts_json" in row:
                row["subtitle_texts_json"] = ""
            if "subtitle_en" in row:
                row["subtitle_en"] = ""
            if "subtitle_json_path" in row:
                row["subtitle_json_path"] = ""
            if "raw_caption_dir" in row:
                row["raw_caption_dir"] = ""

            # --- Stage 3: raw video download ---
            try:
                existing_video = find_video_file(args.raw_video_dir, video_id)
                if args.skip_video_download:
                    row["download_status"] = "skipped"
                    row["raw_video_path"] = str(existing_video.relative_to(REPO_ROOT)) if existing_video else ""
                else:
                    if existing_video is None or args.force_download:
                        row["raw_video_path"], download_error = download_video(video_id, args.raw_video_dir, args)
                    else:
                        row["raw_video_path"] = str(existing_video.relative_to(REPO_ROOT))
                    row["download_status"] = "ok" if row["raw_video_path"] else "failed"
            except Exception as exc:
                download_error = str(exc)
                row["download_status"] = "failed"

            # --- Persist per-row state (manifest CSV + stats.npz) ---
            errors = [value for value in [metadata_error, subtitle_error, download_error] if value]
            row["error"] = " | ".join(errors)
            row["processed_at"] = time.strftime("%Y-%m-%d %H:%M:%S")
            stats_record["sign_language"] = row.get("sign_language", "")
            stats_record["title"] = row["title"]
            stats_record["duration_sec"] = row["duration_sec"]
            stats_record["start_sec"] = row["start_sec"]
            stats_record["end_sec"] = row["end_sec"]
            stats_record["subtitle_languages"] = row["subtitle_languages"]
            stats_record["subtitle_dir_path"] = row["subtitle_dir_path"]
            stats_record["subtitle_en_source"] = row["subtitle_en_source"]
            stats_record["raw_video_path"] = row["raw_video_path"]
            stats_record["raw_metadata_path"] = row["raw_metadata_path"]
            stats_record["metadata_status"] = row["metadata_status"]
            stats_record["subtitle_status"] = row["subtitle_status"]
            stats_record["download_status"] = row["download_status"]
            stats_record["last_error"] = row["error"]
            stats_record["updated_at"] = row["processed_at"]
            write_manifest(args.output_metadata_csv, rows, fieldnames)
            save_stats(args.stats_npz, stats)

            if row["download_status"] == "failed":
                print(f" video download failed: {download_error}")
            if row["subtitle_status"] in {"failed", "partial"}:
                print(f" subtitle status: {row['subtitle_status']} {subtitle_error}")

            if args.sleep_seconds > 0:
                time.sleep(args.sleep_seconds)
    finally:
        # Always remove the sanitized temporary cookie file.
        if temp_cookie_path is not None:
            temp_cookie_path.unlink(missing_ok=True)


if __name__ == "__main__":
    main()
|
scripts/pipeline02_extract_dwpose_from_video.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Pipeline step 02: extract DWpose keypoint NPZ files from downloaded videos."""

import argparse
import multiprocessing as mp
import shutil
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from typing import Dict, List, Sequence

import numpy as np
import torch
from easy_dwpose import DWposeDetector
from PIL import Image


# Make the repository root importable so `utils.*` resolves when run directly.
REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

from utils.stats_npz import load_stats, update_video_stats


# Default locations, shared with pipeline step 01.
DEFAULT_RAW_VIDEO_DIR = REPO_ROOT / "raw_video"
DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
# Extensions (compared lower-cased) accepted as input videos.
VIDEO_EXTENSIONS = {".mp4", ".mkv", ".webm", ".mov"}
# Sentinel file dropped into an npz dir once extraction finished successfully.
COMPLETE_MARKER_NAME = ".complete"
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def parse_args() -> argparse.Namespace:
    """Parse CLI options for pipeline step 02 (DWpose extraction)."""
    parser = argparse.ArgumentParser(
        description="Extract DWpose NPZ files from raw videos."
    )
    parser.add_argument("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
    parser.add_argument("--dataset-dir", type=Path, default=DEFAULT_DATASET_DIR)
    parser.add_argument("--fps", type=int, default=24)  # frame sampling rate
    parser.add_argument("--limit", type=int, default=None)
    parser.add_argument("--workers", type=int, default=None)
    parser.add_argument("--video-ids", nargs="*", default=None)
    # --force reprocesses videos already marked complete.
    parser.add_argument("--force", action="store_true")
    parser.add_argument("--delete-source-on-success", action="store_true")
    # Scratch space for intermediate frame dumps.
    parser.add_argument("--tmp-root", type=Path, default=Path("/tmp"))
    parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
    return parser.parse_args()
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def select_video_paths(args: argparse.Namespace) -> List[Path]:
    """Collect raw videos that still need DWpose extraction.

    A video is skipped when (unless --force) its npz dir exists, carries the
    completion marker, and the stats file records it as processed "ok".
    """
    args.raw_video_dir.mkdir(parents=True, exist_ok=True)
    args.dataset_dir.mkdir(parents=True, exist_ok=True)
    wanted_ids = set(args.video_ids or [])
    stats = load_stats(args.stats_npz)

    def already_done(video_id: str) -> bool:
        # npz dir, marker file, and stats entry must all agree.
        npz_dir = args.dataset_dir / video_id / "npz"
        return (
            npz_dir.exists()
            and (npz_dir / COMPLETE_MARKER_NAME).exists()
            and stats.get(video_id, {}).get("process_status") == "ok"
        )

    chosen: List[Path] = []
    for candidate in sorted(args.raw_video_dir.iterdir()):
        if not candidate.is_file() or candidate.suffix.lower() not in VIDEO_EXTENSIONS:
            continue
        vid = candidate.stem
        if wanted_ids and vid not in wanted_ids:
            continue
        if not args.force and already_done(vid):
            continue
        chosen.append(candidate)
        if args.limit is not None and len(chosen) >= args.limit:
            break
    return chosen
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def extract_frames(video_path: Path, frame_dir: Path, fps: int) -> None:
    """Dump the video as sequentially numbered JPEG frames via ffmpeg.

    Raises subprocess.CalledProcessError if ffmpeg exits non-zero.
    """
    output_pattern = frame_dir / "%08d.jpg"
    command = ["ffmpeg", "-hide_banner", "-loglevel", "error", "-y"]
    command += ["-i", str(video_path)]
    command += ["-vf", f"fps={fps}", str(output_pattern)]
    subprocess.run(command, check=True)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def build_npz_payload(pose_data: Dict[str, np.ndarray], width: int, height: int) -> Dict[str, np.ndarray]:
    """Flatten detector output into per-person arrays keyed for np.savez.

    The person count is taken from the "faces" array (0 when absent). Hands
    arrive interleaved as [p0_left, p0_right, p1_left, ...] and are split back
    out into per-person left/right entries, guarding against short arrays.
    """
    person_count = int(pose_data["faces"].shape[0]) if "faces" in pose_data else 0
    payload: Dict[str, np.ndarray] = {
        "num_persons": np.asarray(person_count, dtype=np.int32),
        "frame_width": np.asarray(width, dtype=np.int32),
        "frame_height": np.asarray(height, dtype=np.int32),
    }
    if not person_count:
        return payload

    body_xy = pose_data["bodies"].reshape(person_count, 18, 2).astype(np.float32)
    body_conf = pose_data["body_scores"].astype(np.float32)
    face_xy = pose_data["faces"].astype(np.float32)
    face_conf = pose_data["faces_scores"].astype(np.float32)
    hand_xy = pose_data["hands"].astype(np.float32)
    hand_conf = pose_data["hands_scores"].astype(np.float32)

    for idx in range(person_count):
        prefix = f"person_{idx:03d}"
        payload[f"{prefix}_body_keypoints"] = body_xy[idx]
        payload[f"{prefix}_body_scores"] = body_conf[idx]
        payload[f"{prefix}_face_keypoints"] = face_xy[idx]
        payload[f"{prefix}_face_scores"] = face_conf[idx]
        for offset, side in ((0, "left"), (1, "right")):
            hand_idx = idx * 2 + offset
            if hand_idx < len(hand_xy):
                payload[f"{prefix}_{side}_hand_keypoints"] = hand_xy[hand_idx]
                payload[f"{prefix}_{side}_hand_scores"] = hand_conf[hand_idx]
    return payload
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def process_video(video_path: Path, dataset_dir: Path, fps: int, detector: DWposeDetector, tmp_root: Path, force: bool) -> None:
    """Extract per-frame DWpose NPZ files for one video.

    Frames are dumped into a temp dir via ffmpeg, run through the detector one
    by one, and saved as <dataset_dir>/<video_id>/npz/<frame>.npz. The
    `.complete` marker is written only after every frame succeeded, so a crash
    leaves a marker-less dir that the next run wipes and redoes.
    """
    video_id = video_path.stem
    output_npz_dir = dataset_dir / video_id / "npz"
    complete_marker = output_npz_dir / COMPLETE_MARKER_NAME
    # Fully finished videos are skipped unless the caller forces a redo.
    if output_npz_dir.exists() and complete_marker.exists() and not force:
        print(f"Skip {video_id}: NPZ files already exist")
        return

    # Partial output (no marker) or a forced rerun starts from a clean dir.
    if output_npz_dir.exists() and (force or not complete_marker.exists()):
        shutil.rmtree(output_npz_dir)
    output_npz_dir.mkdir(parents=True, exist_ok=True)

    tmp_root.mkdir(parents=True, exist_ok=True)
    frame_dir = Path(tempfile.mkdtemp(prefix=f"sign_dwpose_{video_id}_", dir=str(tmp_root)))

    try:
        extract_frames(video_path, frame_dir, fps)
        frame_paths = sorted(frame_dir.glob("*.jpg"))
        total_frames = len(frame_paths)
        print(f"{video_id}: extracted {total_frames} frames at {fps} fps")

        for frame_index, frame_path in enumerate(frame_paths, start=1):
            with Image.open(frame_path) as image:
                # convert() returns an independent copy, safe to use after close.
                frame = image.convert("RGB")
            width, height = frame.size
            pose_data = detector(frame, draw_pose=False, include_hands=True, include_face=True)
            payload = build_npz_payload(pose_data, width, height)
            np.savez(output_npz_dir / f"{frame_index:08d}.npz", **payload)

            # Progress log on first frame, every 100th, and the last.
            if frame_index == 1 or frame_index % 100 == 0 or frame_index == total_frames:
                print(f"{video_id}: processed {frame_index}/{total_frames} frames")
        complete_marker.write_text(
            f"video_id={video_id}\nfps={fps}\nframes={total_frames}\ncompleted_at={time.strftime('%Y-%m-%d %H:%M:%S')}\n",
            encoding="utf-8",
        )
    finally:
        # Always drop the temp frame dump, even on failure.
        shutil.rmtree(frame_dir, ignore_errors=True)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argparse.Namespace) -> None:
    """Process every video whose list index is congruent to `rank` mod `worker_count`.

    Each worker owns one DWpose detector on cuda:<rank> when that GPU exists,
    otherwise CPU. Per-video status transitions (running -> ok/failed) are
    recorded in the shared stats NPZ; failures are logged and do not stop the
    worker's remaining videos.
    """
    use_cuda = torch.cuda.is_available() and torch.cuda.device_count() > rank
    device = f"cuda:{rank}" if use_cuda else "cpu"
    detector = DWposeDetector(device=device)
    print(f"Worker {rank}: device={device}")

    for index, video_path in enumerate(video_paths):
        # Static round-robin sharding: each index belongs to exactly one rank.
        if index % worker_count != rank:
            continue
        try:
            update_video_stats(
                args.stats_npz,
                video_path.stem,
                process_status="running",
                last_error="",
                updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
            )
            process_video(
                video_path=video_path,
                dataset_dir=args.dataset_dir,
                fps=args.fps,
                detector=detector,
                tmp_root=args.tmp_root,
                force=args.force,
            )
            update_video_stats(
                args.stats_npz,
                video_path.stem,
                process_status="ok",
                last_error="",
                updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
            )
            # Optionally free disk space once extraction is confirmed good.
            if args.delete_source_on_success and video_path.exists():
                video_path.unlink()
                print(f"Worker {rank}: deleted source video {video_path.name}")
        except Exception as exc:
            update_video_stats(
                args.stats_npz,
                video_path.stem,
                process_status="failed",
                last_error=str(exc),
                updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
            )
            print(f"Worker {rank}: failed on {video_path.name}: {exc}")
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def main() -> None:
    """Entry point: pick pending videos and fan them out across workers."""
    args = parse_args()
    video_paths = select_video_paths(args)
    if not video_paths:
        print("No videos need DWpose extraction.")
        return

    # Worker count: explicit --workers wins; otherwise one worker per visible
    # GPU (or a single CPU worker); never more workers than videos.
    if args.workers is not None:
        worker_count = max(1, args.workers)
    else:
        worker_count = torch.cuda.device_count() if torch.cuda.is_available() else 1
        worker_count = max(1, worker_count)
    worker_count = min(worker_count, len(video_paths))

    if worker_count == 1:
        # Run inline; no multiprocessing overhead for a single worker.
        worker(0, 1, video_paths, args)
        return

    # "spawn" gives each child a clean interpreter (safe for CUDA init).
    mp.set_start_method("spawn", force=True)
    processes = []
    for rank in range(worker_count):
        process = mp.Process(target=worker, args=(rank, worker_count, video_paths, args))
        process.start()
        processes.append(process)

    failed = False
    for process in processes:
        process.join()
        failed = failed or process.exitcode != 0
    if failed:
        raise SystemExit("One or more DWpose workers failed.")
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
if __name__ == "__main__":
|
| 247 |
+
main()
|
scripts/pipeline03_upload_to_huggingface.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import json
|
| 5 |
+
import os
|
| 6 |
+
import shutil
|
| 7 |
+
import sys
|
| 8 |
+
import tarfile
|
| 9 |
+
import time
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Dict, List, Sequence, Tuple
|
| 12 |
+
|
| 13 |
+
from huggingface_hub import HfApi
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
REPO_ROOT = Path(__file__).resolve().parents[1]
|
| 17 |
+
if str(REPO_ROOT) not in sys.path:
|
| 18 |
+
sys.path.insert(0, str(REPO_ROOT))
|
| 19 |
+
|
| 20 |
+
from utils.stats_npz import update_many_video_stats
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# Default locations, resolved relative to the repository root.
DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
DEFAULT_RAW_VIDEO_DIR = REPO_ROOT / "raw_video"
DEFAULT_RAW_CAPTION_DIR = REPO_ROOT / "raw_caption"
DEFAULT_RAW_METADATA_DIR = REPO_ROOT / "raw_metadata"
DEFAULT_ARCHIVE_DIR = REPO_ROOT / "archives"
DEFAULT_PROGRESS_PATH = REPO_ROOT / "archive_upload_progress.json"
DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
# Target archive size: 14 GiB per tar.
DEFAULT_TARGET_BYTES = 14 * 1024 * 1024 * 1024
# Marker file indicating a finished NPZ extraction (written by pipeline02).
COMPLETE_MARKER_NAME = ".complete"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the archive-and-upload pipeline."""
    parser = argparse.ArgumentParser(
        description="Archive NPZ folders into 14GB tar files and upload them to Hugging Face."
    )
    add = parser.add_argument
    add("--dataset-dir", type=Path, default=DEFAULT_DATASET_DIR)
    add("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
    add("--raw-caption-dir", type=Path, default=DEFAULT_RAW_CAPTION_DIR)
    add("--raw-metadata-dir", type=Path, default=DEFAULT_RAW_METADATA_DIR)
    add("--archive-dir", type=Path, default=DEFAULT_ARCHIVE_DIR)
    add("--progress-path", type=Path, default=DEFAULT_PROGRESS_PATH)
    add("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
    add("--repo-id", default="SignerX/Sign-DWPose-2M")
    add("--repo-type", default="dataset")
    add("--target-bytes", type=int, default=DEFAULT_TARGET_BYTES)
    add("--require-target-bytes", action="store_true")
    add("--dry-run", action="store_true")
    add("--token", default=os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_HUB_TOKEN"))
    return parser.parse_args()
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def load_progress(progress_path: Path) -> Dict[str, object]:
    """Read the upload-progress JSON, or return a fresh empty structure."""
    if not progress_path.exists():
        return {"archives": {}, "uploaded_folders": {}}
    with progress_path.open("r", encoding="utf-8") as handle:
        return json.load(handle)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def save_progress(progress_path: Path, progress: Dict[str, object]) -> None:
    """Persist the progress dict as pretty-printed UTF-8 JSON, creating parents."""
    progress_path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(progress, ensure_ascii=False, indent=2)
    progress_path.write_text(serialized, encoding="utf-8")
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def folder_size_bytes(folder_path: Path) -> int:
    """Total size in bytes of all regular files anywhere under folder_path."""
    return sum(entry.stat().st_size for entry in folder_path.rglob("*") if entry.is_file())
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def list_unuploaded_folders(dataset_dir: Path, progress: Dict[str, object]) -> List[Tuple[str, Path, int]]:
    """List completed dataset folders not yet assigned to an uploaded archive.

    Returns (name, path, size_bytes) tuples in sorted name order. Only folders
    whose npz/ dir carries the completion marker qualify.
    """
    already_uploaded = progress.get("uploaded_folders", {})
    pending: List[Tuple[str, Path, int]] = []
    for candidate in sorted(dataset_dir.iterdir()):
        if not candidate.is_dir():
            continue
        name = candidate.name
        if name in already_uploaded:
            continue
        if not (candidate / "npz" / COMPLETE_MARKER_NAME).exists():
            continue
        pending.append((name, candidate, folder_size_bytes(candidate)))
    return pending
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def build_batch(folders: Sequence[Tuple[str, Path, int]], target_bytes: int) -> List[Tuple[str, Path, int]]:
    """Greedily take folders in order until the byte target is reached.

    The first folder is always taken even when it alone exceeds the target,
    so oversized folders can still be archived.
    """
    chosen: List[Tuple[str, Path, int]] = []
    accumulated = 0
    for entry in folders:
        size = entry[2]
        # Adding would overshoot and we already have something: stop here.
        if chosen and accumulated + size > target_bytes:
            break
        chosen.append(entry)
        accumulated += size
        if accumulated >= target_bytes:
            break
    return chosen
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def total_batchable_bytes(folders: Sequence[Tuple[str, Path, int]]) -> int:
    """Sum the precomputed byte sizes of the given folder entries."""
    return sum(entry[2] for entry in folders)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def next_archive_index(progress: Dict[str, object], repo_files: Sequence[str]) -> int:
    """Return the next free archive index across local progress and the repo.

    Scans both the locally recorded archives and the remote repo file list for
    names of the form "Sign_DWPose_NPZ_<digits>.tar" and returns max+1 (1 when
    none exist).

    Bug fixed vs. the slice-based original: `name[-10:-4]` assumed exactly six
    digits, silently truncating indices above 999999 (e.g. "..._1000000.tar"
    parsed as 0) and raising ValueError on any non-numeric name that merely
    matched the prefix/suffix. The index digits are now extracted explicitly
    and non-numeric names are ignored.
    """
    prefix = "Sign_DWPose_NPZ_"
    suffix = ".tar"

    def parse_index(name):
        # Returns the integer index, or None for names that don't match.
        if name.startswith(prefix) and name.endswith(suffix):
            digits = name[len(prefix):-len(suffix)]
            if digits.isdigit():
                return int(digits)
        return None

    indices = []
    for archive_name in progress.get("archives", {}):
        parsed = parse_index(archive_name)
        if parsed is not None:
            indices.append(parsed)
    for repo_file in repo_files:
        parsed = parse_index(Path(repo_file).name)
        if parsed is not None:
            indices.append(parsed)
    return (max(indices) + 1) if indices else 1
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def create_tar_archive(archive_path: Path, dataset_dir: Path, folder_names: Sequence[str]) -> None:
    """Pack the named dataset folders into one uncompressed tar archive.

    Each folder is stored under its bare name (arcname) so extraction
    reproduces the dataset layout without the local path prefix.
    """
    archive_path.parent.mkdir(parents=True, exist_ok=True)
    with tarfile.open(archive_path, mode="w") as tar:
        for name in folder_names:
            tar.add(dataset_dir / name, arcname=name, recursive=True)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def upload_archive(api: HfApi, repo_id: str, repo_type: str, archive_path: Path) -> None:
    """Upload the archive file to the Hub under its own filename."""
    upload_kwargs = {
        "path_or_fileobj": str(archive_path),
        "path_in_repo": archive_path.name,
        "repo_id": repo_id,
        "repo_type": repo_type,
    }
    api.upload_file(**upload_kwargs)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def cleanup_local_assets(
    video_ids: Sequence[str],
    dataset_dir: Path,
    raw_video_dir: Path,
    raw_caption_dir: Path,
    raw_metadata_dir: Path,
) -> None:
    """Delete all local artifacts for the given videos after a successful upload.

    Removes, per video id: the extracted dataset folder, any raw video file
    with a matching stem, the caption folder, and the metadata JSON. Missing
    items are silently skipped.
    """
    for vid in video_ids:
        extracted = dataset_dir / vid
        if extracted.exists():
            shutil.rmtree(extracted, ignore_errors=True)
        # Raw source video(s), whatever the container extension.
        for source in raw_video_dir.glob(f"{vid}.*"):
            if source.is_file():
                source.unlink()
        captions = raw_caption_dir / vid
        if captions.exists():
            shutil.rmtree(captions, ignore_errors=True)
        metadata = raw_metadata_dir / f"{vid}.json"
        if metadata.exists():
            metadata.unlink()
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def format_size(num_bytes: int) -> str:
    """Render a byte count as a human-readable string with two decimals.

    Scales through B, KB, MB, GB and caps at TB (values >= 1024 TB simply
    show a large TB number). The original had an unreachable trailing
    `return` after the loop — the "TB" iteration always returns — which has
    been removed.
    """
    size = float(num_bytes)
    for unit in ("B", "KB", "MB", "GB", "TB"):
        if size < 1024 or unit == "TB":
            return f"{size:.2f} {unit}"
        size /= 1024
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def main() -> None:
    """Batch completed NPZ folders into ~target-size tars, upload, then clean up.

    Loops until no eligible folders remain: each iteration builds one batch,
    tars it, uploads it, records the mapping in the progress JSON, mirrors
    per-video status into the stats NPZ, and deletes the local raw assets.
    """
    args = parse_args()
    progress = load_progress(args.progress_path)
    api = HfApi(token=args.token)
    args.dataset_dir.mkdir(parents=True, exist_ok=True)

    try:
        repo_files = api.list_repo_files(repo_id=args.repo_id, repo_type=args.repo_type)
    except Exception:
        # Repo may not exist yet (or we are offline); start numbering fresh.
        repo_files = []

    while True:
        remaining_folders = list_unuploaded_folders(args.dataset_dir, progress)
        if not remaining_folders:
            print("No unuploaded dataset folders remain.")
            break
        remaining_bytes = total_batchable_bytes(remaining_folders)
        # Optionally wait until a full-size archive can be produced.
        if args.require_target_bytes and remaining_bytes < args.target_bytes:
            print(
                f"Skip upload: only {format_size(remaining_bytes)} of completed NPZ folders available, below target {format_size(args.target_bytes)}."
            )
            break

        batch = build_batch(remaining_folders, args.target_bytes)
        batch_names = [name for name, _, _ in batch]
        batch_bytes = sum(folder_bytes for _, _, folder_bytes in batch)
        archive_index = next_archive_index(progress, repo_files)
        archive_name = f"Sign_DWPose_NPZ_{archive_index:06d}.tar"
        archive_path = args.archive_dir / archive_name

        print(f"Create archive {archive_name} with {len(batch_names)} folders ({format_size(batch_bytes)})")
        for folder_name in batch_names:
            print(f"  - {folder_name}")

        # Dry run stops after planning a single batch.
        if args.dry_run:
            break

        args.archive_dir.mkdir(parents=True, exist_ok=True)
        # Mark the whole batch as in-flight before any slow work starts.
        update_many_video_stats(
            args.stats_npz,
            batch_names,
            upload_status="uploading",
            archive_name=archive_name,
            local_cleanup_status="pending",
            updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
        )
        try:
            create_tar_archive(archive_path, args.dataset_dir, batch_names)
            upload_archive(api, args.repo_id, args.repo_type, archive_path)
        except Exception as exc:
            update_many_video_stats(
                args.stats_npz,
                batch_names,
                upload_status="failed",
                local_cleanup_status="pending",
                archive_name=archive_name,
                last_error=str(exc),
                updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
            )
            raise

        # Record the archive in the progress file before deleting anything,
        # so a crash between upload and cleanup cannot cause a re-upload.
        progress["archives"][archive_name] = {
            "folders": batch_names,
            "size_bytes": batch_bytes,
            "uploaded_at": time.strftime("%Y-%m-%d %H:%M:%S"),
        }
        for folder_name in batch_names:
            progress["uploaded_folders"][folder_name] = archive_name
        save_progress(args.progress_path, progress)

        cleanup_error = ""
        try:
            cleanup_local_assets(
                batch_names,
                args.dataset_dir,
                args.raw_video_dir,
                args.raw_caption_dir,
                args.raw_metadata_dir,
            )
            archive_path.unlink(missing_ok=True)
        except Exception as exc:
            # Upload succeeded; remember the failure but finish bookkeeping.
            cleanup_error = str(exc)
        # Keep the in-memory repo listing current for the next index lookup.
        repo_files.append(archive_name)
        update_many_video_stats(
            args.stats_npz,
            batch_names,
            upload_status="uploaded",
            local_cleanup_status="deleted" if not cleanup_error else "failed",
            archive_name=archive_name,
            last_error=cleanup_error,
            updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
        )
        if cleanup_error:
            raise RuntimeError(f"Uploaded {archive_name} but local cleanup failed: {cleanup_error}")
        print(f"Uploaded {archive_name} and cleaned raw assets for {len(batch_names)} videos.")
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
if __name__ == "__main__":
|
| 266 |
+
main()
|
scripts/visualize_dwpose_npz.py
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import importlib.util
|
| 5 |
+
import math
|
| 6 |
+
import shutil
|
| 7 |
+
import subprocess
|
| 8 |
+
import tempfile
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Dict, Iterable, List
|
| 11 |
+
|
| 12 |
+
import cv2
|
| 13 |
+
import matplotlib
|
| 14 |
+
import numpy as np
|
| 15 |
+
from PIL import Image
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
REPO_ROOT = Path(__file__).resolve().parents[1]
|
| 19 |
+
|
| 20 |
+
import sys
|
| 21 |
+
|
| 22 |
+
if str(REPO_ROOT) not in sys.path:
|
| 23 |
+
sys.path.insert(0, str(REPO_ROOT))
|
| 24 |
+
|
| 25 |
+
from utils.draw_dw_lib import draw_pose
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Container formats considered when looking for a raw video.
VIDEO_EXTENSIONS = (".mp4", ".mkv", ".mov", ".webm")
EPS = 0.01
# Hard-coded path to StableSigner's openpose drawing module; imported lazily
# by get_stablesigner_openpose_draw and tolerated as absent.
STABLE_SIGNER_OPENPOSE_PATH = Path(
    "/research/cbim/vast/sf895/code/SignerX-inference-webui/plugins/StableSigner/easy_dwpose/draw/openpose.py"
)
# Process-wide cache for the lazily imported draw_pose callable.
_STABLE_SIGNER_OPENPOSE_DRAW = None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the NPZ visualization tool."""
    parser = argparse.ArgumentParser(description="Visualize Sign-DWPose NPZ outputs.")
    add = parser.add_argument
    add("--video-dir", type=Path, required=True, help="Dataset video directory, e.g. dataset/<video_id>")
    add("--npz-dir", type=Path, default=None, help="Optional NPZ directory override")
    add("--raw-video", type=Path, default=None, help="Optional raw video path for overlay rendering")
    add("--fps", type=int, default=24, help="Visualization FPS")
    add("--max-frames", type=int, default=None, help="Limit the number of frames to render")
    add(
        "--draw-style",
        choices=("controlnext", "openpose", "dwpose"),
        default="controlnext",
        help="Rendering style. dwpose is kept as an alias of controlnext.",
    )
    add("--conf-threshold", type=float, default=0.6, help="Confidence threshold for openpose filtering")
    add(
        "--frame-indices",
        default="1,2,3,4",
        help="Comma-separated 1-based frame indices for standalone single-frame previews",
    )
    add(
        "--output-dir",
        type=Path,
        default=None,
        help="Visualization output directory. Defaults to <video-dir>/visualization_dwpose",
    )
    add("--force", action="store_true", help="Overwrite existing visualization outputs")
    return parser.parse_args()
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def parse_frame_indices(value: str) -> List[int]:
    """Parse a comma-separated list into sorted, de-duplicated positive ints.

    Blank entries are skipped; zero and negative values are dropped.
    """
    unique: set = set()
    for piece in value.split(","):
        token = piece.strip()
        if not token:
            continue
        number = int(token)
        if number > 0:
            unique.add(number)
    return sorted(unique)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def normalize_draw_style(value: str) -> str:
    """Map the legacy "dwpose" alias onto "controlnext"; pass others through."""
    if value == "dwpose":
        return "controlnext"
    return value
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def get_stablesigner_openpose_draw():
    """Lazily import and cache StableSigner's `draw_pose` from a fixed path.

    Returns the callable, or None when the module file is absent, the import
    spec cannot be built, or the module lacks a `draw_pose` attribute. The
    result is memoized in a module-level global so the file import runs at
    most once per process (a None result from a missing attribute is also
    cached only when getattr succeeded; a missing file is re-checked each call).
    """
    global _STABLE_SIGNER_OPENPOSE_DRAW  # noqa: PLW0603
    if _STABLE_SIGNER_OPENPOSE_DRAW is not None:
        return _STABLE_SIGNER_OPENPOSE_DRAW
    if not STABLE_SIGNER_OPENPOSE_PATH.exists():
        return None
    spec = importlib.util.spec_from_file_location("stablesigner_openpose_draw", STABLE_SIGNER_OPENPOSE_PATH)
    if spec is None or spec.loader is None:
        return None
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # getattr with default tolerates a module that does not define draw_pose.
    _STABLE_SIGNER_OPENPOSE_DRAW = getattr(module, "draw_pose", None)
    return _STABLE_SIGNER_OPENPOSE_DRAW
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def load_npz_frame(npz_path: Path) -> Dict[str, object]:
    """Read one frame NPZ back into a nested {person_<i>: arrays} dict.

    Mirrors the writer's "person_<i:03d>_<suffix>" key layout, regrouping each
    person's arrays under an un-padded "person_<i>" key; persons with no
    stored arrays are omitted.
    """
    archive = np.load(npz_path, allow_pickle=True)
    frame: Dict[str, object] = {
        "num_persons": int(archive["num_persons"]),
        "frame_width": int(archive["frame_width"]),
        "frame_height": int(archive["frame_height"]),
    }
    suffixes = (
        "body_keypoints",
        "body_scores",
        "face_keypoints",
        "face_scores",
        "left_hand_keypoints",
        "left_hand_scores",
        "right_hand_keypoints",
        "right_hand_scores",
    )
    for person_idx in range(frame["num_persons"]):
        arrays = {
            suffix: archive[f"person_{person_idx:03d}_{suffix}"]
            for suffix in suffixes
            if f"person_{person_idx:03d}_{suffix}" in archive.files
        }
        if arrays:
            frame[f"person_{person_idx}"] = arrays
    return frame
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def to_openpose_frame(frame: Dict[str, object]) -> Dict[str, np.ndarray]:
    """Regroup per-person NPZ arrays into the stacked layout draw_pose expects.

    Bodies/faces stack one row per person; hands interleave left then right
    for each person. Missing or malformed person entries are skipped; empty
    inputs produce correctly shaped zero-length arrays.
    """
    bodies: List[np.ndarray] = []
    body_scores: List[np.ndarray] = []
    hands: List[np.ndarray] = []
    hand_scores: List[np.ndarray] = []
    faces: List[np.ndarray] = []
    face_scores: List[np.ndarray] = []

    for idx in range(int(frame["num_persons"])):
        person = frame.get(f"person_{idx}")
        if not isinstance(person, dict):
            continue

        def as_f32(key: str) -> np.ndarray:
            return np.asarray(person[key], dtype=np.float32)

        bodies.append(as_f32("body_keypoints"))
        body_scores.append(as_f32("body_scores"))
        hands.append(as_f32("left_hand_keypoints"))
        hands.append(as_f32("right_hand_keypoints"))
        hand_scores.append(as_f32("left_hand_scores"))
        hand_scores.append(as_f32("right_hand_scores"))
        faces.append(as_f32("face_keypoints"))
        face_scores.append(as_f32("face_scores"))

    if bodies:
        stacked_bodies = np.vstack(bodies)
        stacked_body_scores = np.vstack(body_scores)
    else:
        stacked_bodies = np.zeros((0, 2), dtype=np.float32)
        stacked_body_scores = np.zeros((0, 18), dtype=np.float32)

    def stack_or_empty(items: List[np.ndarray], empty_shape) -> np.ndarray:
        if items:
            return np.asarray(items, dtype=np.float32)
        return np.zeros(empty_shape, dtype=np.float32)

    return {
        "bodies": stacked_bodies,
        "body_scores": stacked_body_scores,
        "hands": stack_or_empty(hands, (0, 21, 2)),
        "hands_scores": stack_or_empty(hand_scores, (0, 21)),
        "faces": stack_or_empty(faces, (0, 68, 2)),
        "faces_scores": stack_or_empty(face_scores, (0, 68)),
    }
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def filter_pose_for_openpose(frame: Dict[str, np.ndarray], conf_threshold: float, update_subset: bool) -> Dict[str, np.ndarray]:
    """Suppress invalid / low-confidence keypoints before OpenPose-style rendering.

    Works on a deep copy of *frame*; the input dict is never mutated.

    Args:
        frame: OpenPose-layout frame dict (keys ``bodies``, ``body_scores``,
            ``hands``, ``hands_scores``, ``faces``, ``faces_scores``).
        conf_threshold: keypoints with a score below this are zeroed out.
        update_subset: if True, also rewrite ``body_scores`` so suppressed
            joints carry ``-1`` (the renderer's "missing joint" marker).

    Returns:
        A new dict with the same keys; suppressed coordinates are set to 0.
    """
    filtered = {key: np.array(value, copy=True) for key, value in frame.items()}

    bodies = filtered.get("bodies", None)
    body_scores = filtered.get("body_scores", None)
    if bodies is not None:
        bodies = bodies.copy()
        # Keypoints packed as (0, 0) mark "not detected"; treat anything at or
        # below this epsilon as invalid coordinates.
        min_valid = 1e-6
        coord_mask = (bodies[:, 0] > min_valid) & (bodies[:, 1] > min_valid)

        conf_mask = None
        if body_scores is not None:
            scores = np.array(body_scores, copy=False)
            # Scores may arrive as (persons, 18) or already flat; flatten to
            # align with the stacked (persons*18, 2) bodies array.
            score_vec = scores.reshape(-1) if scores.ndim == 2 else scores
            score_vec = score_vec.astype(float)
            conf_mask = score_vec < conf_threshold
            # Defensively pad/truncate so the mask always matches bodies.
            if conf_mask.shape[0] < bodies.shape[0]:
                conf_mask = np.pad(conf_mask, (0, bodies.shape[0] - conf_mask.shape[0]), constant_values=False)
            elif conf_mask.shape[0] > bodies.shape[0]:
                conf_mask = conf_mask[: bodies.shape[0]]
        # Valid = plausible coordinates AND (no scores available OR confident).
        valid_mask = coord_mask if conf_mask is None else (coord_mask & (~conf_mask))
        bodies[~valid_mask, :] = 0
        filtered["bodies"] = bodies

        if update_subset:
            if body_scores is not None:
                subset = np.array(body_scores, copy=True)
                if subset.ndim == 1:
                    subset = subset.reshape(1, -1)
            else:
                # No scores: synthesize a trivial identity subset row.
                subset = np.arange(bodies.shape[0], dtype=float).reshape(1, -1)
            # NOTE(review): subset is padded to bodies.shape[0] columns, i.e.
            # total keypoints across all persons — this only lines up cleanly
            # for a single person; confirm intended multi-person behavior.
            if subset.shape[1] < bodies.shape[0]:
                subset = np.pad(subset, ((0, 0), (0, bodies.shape[0] - subset.shape[1])), constant_values=-1)
            elif subset.shape[1] > bodies.shape[0]:
                subset = subset[:, : bodies.shape[0]]
            subset[:, ~valid_mask] = -1
            filtered["body_scores"] = subset

    hands = filtered.get("hands", None)
    hand_scores = filtered.get("hands_scores", None)
    if hands is not None and hand_scores is not None:
        scores = np.array(hand_scores)
        hands = hands.copy()
        if hands.ndim == 3 and scores.ndim == 2:
            for hand_index in range(hands.shape[0]):
                # Zero out low-confidence or non-positive-score hand keypoints.
                mask = (scores[hand_index] < conf_threshold) | (scores[hand_index] <= 0)
                hands[hand_index][mask, :] = 0
        filtered["hands"] = hands

    faces = filtered.get("faces", None)
    face_scores = filtered.get("faces_scores", None)
    if faces is not None and face_scores is not None:
        scores = np.array(face_scores)
        faces = faces.copy()
        if faces.ndim == 3 and scores.ndim == 2:
            for face_index in range(faces.shape[0]):
                mask = (scores[face_index] < conf_threshold) | (scores[face_index] <= 0)
                faces[face_index][mask, :] = 0
        filtered["faces"] = faces

    return filtered
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def draw_openpose_body(canvas: np.ndarray, candidate: np.ndarray, subset: np.ndarray, score: np.ndarray, conf_threshold: float) -> np.ndarray:
    """Draw OpenPose-style body limbs and joint dots onto *canvas*.

    Args:
        canvas: HxWx3 uint8 image; drawn on and returned (limb layer is
            dimmed by 0.6 before joint dots are added).
        candidate: flat array of normalized keypoint coordinates; rows are
            indexed through *subset*.
        subset: per-person rows of indices into *candidate* (-1 = missing).
        score: per-person, per-joint confidence values, indexed like subset.
        conf_threshold: joints/limbs below this confidence are skipped.

    Returns:
        The canvas with the skeleton rendered.
    """
    height, width, _ = canvas.shape
    # 1-based joint pairs of the 18-keypoint OpenPose skeleton.
    limb_seq = [
        [2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], [10, 11],
        [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], [1, 16], [16, 18], [3, 17], [6, 18],
    ]
    # Standard OpenPose color wheel: one color per limb / joint index.
    colors = [
        [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
        [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
        [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85],
    ]

    # Only the first 17 limb pairs are drawn (the last two ear links are skipped).
    for limb_index in range(17):
        for person_index in range(len(subset)):
            index = subset[person_index][np.array(limb_seq[limb_index]) - 1]
            if -1 in index:
                continue
            confidence = score[person_index][np.array(limb_seq[limb_index]) - 1]
            if confidence[0] < conf_threshold or confidence[1] < conf_threshold:
                continue
            coords = candidate[index.astype(int)]
            # Coordinates at/below EPS mark undetected keypoints.
            if np.any(coords <= EPS):
                continue
            # NOTE(review): despite the names, column 0 is scaled by width and
            # ends up as the pixel x coordinate in ellipse2Poly below — the
            # y_/x_ naming appears swapped but is internally consistent.
            y_coords = coords[:, 0] * float(width)
            x_coords = coords[:, 1] * float(height)
            mean_x = np.mean(x_coords)
            mean_y = np.mean(y_coords)
            length = ((x_coords[0] - x_coords[1]) ** 2 + (y_coords[0] - y_coords[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(x_coords[0] - x_coords[1], y_coords[0] - y_coords[1]))
            # Each limb is rendered as a filled ellipse spanning the two joints.
            polygon = cv2.ellipse2Poly((int(mean_y), int(mean_x)), (int(length / 2), 4), int(angle), 0, 360, 1)
            cv2.fillConvexPoly(canvas, polygon, colors[limb_index])

    # Dim the limb layer so the joint dots drawn next stand out.
    canvas = (canvas * 0.6).astype(np.uint8)
    for keypoint_index in range(18):
        for person_index in range(len(subset)):
            index = int(subset[person_index][keypoint_index])
            if index == -1 or score[person_index][keypoint_index] < conf_threshold:
                continue
            x_value, y_value = candidate[index][0:2]
            cv2.circle(canvas, (int(x_value * width), int(y_value * height)), 4, colors[keypoint_index], thickness=-1)
    return canvas
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def draw_openpose_hands(canvas: np.ndarray, hand_peaks: np.ndarray, hand_scores: np.ndarray, conf_threshold: float) -> np.ndarray:
    """Draw 21-keypoint hand skeletons (bones + red joint dots) onto *canvas*.

    Args:
        canvas: HxWx3 uint8 image, modified in place and returned.
        hand_peaks: per-hand arrays of normalized (x, y) keypoints.
        hand_scores: per-hand confidence arrays; may be shorter than
            *hand_peaks*, in which case the extra hands are drawn unfiltered.
        conf_threshold: keypoints below this confidence are skipped.
    """
    height, width, _ = canvas.shape
    # Finger bone connectivity of the 21-keypoint hand model (wrist = 0).
    edges = [
        [0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10],
        [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20],
    ]
    for hand_index, peaks in enumerate(hand_peaks):
        # Missing scores disable confidence filtering for this hand.
        scores = hand_scores[hand_index] if len(hand_scores) > hand_index else None
        for edge_index, edge in enumerate(edges):
            x1, y1 = peaks[edge[0]]
            x2, y2 = peaks[edge[1]]
            if scores is not None and (scores[edge[0]] < conf_threshold or scores[edge[1]] < conf_threshold):
                continue
            x1 = int(x1 * width)
            y1 = int(y1 * height)
            x2 = int(x2 * width)
            y2 = int(y2 * height)
            # (0, 0) coordinates mark undetected keypoints; skip those bones.
            if x1 > EPS and y1 > EPS and x2 > EPS and y2 > EPS:
                cv2.line(
                    canvas,
                    (x1, y1),
                    (x2, y2),
                    # Hue varies per bone so each finger segment is distinct.
                    matplotlib.colors.hsv_to_rgb([edge_index / float(len(edges)), 1.0, 1.0]) * 255,
                    thickness=2,
                )
        for point_index, point in enumerate(peaks):
            if scores is not None and scores[point_index] < conf_threshold:
                continue
            x_value = int(point[0] * width)
            y_value = int(point[1] * height)
            if x_value > EPS and y_value > EPS:
                cv2.circle(canvas, (x_value, y_value), 4, (0, 0, 255), thickness=-1)
    return canvas
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def draw_openpose_faces(canvas: np.ndarray, face_points: np.ndarray, face_scores: np.ndarray, conf_threshold: float) -> np.ndarray:
    """Draw face landmarks as small white dots onto *canvas*.

    Landmarks with confidence below *conf_threshold* are skipped, as are
    (0, 0) coordinates, which mark undetected points. If *face_scores* has
    fewer entries than *face_points*, the extra faces are drawn unfiltered.
    """
    canvas_h, canvas_w, _ = canvas.shape
    for face_idx, landmarks in enumerate(face_points):
        confidences = face_scores[face_idx] if face_idx < len(face_scores) else None
        for lm_idx, landmark in enumerate(landmarks):
            if confidences is not None and confidences[lm_idx] < conf_threshold:
                continue
            px = int(landmark[0] * canvas_w)
            py = int(landmark[1] * canvas_h)
            if px <= EPS or py <= EPS:
                continue
            cv2.circle(canvas, (px, py), 3, (255, 255, 255), thickness=-1)
    return canvas
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
def draw_openpose_frame(frame: Dict[str, np.ndarray], width: int, height: int, conf_threshold: float) -> Image.Image:
    """Render one OpenPose-layout frame to an RGB PIL image.

    Prefers the StableSigner drawing callable when available (returned by
    get_stablesigner_openpose_draw()); otherwise falls back to the local
    draw_openpose_* helpers on a black canvas.
    """
    draw_func = get_stablesigner_openpose_draw()
    if draw_func is not None:
        canvas = draw_func(
            pose=frame,
            height=height,
            width=width,
            include_face=True,
            include_hands=True,
            conf_threshold=conf_threshold,
        )
        return Image.fromarray(canvas, "RGB")

    canvas = np.zeros((height, width, 3), dtype=np.uint8)
    bodies = frame["bodies"]
    subset = frame.get("body_scores", np.zeros((1, 18), dtype=np.float32))
    if subset.ndim == 1:
        subset = subset.reshape(1, -1)
    # body_scores doubles as both the subset (candidate indices) and the
    # score array here. NOTE(review): confirm this matches how the upstream
    # conversion packs body_scores — indices and confidences share one array.
    canvas = draw_openpose_body(canvas, bodies, subset, subset, conf_threshold)
    if len(frame.get("faces", [])) > 0:
        canvas = draw_openpose_faces(canvas, frame["faces"], frame.get("faces_scores", np.zeros((0, 68))), conf_threshold)
    if len(frame.get("hands", [])) > 0:
        canvas = draw_openpose_hands(canvas, frame["hands"], frame.get("hands_scores", np.zeros((0, 21))), conf_threshold)
    return Image.fromarray(canvas, "RGB")
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def render_pose_image(frame: Dict[str, object], draw_style: str, transparent: bool, conf_threshold: float) -> Image.Image:
    """Render a single loaded NPZ pose frame to a PIL image.

    Args:
        frame: NPZ frame dict; must contain "frame_width" / "frame_height".
        draw_style: "openpose" selects the OpenPose renderer; any other value
            uses draw_pose (the DWPose-style renderer).
        transparent: if True, return RGBA with the background transparent.
        conf_threshold: keypoints below this confidence are suppressed
            (openpose path only; draw_pose applies its own handling).
    """
    width = int(frame["frame_width"])
    height = int(frame["frame_height"])
    if draw_style == "openpose":
        openpose_frame = filter_pose_for_openpose(
            to_openpose_frame(frame),
            conf_threshold=conf_threshold,
            update_subset=True,
        )
        image = draw_openpose_frame(openpose_frame, width, height, conf_threshold)
        if not transparent:
            return image
        # Derive alpha from the RGB render: any non-black pixel becomes fully
        # opaque, pure black stays fully transparent.
        rgba = image.convert("RGBA")
        alpha = np.where(np.array(image).sum(axis=2) > 0, 255, 0).astype(np.uint8)
        rgba.putalpha(Image.fromarray(alpha, "L"))
        return rgba

    rendered = draw_pose(
        frame,
        H=height,
        W=width,
        include_body=True,
        include_hand=True,
        include_face=True,
        transparent=transparent,
    )
    # draw_pose returns channel-first (C, H, W); PIL needs (H, W, C).
    rendered = np.transpose(rendered, (1, 2, 0))
    if rendered.dtype != np.uint8:
        # Assume float output in [0, 1] when not already uint8.
        rendered = np.clip(rendered * 255.0, 0, 255).astype(np.uint8)
    return Image.fromarray(rendered, "RGBA" if transparent else "RGB")
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def save_frame_previews(npz_paths: Iterable[Path], single_frame_dir: Path, draw_style: str, conf_threshold: float) -> None:
    """Render the selected NPZ frames to standalone preview PNGs.

    Output files are named after each NPZ stem and written to
    *single_frame_dir*, which is created if necessary.
    """
    single_frame_dir.mkdir(parents=True, exist_ok=True)
    for source_path in npz_paths:
        pose_frame = load_npz_frame(source_path)
        preview = render_pose_image(
            pose_frame,
            draw_style=draw_style,
            transparent=False,
            conf_threshold=conf_threshold,
        )
        target_path = single_frame_dir / f"{source_path.stem}.png"
        preview.save(target_path)
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def render_pose_frames(npz_paths: List[Path], pose_frame_dir: Path, draw_style: str, conf_threshold: float) -> None:
    """Render every NPZ pose frame to a PNG, logging progress periodically.

    Progress is printed for the first frame, every 100th frame, and the last
    frame. PNGs are named after each NPZ stem.
    """
    pose_frame_dir.mkdir(parents=True, exist_ok=True)
    total = len(npz_paths)
    for position, source_path in enumerate(npz_paths, start=1):
        pose_frame = load_npz_frame(source_path)
        rendered = render_pose_image(
            pose_frame,
            draw_style=draw_style,
            transparent=False,
            conf_threshold=conf_threshold,
        )
        rendered.save(pose_frame_dir / f"{source_path.stem}.png")
        if position in (1, total) or position % 100 == 0:
            print(f"Rendered pose frame {position}/{total}: {source_path.name}")
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def create_video_from_frames(frame_dir: Path, output_path: Path, fps: int) -> None:
    """Encode the numbered PNGs in *frame_dir* into an H.264 MP4 via ffmpeg.

    Silently returns without invoking ffmpeg when the directory holds no PNGs.
    NOTE(review): the input pattern assumes frames are named %08d.png starting
    at 1 — verify this matches how upstream steps name their frames.
    """
    if next(frame_dir.glob("*.png"), None) is None:
        return
    ffmpeg_command = ["ffmpeg", "-hide_banner", "-loglevel", "error", "-y"]
    ffmpeg_command += ["-framerate", str(fps)]
    ffmpeg_command += ["-i", str(frame_dir / "%08d.png")]
    ffmpeg_command += ["-c:v", "libx264", "-pix_fmt", "yuv420p"]
    ffmpeg_command.append(str(output_path))
    subprocess.run(ffmpeg_command, check=True)
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def resolve_raw_video(video_dir: Path, raw_video: Path | None) -> Path | None:
|
| 427 |
+
if raw_video is not None and raw_video.exists():
|
| 428 |
+
return raw_video
|
| 429 |
+
video_id = video_dir.name
|
| 430 |
+
raw_root = REPO_ROOT / "raw_video"
|
| 431 |
+
for extension in VIDEO_EXTENSIONS:
|
| 432 |
+
candidate = raw_root / f"{video_id}{extension}"
|
| 433 |
+
if candidate.exists():
|
| 434 |
+
return candidate
|
| 435 |
+
return None
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
def extract_video_frames(raw_video: Path, fps: int, temp_dir: Path) -> List[Path]:
    """Dump *raw_video* to numbered PNG frames at *fps* via ffmpeg.

    Frames are written to *temp_dir* (created if needed) as %08d.png and
    returned in sorted order. Raises CalledProcessError if ffmpeg fails.
    """
    temp_dir.mkdir(parents=True, exist_ok=True)
    ffmpeg_command = ["ffmpeg", "-hide_banner", "-loglevel", "error", "-y"]
    ffmpeg_command += ["-i", str(raw_video)]
    ffmpeg_command += ["-vf", f"fps={fps}"]
    ffmpeg_command.append(str(temp_dir / "%08d.png"))
    subprocess.run(ffmpeg_command, check=True)
    return sorted(temp_dir.glob("*.png"))
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
def render_overlay_frames(
    npz_paths: List[Path],
    raw_frame_paths: List[Path],
    overlay_dir: Path,
    draw_style: str,
    conf_threshold: float,
) -> None:
    """Alpha-composite transparent pose renders over the raw video frames.

    Pairs NPZ frames with raw frames positionally; extra frames on either side
    are ignored. Progress is logged for the first, every 100th, and the last
    composited frame.
    """
    overlay_dir.mkdir(parents=True, exist_ok=True)
    frame_count = min(len(npz_paths), len(raw_frame_paths))
    pairs = zip(npz_paths[:frame_count], raw_frame_paths[:frame_count])
    for position, (pose_path, raw_path) in enumerate(pairs, start=1):
        pose_frame = load_npz_frame(pose_path)
        pose_layer = render_pose_image(
            pose_frame,
            draw_style=draw_style,
            transparent=True,
            conf_threshold=conf_threshold,
        )
        with Image.open(raw_path) as raw_image:
            background = raw_image.convert("RGBA")
            composed = Image.alpha_composite(background, pose_layer)
            composed.save(overlay_dir / f"{pose_path.stem}.png")
        if position in (1, frame_count) or position % 100 == 0:
            print(f"Rendered overlay frame {position}/{frame_count}: {pose_path.name}")
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
def main() -> None:
    """Run the full NPZ visualization pipeline for one video directory.

    Steps: resolve directories, render per-frame previews, render a pose-only
    video, and — when the raw source video can be found — render an overlay
    video with the pose composited over the original frames.
    """
    args = parse_args()
    args.draw_style = normalize_draw_style(args.draw_style)
    video_dir = args.video_dir.resolve()
    # Defaults: NPZs under <video_dir>/npz, outputs under a style-named dir.
    npz_dir = (args.npz_dir or (video_dir / "npz")).resolve()
    output_dir = (args.output_dir or (video_dir / f"visualization_{args.draw_style}")).resolve()
    pose_frame_dir = output_dir / "pose_frames"
    single_frame_dir = output_dir / "single_frames"
    overlay_frame_dir = output_dir / "overlay_frames"
    pose_video_path = output_dir / f"visualization_{args.draw_style}.mp4"
    overlay_video_path = output_dir / f"visualization_{args.draw_style}_overlay.mp4"

    if not npz_dir.exists():
        raise FileNotFoundError(f"NPZ directory not found: {npz_dir}")

    npz_paths = sorted(npz_dir.glob("*.npz"))
    if args.max_frames is not None:
        npz_paths = npz_paths[: args.max_frames]
    if not npz_paths:
        raise FileNotFoundError(f"No NPZ files found in {npz_dir}")

    # --force wipes any previous run's outputs before re-rendering.
    if output_dir.exists() and args.force:
        shutil.rmtree(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Preview indices are 1-based; out-of-range indices are silently dropped.
    preview_indices = parse_frame_indices(args.frame_indices)
    preview_paths = [
        npz_paths[index - 1]
        for index in preview_indices
        if 0 < index <= len(npz_paths)
    ]
    save_frame_previews(preview_paths, single_frame_dir, args.draw_style, args.conf_threshold)

    render_pose_frames(npz_paths, pose_frame_dir, args.draw_style, args.conf_threshold)
    create_video_from_frames(pose_frame_dir, pose_video_path, args.fps)

    raw_video = resolve_raw_video(video_dir, args.raw_video)
    if raw_video is None:
        # Overlay rendering is best-effort: pose-only outputs already exist.
        print("No raw video found for overlay rendering. Pose-only outputs were created.")
        return

    # Extracted raw frames are transient; always clean them up.
    temp_root = Path(tempfile.mkdtemp(prefix="sign_dwpose_overlay_"))
    try:
        raw_frame_paths = extract_video_frames(raw_video, args.fps, temp_root)
        render_overlay_frames(npz_paths, raw_frame_paths, overlay_frame_dir, args.draw_style, args.conf_threshold)
        create_video_from_frames(overlay_frame_dir, overlay_video_path, args.fps)
    finally:
        shutil.rmtree(temp_root, ignore_errors=True)
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
# Script entry point: run the visualization pipeline when executed directly.
if __name__ == "__main__":
    main()
|
utils/__pycache__/draw_dw_lib.cpython-310.pyc
ADDED
|
Binary file (7.89 kB). View file
|
|
|
utils/__pycache__/draw_dw_lib.cpython-38.pyc
ADDED
|
Binary file (7.06 kB). View file
|
|
|
utils/__pycache__/preprocess_video.cpython-310.pyc
ADDED
|
Binary file (4.62 kB). View file
|
|
|
utils/__pycache__/preprocess_video.cpython-38.pyc
ADDED
|
Binary file (4.67 kB). View file
|
|
|
utils/__pycache__/preprocess_video_improve.cpython-310.pyc
ADDED
|
Binary file (6.4 kB). View file
|
|
|
utils/__pycache__/stats_npz.cpython-310.pyc
ADDED
|
Binary file (3.36 kB). View file
|
|
|
utils/__pycache__/stats_npz.cpython-312.pyc
ADDED
|
Binary file (5.44 kB). View file
|
|
|
utils/__pycache__/util.cpython-310.pyc
ADDED
|
Binary file (3.86 kB). View file
|
|
|
utils/__pycache__/util.cpython-38.pyc
ADDED
|
Binary file (3.89 kB). View file
|
|
|
utils/draw_dw_lib.py
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import numpy as np
|
| 3 |
+
import matplotlib
|
| 4 |
+
import cv2
|
| 5 |
+
import sys
|
| 6 |
+
import os
|
| 7 |
+
import _pickle as cPickle
|
| 8 |
+
import gzip
|
| 9 |
+
import subprocess
|
| 10 |
+
import torch
|
| 11 |
+
import colorsys
|
| 12 |
+
from typing import List, Dict, Any, Optional, Tuple
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Drawing threshold: pixel coordinates must exceed this to be rendered;
# (0, 0) coordinates mark undetected keypoints and are skipped.
eps = 0.01
|
| 16 |
+
|
| 17 |
+
def alpha_blend_color(color, alpha):
    """Scale every channel of *color* by *alpha*, truncating each to int.

    Used to dim a color according to a keypoint's confidence.
    """
    blended = []
    for channel in color:
        blended.append(int(channel * alpha))
    return blended
|
| 21 |
+
|
| 22 |
+
def draw_bodypose(canvas, candidate, subset, score, transparent=False):
    """Draw OpenPose-style body limbs and joints on canvas.

    Args:
        canvas: numpy array canvas to draw on (3 channels, or 4 when
            transparent=True); the limb layer is dimmed by 0.6 before joints.
        candidate: flat array of normalized body keypoint coordinates.
        subset: per-person rows of indices into candidate (-1 = missing).
        score: per-person, per-joint confidence values.
        transparent: whether to draw with a per-element alpha channel.
    Returns:
        canvas: drawn canvas.
    """
    H, W, C = canvas.shape
    candidate = np.array(candidate)
    subset = np.array(subset)

    stickwidth = 4

    # 1-based joint pairs of the 18-keypoint OpenPose skeleton.
    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
               [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
               [1, 16], [16, 18], [3, 17], [6, 18]]

    # Standard OpenPose color wheel.
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]

    # Add alpha channel if transparent
    if transparent:
        colors = [color + [255] for color in colors]

    # Only the first 17 limb pairs are drawn (ear links are skipped).
    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            conf = score[n][np.array(limbSeq[i]) - 1]
            # Confidence cutoff is hard-coded at 0.3 here.
            # NOTE(review): unlike the joint loop below, there is no -1
            # (missing joint) check before indexing candidate — confirm
            # suppressed joints always carry low scores.
            if conf[0] < 0.3 or conf[1] < 0.3:
                continue
            Y = candidate[index.astype(int), 0] * float(W)
            X = candidate[index.astype(int), 1] * float(H)
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            # Limb is drawn as a filled ellipse spanning the two joints.
            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
            if transparent:
                color = colors[i][:-1] + [int(255 * conf[0] * conf[1])]  # Adjust alpha based on confidence
            else:
                color = colors[i]
            cv2.fillConvexPoly(canvas, polygon, color)

    # Dim the limb layer so the joint dots drawn next stand out.
    canvas = (canvas * 0.6).astype(np.uint8)

    for i in range(18):
        for n in range(len(subset)):
            index = int(subset[n][i])
            if index == -1:
                continue
            x, y = candidate[index][0:2]
            conf = score[n][i]
            x = int(x * W)
            y = int(y * H)
            if transparent:
                color = colors[i][:-1] + [int(255 * conf)]  # Adjust alpha based on confidence
            else:
                color = colors[i]
            cv2.circle(canvas, (int(x), int(y)), 4, color, thickness=-1)

    return canvas
|
| 88 |
+
|
| 89 |
+
def draw_handpose(canvas, all_hand_peaks, all_hand_scores, transparent=False):
    """Draw 21-keypoint hand skeletons on canvas.

    Args:
        canvas: numpy array canvas (3 channels, 4 when transparent=True).
        all_hand_peaks: per-hand arrays of normalized (x, y) keypoints.
        all_hand_scores: per-hand confidence arrays, zipped with the peaks.
        transparent: whether to encode confidence in the alpha channel
            instead of darkening the color.
    Returns:
        canvas: drawn canvas.
    """
    H, W, C = canvas.shape

    # Finger bone connectivity of the 21-keypoint hand model (wrist = 0).
    edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10],
             [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]

    for peaks, scores in zip(all_hand_peaks, all_hand_scores):
        for ie, e in enumerate(edges):
            x1, y1 = peaks[e[0]]
            x2, y2 = peaks[e[1]]
            x1 = int(x1 * W)
            y1 = int(y1 * H)
            x2 = int(x2 * W)
            y2 = int(y2 * H)
            # Bone confidence is the product of its two endpoint scores.
            score = scores[e[0]] * scores[e[1]]
            # (0, 0) coordinates mark undetected keypoints; skip those bones.
            if x1 > eps and y1 > eps and x2 > eps and y2 > eps:
                # Hue varies per bone so each finger segment is distinct.
                color = matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0])
                if transparent:
                    color = np.append(color, score)  # Add alpha channel
                else:
                    color = color * score
                cv2.line(canvas, (x1, y1), (x2, y2), color * 255, thickness=2)

        for i, keypoint in enumerate(peaks):
            x, y = keypoint
            x = int(x * W)
            y = int(y * H)
            if x > eps and y > eps:
                if transparent:
                    color = (0, 0, 0, scores[i])  # Black with alpha
                else:
                    color = (0, 0, int(scores[i] * 255))  # Original color
                cv2.circle(canvas, (x, y), 4, color, thickness=-1)
    return canvas
|
| 124 |
+
|
| 125 |
+
def draw_facepose(canvas, all_lmks, all_scores, transparent=False):
    """Draw face landmarks as small dots whose brightness (or alpha, when
    transparent=True) reflects each landmark's confidence score.
    """
    canvas_h, canvas_w = canvas.shape[0], canvas.shape[1]
    for landmarks, confidences in zip(all_lmks, all_scores):
        for landmark, confidence in zip(landmarks, confidences):
            px = int(landmark[0] * canvas_w)
            py = int(landmark[1] * canvas_h)
            # (0, 0) coordinates mark undetected landmarks; skip them.
            if px <= eps or py <= eps:
                continue
            level = int(confidence * 255)
            if transparent:
                dot_color = (255, 255, 255, level)  # White with alpha
            else:
                dot_color = (level, level, level)  # Grayscale by confidence
            cv2.circle(canvas, (px, py), 3, dot_color, thickness=-1)
    return canvas
|
| 141 |
+
|
| 142 |
+
def draw_pose(pose, H, W, include_body=True, include_hand=True, include_face=True, ref_w=2160, transparent=False):
    """vis dwpose outputs with optional transparent background

    Args:
        pose (Dict): DWposeDetector outputs - supports both the new
            person_id format and the legacy format
        H (int): height
        W (int): width
        include_body (bool): whether to draw body keypoints
        include_hand (bool): whether to draw hand keypoints
        include_face (bool): whether to draw face keypoints
        ref_w (int, optional): reference width. Defaults to 2160.
        transparent (bool, optional): whether to use transparent background. Defaults to False.

    Returns:
        np.ndarray: image pixel value in RGBA mode if transparent=True, otherwise RGB mode
    """
    sz = min(H, W)
    # Scale factor so drawing happens at ref_w resolution (shorter side),
    # then the result is resized back to (W, H) below.
    sr = (ref_w / sz) if sz != ref_w else 1

    # Create canvas - with an alpha channel when transparent output is requested
    if transparent:
        canvas = np.zeros(shape=(int(H*sr), int(W*sr), 4), dtype=np.uint8)
    else:
        canvas = np.zeros(shape=(int(H*sr), int(W*sr), 3), dtype=np.uint8)

    # Check whether this frame uses the new person_id data format
    if 'num_persons' in pose and pose['num_persons'] > 0:
        # Use the new multi-person data structure
        processed_data = process_pose_data(pose, H, W)
        bodies = processed_data['bodies']
        faces = processed_data['faces']
        hands = processed_data['hands']
        candidate = bodies['candidate']
        subset = bodies['subset']

        if include_body:
            canvas = draw_bodypose(canvas, candidate, subset, score=bodies['score'], transparent=transparent)

        if include_hand:
            canvas = draw_handpose(canvas, hands, processed_data['hands_score'], transparent=transparent)

        if include_face:
            canvas = draw_facepose(canvas, faces, processed_data['faces_score'], transparent=transparent)

    else:
        # Fall back to the legacy data format
        try:
            bodies = pose['bodies']
            faces = pose['faces']
            hands = pose['hands']
            candidate = bodies['candidate']
            subset = bodies['subset']

            if include_body:
                canvas = draw_bodypose(canvas, candidate, subset, score=bodies['score'], transparent=transparent)

            if include_hand:
                canvas = draw_handpose(canvas, hands, pose['hands_score'], transparent=transparent)

            if include_face:
                canvas = draw_facepose(canvas, faces, pose['faces_score'], transparent=transparent)
        except Exception as e:
            # Legacy-format drawing failed; fall through and return the
            # (possibly empty) canvas instead of raising.
            print(f"绘制旧格式数据失败: {str(e)}")
            # Return an empty canvas
            pass

    # Resize back to the requested size and convert BGR(A) -> RGB(A),
    # returning channel-first (C, H, W) layout.
    if transparent:
        return cv2.cvtColor(cv2.resize(canvas, (W, H)), cv2.COLOR_BGRA2RGBA).transpose(2, 0, 1)
    else:
        return cv2.cvtColor(cv2.resize(canvas, (W, H)), cv2.COLOR_BGR2RGB).transpose(2, 0, 1)
|
| 212 |
+
|
| 213 |
+
def process_pose_data(pose_data: Dict[str, Any], height: int, width: int) -> Dict[str, Any]:
    """Convert the per-person ``person_{i}`` pose layout into the flat
    candidate/subset structure expected by ``draw_pose``.

    Fixes over the previous version:
    - tolerates sparse/missing ``person_{i}`` keys (subset and score rows are
      built from the persons actually collected, instead of indexing
      ``range(num_persons)`` and crashing on a gap);
    - the hand-rolled nested validity loop is replaced by an equivalent
      vectorized expression.

    Args:
        pose_data: DWPose frame dict; expected to carry ``num_persons`` and
            one ``person_{i}`` sub-dict per detected person.
        height: frame height in pixels (currently unused; kept for API
            compatibility with callers).
        width: frame width in pixels (currently unused; kept for API
            compatibility with callers).

    Returns:
        Dict with ``bodies`` (``candidate``/``subset``/``score``), ``hands``,
        ``hands_score``, ``faces`` and ``faces_score`` entries. Empty numpy
        arrays are returned when no person data is present.
    """
    processed_data: Dict[str, Any] = {}

    all_bodies = []
    all_body_scores = []
    all_hands = []
    all_hand_scores = []
    all_faces = []
    all_face_scores = []

    if pose_data.get('num_persons', 0) > 0:
        for person_id in range(pose_data['num_persons']):
            person_data = pose_data.get(f'person_{person_id}')
            if person_data is None:
                # Tolerate sparse person indices instead of misaligning subset.
                continue
            all_bodies.append(person_data['body_keypoints'])
            all_body_scores.append(person_data['body_scores'])
            all_hands.extend([person_data['left_hand_keypoints'], person_data['right_hand_keypoints']])
            all_hand_scores.extend([person_data['left_hand_scores'], person_data['right_hand_scores']])
            all_faces.append(person_data['face_keypoints'])
            all_face_scores.append(person_data['face_scores'])

    if all_bodies:
        bodies = np.vstack(all_bodies)
        body_scores = np.array(all_body_scores)
        # Use the number of persons actually collected, not the declared count.
        present = len(all_bodies)
        # Each person owns a contiguous run of 18 rows in the stacked
        # candidate array.
        subset = np.array([list(range(p * 18, (p + 1) * 18)) for p in range(present)])
        # A keypoint is valid iff its body score is non-negative. np.where
        # with the `< 0` test keeps NaN scores valid, matching the original
        # element-wise loop.
        scores = np.where(body_scores < 0, 0, 1).astype(body_scores.dtype)
    else:
        # No usable person data: return the same empty placeholders as before.
        bodies = np.array([])
        subset = np.array([[]])
        scores = np.array([[]])

    hands = np.array(all_hands) if all_hands else np.array([])
    hand_scores = np.array(all_hand_scores) if all_hand_scores else np.array([])
    faces = np.array(all_faces) if all_faces else np.array([])
    face_scores = np.array(all_face_scores) if all_face_scores else np.array([])

    processed_data['bodies'] = {
        'candidate': bodies,
        'subset': subset,
        'score': scores
    }
    processed_data['hands'] = hands
    processed_data['hands_score'] = hand_scores
    processed_data['faces'] = faces
    processed_data['faces_score'] = face_scores

    return processed_data
|
utils/preprocess_video.py
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import shutil
|
| 3 |
+
import subprocess
|
| 4 |
+
import av
|
| 5 |
+
import torch
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
import torch.multiprocessing as mp
|
| 8 |
+
from torch.nn.parallel import DistributedDataParallel
|
| 9 |
+
from utils.util import get_fps, read_frames, save_videos_from_pil
|
| 10 |
+
from utils.preprocess_video import *
|
| 11 |
+
from PIL import Image
|
| 12 |
+
import numpy as np
|
| 13 |
+
import json
|
| 14 |
+
|
| 15 |
+
def ensure_dir(directory):
    """Create *directory* if it does not exist yet and return its path."""
    if not os.path.exists(directory):
        os.makedirs(directory)
        print(f"Created directory: {directory}")
    else:
        print(f"Directory already exists: {directory}")
    return directory
|
| 22 |
+
|
| 23 |
+
# [previous helper functions remain the same]
|
| 24 |
+
def get_video_dimensions(video_path):
    """Return (width, height) of the first video stream, probed with ffprobe."""
    probe_cmd = [
        'ffprobe',
        '-v', 'error',
        '-select_streams', 'v:0',
        '-show_entries', 'stream=width,height',
        '-of', 'csv=p=0',
        video_path
    ]
    probe = subprocess.run(probe_cmd, capture_output=True, text=True)
    dims = probe.stdout.strip().split(',')
    return int(dims[0]), int(dims[1])
|
| 36 |
+
|
| 37 |
+
def compile_frames_to_video(frame_dir, output_path, fps=30):
    """Compile frames into a video using H.264 codec."""
    encode_cmd = [
        'ffmpeg', '-y',
        '-f', 'image2',
        '-r', str(fps),
        '-i', f'{frame_dir}/%08d.jpg',
        '-c:v', 'libx264',
        '-preset', 'medium',
        '-crf', '18',
        '-pix_fmt', 'yuv420p',
        output_path,
    ]
    subprocess.run(encode_cmd, check=True)
    print(f"Successfully compiled video: {output_path}")
|
| 52 |
+
|
| 53 |
+
def preprocess_videos(video_dir, dataset_name, square_crop=False, fps=24, quality_preset="medium", target_resolution=None):
    """
    Preprocess all videos with optional square cropping, customizable FPS, quality, and resolution.

    Frames are extracted with ffmpeg into ``<result_dir>/<video>/crop_frame`` and then
    re-encoded into ``crop_original_video.mp4``. Videos with existing outputs are skipped.

    Args:
        video_dir (str): Directory containing input videos
        dataset_name (str): Name of the dataset
        square_crop (bool): Whether to crop videos to 1:1 aspect ratio (default: False)
        fps (int): Target frames per second (default: 24)
        quality_preset (str): Quality preset - "high", "medium", "low", "ultra_low" (default: "medium")
        target_resolution (int): Target resolution for the shorter side (e.g., 512, 256). None for original
    """
    result_dir = ensure_dir(f"../output/{dataset_name}_results")

    # JPEG qscale / x264 CRF pairs per preset (larger numbers = lower quality).
    quality_settings = {
        "high": {"qscale": "2", "crf": "18"},
        "medium": {"qscale": "5", "crf": "23"},
        "low": {"qscale": "10", "crf": "28"},
        "ultra_low": {"qscale": "15", "crf": "35"}
    }

    current_quality = quality_settings.get(quality_preset, quality_settings["medium"])

    for video_file in os.listdir(video_dir):
        if not video_file.endswith(".mp4"):
            continue

        video_name = os.path.splitext(video_file)[0]
        video_full_path = os.path.join(video_dir, video_file)
        folder_path = f"{result_dir}/{video_name}"

        frame_path = f"{folder_path}/crop_frame"
        output_video_path = f"{folder_path}/crop_original_video.mp4"

        # Skip if already processed
        if os.path.exists(frame_path) and os.listdir(frame_path) and os.path.exists(output_video_path):
            crop_status = "cropped" if square_crop else "original"
            print(f"{crop_status.capitalize()} frames and video already exist for {video_name}. Skipping preprocessing.")
            continue

        try:
            # Create output directory
            os.makedirs(frame_path, exist_ok=True)

            # Get video dimensions
            width, height = get_video_dimensions(video_full_path)

            # Build the ffmpeg video filter chain
            filters = []

            if square_crop:
                # Center-crop to the shorter side
                if width < height:
                    crop_size = width
                    x_offset = 0
                    y_offset = (height - width) // 2
                else:
                    crop_size = height
                    x_offset = (width - height) // 2
                    y_offset = 0
                filters.append(f'crop={crop_size}:{crop_size}:{x_offset}:{y_offset}')

            # Optional downscaling
            if target_resolution:
                if square_crop:
                    # Already square after cropping: scale straight to target x target
                    filters.append(f'scale={target_resolution}:{target_resolution}')
                else:
                    # Preserve the original aspect ratio
                    filters.append(f'scale=-2:{target_resolution}:force_original_aspect_ratio=decrease')

            # Frame-rate conversion
            filters.append(f'fps={fps}/1')

            # Combine all filters into one chain
            filter_complex = ','.join(filters)

            # Frame-extraction command
            cmd = [
                'ffmpeg', '-i', video_full_path,
                '-vf', filter_complex,
                '-f', 'image2',
                '-qscale', current_quality["qscale"],  # adjustable JPEG quality
                f'{frame_path}/%08d.jpg'
            ]

            resolution_info = f" (Resolution: {target_resolution})" if target_resolution else ""
            crop_info = "with square cropping" if square_crop else "without cropping"
            print(f"Processing {video_file} {crop_info} (FPS: {fps}, Quality: {quality_preset}{resolution_info})")

            subprocess.run(cmd, check=True)
            print(f"Successfully extracted frames for {video_file}")

            # Compile frames back into a video with optimized settings
            compile_frames_to_video_optimized(frame_path, output_video_path, fps, quality_preset)

        except Exception as e:
            print(f"Error preprocessing {video_file}: {str(e)}")
            continue
|
| 153 |
+
|
| 154 |
+
def compile_frames_to_video_optimized(frame_dir, output_path, fps=30, quality_preset="medium"):
    """Compile frames into a video with optimized quality settings."""
    # x264 CRF per preset (larger CRF = lower quality, smaller file).
    quality_crf = {
        "high": "18",
        "medium": "23",
        "low": "28",
        "ultra_low": "35"
    }
    crf_value = quality_crf.get(quality_preset, "23")

    encode_cmd = [
        'ffmpeg', '-y',
        '-f', 'image2',
        '-r', str(fps),
        '-i', f'{frame_dir}/%08d.jpg',
        '-c:v', 'libx264',
        '-preset', 'medium',  # switch to 'fast' for quicker encoding
        '-crf', crf_value,
        '-pix_fmt', 'yuv420p',
        output_path
    ]
    subprocess.run(encode_cmd, check=True)
    print(f"Successfully compiled optimized video: {output_path} (Quality: {quality_preset})")
|
| 180 |
+
|
| 181 |
+
# 使用示例:
|
| 182 |
+
|
| 183 |
+
# 1. 保持原分辨率,降低质量
|
| 184 |
+
# preprocess_videos(video_dir, dataset_name, square_crop=True, fps=30, quality_preset="low")
|
| 185 |
+
|
| 186 |
+
# 2. 降低分辨率到512x512(方形裁剪)
|
| 187 |
+
# preprocess_videos(video_dir, dataset_name, square_crop=True, fps=30, quality_preset="medium", target_resolution=512)
|
| 188 |
+
|
| 189 |
+
# 3. 极度压缩:低分辨率 + 超低质量
|
| 190 |
+
# preprocess_videos(video_dir, dataset_name, square_crop=True, fps=30, quality_preset="ultra_low", target_resolution=256)
|
| 191 |
+
|
| 192 |
+
# 4. 不裁剪,但缩放到较小尺寸
|
| 193 |
+
# preprocess_videos(video_dir, dataset_name, square_crop=False, fps=30, quality_preset="low", target_resolution=480)
|
| 194 |
+
|
| 195 |
+
def process_npz_files(input_folder_path, output_folder_path):
    """
    Flatten per-frame DWPose NPZ files into one space-separated string.

    Each frame contributes exactly 385 values: 18 body points (x, y) + 18 body
    scores, 2 hands x 21 points (x, y) + 2 x 21 hand scores, 68 face points
    (x, y) + 68 face scores, plus one normalized frame-position value in [0, 1].
    Frames whose flattened length differs from 385 are skipped with a warning.

    Args:
        input_folder_path (str): Path to the folder containing NPZ files
        output_folder_path (str): Unused; kept for interface compatibility.
            (The previous docstring claimed outputs were written here, but the
            caller is responsible for writing the returned string.)

    Returns:
        str: All frames joined with spaces, terminated by a newline.
    """
    # Sort so frames are emitted in filename order.
    npz_files = sorted(f for f in os.listdir(input_folder_path) if f.endswith('.npz'))
    total_frames = len(npz_files)

    output = []

    for idx, npz_file in enumerate(npz_files):
        file_path = os.path.join(input_folder_path, npz_file)
        data = np.load(file_path, allow_pickle=True)

        # Body keypoints and their confidences
        bodies = data['bodies']
        body_scores = data['body_scores'][0]

        # Hand keypoints and their confidences
        hands = data['hands']
        hands_scores = data['hands_scores']

        # Face keypoints and their confidences
        faces = data['faces'][0]
        faces_scores = data['faces_scores'][0]

        frame_data = []

        # Body coordinates, then scores
        for i in range(bodies.shape[0]):
            frame_data.extend([f"{bodies[i][0]:.8f}", f"{bodies[i][1]:.8f}"])
        for score in body_scores:
            frame_data.append(f"{score:.8f}")

        # Hand coordinates, then scores
        for hand in hands:
            for point in hand:
                frame_data.extend([f"{point[0]:.8f}", f"{point[1]:.8f}"])
        for hand_score in hands_scores:
            frame_data.extend([f"{score:.8f}" for score in hand_score])

        # Face coordinates, then scores
        for point in faces:
            frame_data.extend([f"{point[0]:.8f}", f"{point[1]:.8f}"])
        for score in faces_scores:
            frame_data.append(f"{score:.8f}")

        # Normalized position of this frame within the clip
        frame_count = idx / (total_frames - 1) if total_frames > 1 else 0
        frame_data.append(f"{frame_count:.8f}")

        # Sanity check: each frame must flatten to exactly 385 values
        if len(frame_data) != 385:
            print(f"Warning: Frame {idx} in {input_folder_path} has {len(frame_data)} values instead of 385")
            continue  # skip this frame

        output.append(" ".join(frame_data))

    return " ".join(output) + "\n"
|
utils/preprocess_video_improve.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import shutil
|
| 3 |
+
import subprocess
|
| 4 |
+
import av
|
| 5 |
+
import torch
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
import torch.multiprocessing as mp
|
| 8 |
+
from torch.nn.parallel import DistributedDataParallel
|
| 9 |
+
from utils.util import get_fps, read_frames, save_videos_from_pil
|
| 10 |
+
from PIL import Image
|
| 11 |
+
import numpy as np
|
| 12 |
+
import json
|
| 13 |
+
|
| 14 |
+
def ensure_dir(directory):
    """Create *directory* if it does not exist yet and return its path."""
    if not os.path.exists(directory):
        os.makedirs(directory)
        print(f"Created directory: {directory}")
    else:
        print(f"Directory already exists: {directory}")
    return directory
|
| 21 |
+
|
| 22 |
+
# [previous helper functions remain the same]
|
| 23 |
+
def get_video_dimensions(video_path):
    """Return (width, height) of the first video stream, probed with ffprobe."""
    probe_cmd = [
        'ffprobe',
        '-v', 'error',
        '-select_streams', 'v:0',
        '-show_entries', 'stream=width,height',
        '-of', 'csv=p=0',
        video_path
    ]
    probe = subprocess.run(probe_cmd, capture_output=True, text=True)
    dims = probe.stdout.strip().split(',')
    return int(dims[0]), int(dims[1])
|
| 35 |
+
|
| 36 |
+
def compile_frames_to_video(frame_dir, output_path, fps=30):
    """Compile frames into a video using H.264 codec."""
    encode_cmd = [
        'ffmpeg', '-y',
        '-f', 'image2',
        '-r', str(fps),
        '-i', f'{frame_dir}/%08d.jpg',
        '-c:v', 'libx264',
        '-preset', 'medium',
        '-crf', '18',
        '-pix_fmt', 'yuv420p',
        output_path,
    ]
    subprocess.run(encode_cmd, check=True)
    print(f"Successfully compiled video: {output_path}")
|
| 51 |
+
|
| 52 |
+
def preprocess_videos(video_dir, dataset_name, square_crop=False, fps=24, quality_preset="medium", target_resolution=None):
    """
    Preprocess all videos with optional square cropping, customizable FPS, quality, and resolution.

    Frames are extracted with ffmpeg into ``<result_dir>/<video>/crop_frame`` and then
    re-encoded into ``crop_original_video.mp4``. Videos with existing outputs are skipped.

    Args:
        video_dir (str): Directory containing input videos
        dataset_name (str): Name of the dataset
        square_crop (bool): Whether to crop videos to 1:1 aspect ratio (default: False)
        fps (int): Target frames per second (default: 24)
        quality_preset (str): Quality preset - "high", "medium", "low", "ultra_low" (default: "medium")
        target_resolution (int): Target resolution for the shorter side (e.g., 512, 256). None for original
    """
    result_dir = ensure_dir(f"../output/{dataset_name}_results")

    # JPEG qscale / x264 CRF pairs per preset (larger numbers = lower quality).
    quality_settings = {
        "high": {"qscale": "2", "crf": "18"},
        "medium": {"qscale": "5", "crf": "23"},
        "low": {"qscale": "10", "crf": "28"},
        "ultra_low": {"qscale": "15", "crf": "35"}
    }

    current_quality = quality_settings.get(quality_preset, quality_settings["medium"])

    for video_file in os.listdir(video_dir):
        if not video_file.endswith(".mp4"):
            continue

        video_name = os.path.splitext(video_file)[0]
        video_full_path = os.path.join(video_dir, video_file)
        folder_path = f"{result_dir}/{video_name}"

        frame_path = f"{folder_path}/crop_frame"
        output_video_path = f"{folder_path}/crop_original_video.mp4"

        # Skip if already processed
        if os.path.exists(frame_path) and os.listdir(frame_path) and os.path.exists(output_video_path):
            crop_status = "cropped" if square_crop else "original"
            print(f"{crop_status.capitalize()} frames and video already exist for {video_name}. Skipping preprocessing.")
            continue

        try:
            # Create output directory
            os.makedirs(frame_path, exist_ok=True)

            # Get video dimensions
            width, height = get_video_dimensions(video_full_path)

            # Build the ffmpeg video filter chain
            filters = []

            if square_crop:
                # Center-crop to the shorter side
                if width < height:
                    crop_size = width
                    x_offset = 0
                    y_offset = (height - width) // 2
                else:
                    crop_size = height
                    x_offset = (width - height) // 2
                    y_offset = 0
                filters.append(f'crop={crop_size}:{crop_size}:{x_offset}:{y_offset}')

            # Optional downscaling
            if target_resolution:
                if square_crop:
                    # Already square after cropping: scale straight to target x target
                    filters.append(f'scale={target_resolution}:{target_resolution}')
                else:
                    # Preserve the original aspect ratio
                    filters.append(f'scale=-2:{target_resolution}:force_original_aspect_ratio=decrease')

            # Frame-rate conversion
            filters.append(f'fps={fps}/1')

            # Combine all filters into one chain
            filter_complex = ','.join(filters)

            # Frame-extraction command
            cmd = [
                'ffmpeg', '-i', video_full_path,
                '-vf', filter_complex,
                '-f', 'image2',
                '-qscale', current_quality["qscale"],  # adjustable JPEG quality
                f'{frame_path}/%08d.jpg'
            ]

            resolution_info = f" (Resolution: {target_resolution})" if target_resolution else ""
            crop_info = "with square cropping" if square_crop else "without cropping"
            print(f"Processing {video_file} {crop_info} (FPS: {fps}, Quality: {quality_preset}{resolution_info})")

            subprocess.run(cmd, check=True)
            print(f"Successfully extracted frames for {video_file}")

            # Compile frames back into a video with optimized settings
            compile_frames_to_video_optimized(frame_path, output_video_path, fps, quality_preset)

        except Exception as e:
            print(f"Error preprocessing {video_file}: {str(e)}")
            continue
|
| 152 |
+
|
| 153 |
+
def compile_frames_to_video_optimized(frame_dir, output_path, fps=30, quality_preset="medium"):
    """Compile frames into a video with optimized quality settings."""
    # x264 CRF per preset (larger CRF = lower quality, smaller file).
    quality_crf = {
        "high": "18",
        "medium": "23",
        "low": "28",
        "ultra_low": "35"
    }
    crf_value = quality_crf.get(quality_preset, "23")

    encode_cmd = [
        'ffmpeg', '-y',
        '-f', 'image2',
        '-r', str(fps),
        '-i', f'{frame_dir}/%08d.jpg',
        '-c:v', 'libx264',
        '-preset', 'medium',  # switch to 'fast' for quicker encoding
        '-crf', crf_value,
        '-pix_fmt', 'yuv420p',
        output_path
    ]
    subprocess.run(encode_cmd, check=True)
    print(f"Successfully compiled optimized video: {output_path} (Quality: {quality_preset})")
|
| 179 |
+
|
| 180 |
+
# 使用示例:
|
| 181 |
+
|
| 182 |
+
# 1. 保持原分辨率,降低质量
|
| 183 |
+
# preprocess_videos(video_dir, dataset_name, square_crop=True, fps=30, quality_preset="low")
|
| 184 |
+
|
| 185 |
+
# 2. 降低分辨率到512x512(方形裁剪)
|
| 186 |
+
# preprocess_videos(video_dir, dataset_name, square_crop=True, fps=30, quality_preset="medium", target_resolution=512)
|
| 187 |
+
|
| 188 |
+
# 3. 极度压缩:低分辨率 + 超低质量
|
| 189 |
+
# preprocess_videos(video_dir, dataset_name, square_crop=True, fps=30, quality_preset="ultra_low", target_resolution=256)
|
| 190 |
+
|
| 191 |
+
# 4. 不裁剪,但缩放到较小尺寸
|
| 192 |
+
# preprocess_videos(video_dir, dataset_name, square_crop=False, fps=30, quality_preset="low", target_resolution=480)
|
| 193 |
+
|
| 194 |
+
def process_npz_files(input_folder_path, output_folder_path):
    """
    Flatten per-frame DWPose NPZ files into one space-separated string.

    Each frame contributes exactly 385 values: 18 body points (x, y) + 18 body
    scores, 2 hands x 21 points (x, y) + 2 x 21 hand scores, 68 face points
    (x, y) + 68 face scores, plus one normalized frame-position value in [0, 1].
    Frames whose flattened length differs from 385 are skipped with a warning.

    Args:
        input_folder_path (str): Path to the folder containing NPZ files
        output_folder_path (str): Unused; kept for interface compatibility.
            (The previous docstring claimed outputs were written here, but the
            caller is responsible for writing the returned string.)

    Returns:
        str: All frames joined with spaces, terminated by a newline.
    """
    # Sort so frames are emitted in filename order.
    npz_files = sorted(f for f in os.listdir(input_folder_path) if f.endswith('.npz'))
    total_frames = len(npz_files)

    output = []

    for idx, npz_file in enumerate(npz_files):
        file_path = os.path.join(input_folder_path, npz_file)
        data = np.load(file_path, allow_pickle=True)

        # Body keypoints and their confidences
        bodies = data['bodies']
        body_scores = data['body_scores'][0]

        # Hand keypoints and their confidences
        hands = data['hands']
        hands_scores = data['hands_scores']

        # Face keypoints and their confidences
        faces = data['faces'][0]
        faces_scores = data['faces_scores'][0]

        frame_data = []

        # Body coordinates, then scores
        for i in range(bodies.shape[0]):
            frame_data.extend([f"{bodies[i][0]:.8f}", f"{bodies[i][1]:.8f}"])
        for score in body_scores:
            frame_data.append(f"{score:.8f}")

        # Hand coordinates, then scores
        for hand in hands:
            for point in hand:
                frame_data.extend([f"{point[0]:.8f}", f"{point[1]:.8f}"])
        for hand_score in hands_scores:
            frame_data.extend([f"{score:.8f}" for score in hand_score])

        # Face coordinates, then scores
        for point in faces:
            frame_data.extend([f"{point[0]:.8f}", f"{point[1]:.8f}"])
        for score in faces_scores:
            frame_data.append(f"{score:.8f}")

        # Normalized position of this frame within the clip
        frame_count = idx / (total_frames - 1) if total_frames > 1 else 0
        frame_data.append(f"{frame_count:.8f}")

        # Sanity check: each frame must flatten to exactly 385 values
        if len(frame_data) != 385:
            print(f"Warning: Frame {idx} in {input_folder_path} has {len(frame_data)} values instead of 385")
            continue  # skip this frame

        output.append(" ".join(frame_data))

    return " ".join(output) + "\n"
|
utils/stats_npz.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import fcntl
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Dict, Iterable
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Column names persisted for every video record in the stats NPZ file.
# Each field is stored as a parallel object array keyed by the entry in
# "video_ids" at the same index (see load_stats / save_stats below).
STATUS_FIELDS = [
    "sign_language",
    "title",
    "duration_sec",
    "start_sec",
    "end_sec",
    "subtitle_languages",
    "subtitle_dir_path",
    "subtitle_en_source",
    "raw_video_path",
    "raw_metadata_path",
    "metadata_status",
    "subtitle_status",
    "download_status",
    "process_status",
    "upload_status",
    "local_cleanup_status",
    "archive_name",
    "last_error",
    "updated_at",
]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def load_stats(stats_path: Path) -> Dict[str, Dict[str, str]]:
    """Read the stats NPZ file into a mapping of video_id -> field dict.

    Missing files yield an empty dict; missing or short columns yield "".
    """
    if not stats_path.exists():
        return {}

    data = np.load(stats_path, allow_pickle=True)
    raw_ids = data.get("video_ids", np.asarray([], dtype=object)).tolist()
    stats: Dict[str, Dict[str, str]] = {}
    for index, raw_id in enumerate(raw_ids):
        video_id = str(raw_id)
        record = {}
        for field in STATUS_FIELDS:
            column = data.get(field)
            if column is not None and index < len(column):
                record[field] = str(column[index])
            else:
                record[field] = ""
        stats[video_id] = record
    return stats
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def save_stats(stats_path: Path, stats: Dict[str, Dict[str, str]]) -> None:
    """Persist *stats* as parallel object arrays in an NPZ file, atomically."""
    stats_path.parent.mkdir(parents=True, exist_ok=True)
    video_ids = sorted(stats)
    payload = {"video_ids": np.asarray(video_ids, dtype=object)}
    for field in STATUS_FIELDS:
        column = [stats[video_id].get(field, "") for video_id in video_ids]
        payload[field] = np.asarray(column, dtype=object)
    # Write to a temp file first, then rename so readers never see a partial file.
    tmp_path = stats_path.parent / f".{stats_path.stem}.tmp.npz"
    np.savez(tmp_path, **payload)
    tmp_path.replace(stats_path)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def ensure_record(stats: Dict[str, Dict[str, str]], video_id: str) -> Dict[str, str]:
    """Return the record for *video_id*, inserting a blank one if absent."""
    blank = {field: "" for field in STATUS_FIELDS}
    return stats.setdefault(video_id, blank)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _lock_path(stats_path: Path) -> Path:
    """Derive the lock-file path used to serialize access to *stats_path*."""
    lock_suffix = stats_path.suffix + ".lock"
    return stats_path.with_suffix(lock_suffix)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def update_video_stats(stats_path: Path, video_id: str, **updates: str) -> Dict[str, str]:
    """Apply *updates* to one video's record under an exclusive file lock.

    Unknown fields are ignored; None values are stored as "".
    Returns a copy of the updated record.
    """
    lock_path = _lock_path(stats_path)
    lock_path.parent.mkdir(parents=True, exist_ok=True)
    with lock_path.open("w", encoding="utf-8") as lock_file:
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
        stats = load_stats(stats_path)
        record = ensure_record(stats, video_id)
        for field, value in updates.items():
            if field not in STATUS_FIELDS:
                continue
            record[field] = "" if value is None else str(value)
        save_stats(stats_path, stats)
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
    return dict(record)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def update_many_video_stats(stats_path: Path, video_ids: Iterable[str], **updates: str) -> None:
    """Apply the same *updates* to several video records under one lock."""
    lock_path = _lock_path(stats_path)
    lock_path.parent.mkdir(parents=True, exist_ok=True)
    with lock_path.open("w", encoding="utf-8") as lock_file:
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
        stats = load_stats(stats_path)
        for video_id in video_ids:
            record = ensure_record(stats, video_id)
            for field, value in updates.items():
                if field not in STATUS_FIELDS:
                    continue
                record[field] = "" if value is None else str(value)
        save_stats(stats_path, stats)
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
|
utils/util.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
import os
|
| 3 |
+
import os.path as osp
|
| 4 |
+
import shutil
|
| 5 |
+
import sys
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
import av
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import torchvision
|
| 12 |
+
from einops import rearrange
|
| 13 |
+
from PIL import Image
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def seed_everything(seed):
    """Seed the stdlib, numpy and torch (CPU + CUDA) RNGs for reproducibility."""
    import random

    import numpy as np

    random.seed(seed)
    np.random.seed(seed % (2**32))  # numpy only accepts 32-bit seeds
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def import_filename(filename):
    """Load a Python source file as a module object and return it.

    The module is registered in ``sys.modules`` under the fixed name
    "mymodule", so importing a second file overwrites the first entry.
    """
    # The file-level `import importlib` does not guarantee the `util`
    # submodule is bound; import it explicitly to avoid AttributeError.
    import importlib.util

    spec = importlib.util.spec_from_file_location("mymodule", filename)
    module = importlib.util.module_from_spec(spec)
    sys.modules[spec.name] = module
    spec.loader.exec_module(module)
    return module
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def delete_additional_ckpt(base_path, num_keep):
    """Keep only the newest *num_keep* "checkpoint-N" dirs under *base_path*."""
    ckpt_dirs = [d for d in os.listdir(base_path) if d.startswith("checkpoint-")]
    excess = len(ckpt_dirs) - num_keep
    if excess <= 0:
        return
    # Sort by step number so the earliest checkpoints are removed first.
    ckpt_dirs.sort(key=lambda name: int(name.split("-")[-1]))
    for stale in ckpt_dirs[:excess]:
        stale_path = osp.join(base_path, stale)
        if osp.exists(stale_path):
            shutil.rmtree(stale_path)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def save_videos_from_pil(pil_images, path, fps=8):
    """Write a list of PIL images to *path* as an .mp4 (H.264) or animated .gif."""
    import av

    save_fmt = Path(path).suffix
    os.makedirs(os.path.dirname(path), exist_ok=True)
    width, height = pil_images[0].size

    if save_fmt == ".mp4":
        container = av.open(path, "w")
        stream = container.add_stream("libx264", rate=fps)
        stream.width = width
        stream.height = height

        for frame_image in pil_images:
            av_frame = av.VideoFrame.from_image(frame_image)
            container.mux(stream.encode(av_frame))
        # Flush frames still buffered inside the encoder before closing.
        container.mux(stream.encode())
        container.close()

    elif save_fmt == ".gif":
        pil_images[0].save(
            fp=path,
            format="GIF",
            append_images=pil_images[1:],
            save_all=True,
            duration=(1 / fps * 1000),
            loop=0,
        )
    else:
        raise ValueError("Unsupported file type. Use .mp4 or .gif.")
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):
    """Tile a (b, c, t, h, w) video batch into one grid image per frame and save."""
    videos = rearrange(videos, "b c t h w -> t b c h w")
    height, width = videos.shape[-2:]

    frame_images = []
    for frame_batch in videos:
        grid = torchvision.utils.make_grid(frame_batch, nrow=n_rows)  # (c h w)
        grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)  # (h w c)
        if rescale:
            grid = (grid + 1.0) / 2.0  # map [-1, 1] -> [0, 1]
        pixels = (grid * 255).numpy().astype(np.uint8)
        frame_images.append(Image.fromarray(pixels))

    os.makedirs(os.path.dirname(path), exist_ok=True)

    save_videos_from_pil(frame_images, path, fps)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def read_frames(video_path):
    """Decode every frame of *video_path* into a list of RGB PIL images.

    Args:
        video_path: Path to any container format PyAV can open.

    Returns:
        list[PIL.Image.Image]: one RGB image per decoded frame.
    """
    container = av.open(video_path)
    try:
        video_stream = next(s for s in container.streams if s.type == "video")
        frames = []
        for packet in container.demux(video_stream):
            for frame in packet.decode():
                image = Image.frombytes(
                    "RGB",
                    (frame.width, frame.height),
                    frame.to_rgb().to_ndarray(),
                )
                frames.append(image)
    finally:
        # Fix: the original never closed the container, leaking the file handle
        # (the sibling get_fps() does close it).
        container.close()

    return frames
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def get_fps(video_path):
    """Return the average frame rate of the first video stream in *video_path*."""
    container = av.open(video_path)
    stream = next(s for s in container.streams if s.type == "video")
    fps = stream.average_rate
    container.close()
    return fps
|