#!/usr/bin/env bash
# Download all evaluation data to /opt/dlami/nvme (or WORK_ROOT).
#
# Downloads:
# 1) Video-MME benchmark videos
# 2) LVBench benchmark videos
# 3) Sync eval: original oops videos, random-shift videos, extracted audio
# 4) kto_training_data_v2_test.jsonl (sync eval test set, also in repo data/)
#
# Requires: conda env already set up (bash setup_env.sh first).
#
# Usage:
#   bash setup_data.sh
#   WORK_ROOT=/my/fast/disk bash setup_data.sh
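#
# Each download step is skipped when its output already exists, so the
# script is safe to re-run after a partial or interrupted download.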
#
set -euo pipefail
WORK_ROOT="${WORK_ROOT:-/opt/dlami/nvme}"
CONDA_ENV="${CONDA_ENV:-video}"
INSTALL_DIR="${INSTALL_DIR:-${HOME}/anaconda3}"
HF_CACHE="${HF_CACHE:-${HOME}/hf_datasets}"
REPO_ROOT="$(cd "$(dirname "$0")" && pwd)"
# Sync eval paths (align with eval_dpo_sync.py --data-root default)
DATA_ROOT="${WORK_ROOT}/video_source"
ORIGINAL_ROOT="${DATA_ROOT}/original"
ORIGINAL_OOPS_DIR="${ORIGINAL_ROOT}/uag_oops"
RANDOM_SHIFT_DIR="${DATA_ROOT}/random_shift_video"
EXTRACTED_AUDIO_DIR="${DATA_ROOT}/extracted_audio"
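# Resulting layout under ${DATA_ROOT}:
#   original/uag_oops/*.mp4
#   random_shift_video/{delay,early}/*.mp4  (+ shift_metadata.jsonl)
#   extracted_audio/*.wav
#   kto_training_data_v2_test.jsonl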
log() { echo -e "\n\033[1;36m>>> $*\033[0m"; }
# ── Activate conda ──────────────────────────────────────────────────────────
if [[ -f "${INSTALL_DIR}/etc/profile.d/conda.sh" ]]; then
source "${INSTALL_DIR}/etc/profile.d/conda.sh"
elif command -v conda &>/dev/null; then
eval "$(conda shell.bash hook)"
else
echo "Error: conda not found. Run setup_env.sh first."
exit 1
fi
conda activate "${CONDA_ENV}"
# ── Detect HF CLI ───────────────────────────────────────────────────────────
if command -v hf &>/dev/null; then
  HF_CLI="hf"
elif command -v huggingface-cli &>/dev/null; then
  HF_CLI="huggingface-cli"
else
  echo "Error: neither 'hf' nor 'huggingface-cli' found. Run setup_env.sh first." >&2
  exit 1
fi
echo "Using HF CLI: ${HF_CLI}"
# ── Helpers ─────────────────────────────────────────────────────────────────
# True if $1 contains at least one *.mp4 directly (non-recursive).
has_mp4_files() { compgen -G "$1/*.mp4" &>/dev/null; }
# True if $1 contains at least one *.wav anywhere below it.
has_wav_files() { find "$1" -name "*.wav" -print -quit 2>/dev/null | grep -q .; }
# Move mp4s from any nested subdirectory of $1 up into $1 itself
# (tarballs sometimes expand with an extra top-level directory).
flatten_mp4s() {
  local dir="$1"
  shopt -s globstar nullglob
  for src in "${dir}"/**/*.mp4; do
    [[ "$(dirname "$src")" != "${dir}" ]] && mv -n "$src" "${dir}/"
  done
  shopt -u globstar nullglob
}
###############################################################################
# 1) Video-MME
###############################################################################
log "[1/5] Video-MME"
VIDEOMME_DIR="${WORK_ROOT}/videomme"
if [[ -d "${VIDEOMME_DIR}/data/data" ]] && has_mp4_files "${VIDEOMME_DIR}/data/data"; then
echo " Already present at ${VIDEOMME_DIR}/data/data; skipping."
else
python "${REPO_ROOT}/scripts/download_videomme.py" --output-dir "${VIDEOMME_DIR}"
fi
###############################################################################
# 2) LVBench
###############################################################################
log "[2/5] LVBench"
LVBENCH_DIR="${WORK_ROOT}/lvbench"
if [[ -d "${LVBENCH_DIR}" ]] && has_mp4_files "${LVBENCH_DIR}"; then
echo " Already present at ${LVBENCH_DIR}; skipping."
else
python "${REPO_ROOT}/scripts/download_lvbench.py" --output-dir "${LVBENCH_DIR}"
fi
###############################################################################
# 3) Original oops videos (hasnat79/ual_bench → original/uag_oops/)
###############################################################################
log "[3/5] Original oops videos (sync eval)"
mkdir -p "${ORIGINAL_ROOT}" "${HF_CACHE}/ual_bench"
if has_mp4_files "${ORIGINAL_OOPS_DIR}"; then
  echo " Already present at ${ORIGINAL_OOPS_DIR}; skipping."
else
  "${HF_CLI}" download hasnat79/ual_bench \
    --repo-type dataset \
    --include "uag_oops.tar" \
    --local-dir "${HF_CACHE}/ual_bench"
  # The tarball expands to a top-level uag_oops/ directory, which becomes
  # ${ORIGINAL_OOPS_DIR}.
  tar -xf "${HF_CACHE}/ual_bench/uag_oops.tar" -C "${ORIGINAL_ROOT}"
fi
###############################################################################
# 4) Random-shift videos (Rakancorle11/random_shift_video → random_shift_video/{delay,early}/)
###############################################################################
log "[4/5] Random-shift videos (sync eval)"
mkdir -p "${RANDOM_SHIFT_DIR}" "${HF_CACHE}/random_shift_video"
if has_mp4_files "${RANDOM_SHIFT_DIR}/delay" && has_mp4_files "${RANDOM_SHIFT_DIR}/early"; then
  echo " Already present; skipping."
else
  "${HF_CLI}" download Rakancorle11/random_shift_video \
    --repo-type dataset \
    --local-dir "${HF_CACHE}/random_shift_video"
  mkdir -p "${RANDOM_SHIFT_DIR}/delay" "${RANDOM_SHIFT_DIR}/early"
  tar -xzf "${HF_CACHE}/random_shift_video/delay.tar.gz" -C "${RANDOM_SHIFT_DIR}/delay"
  tar -xzf "${HF_CACHE}/random_shift_video/early.tar.gz" -C "${RANDOM_SHIFT_DIR}/early"
  if [[ -f "${HF_CACHE}/random_shift_video/metadata/shift_metadata.jsonl" ]]; then
    cp "${HF_CACHE}/random_shift_video/metadata/shift_metadata.jsonl" "${RANDOM_SHIFT_DIR}/"
  fi
  # The tarballs may expand with an extra nested directory; flatten to match
  # the expected random_shift_video/{delay,early}/*.mp4 layout.
  flatten_mp4s "${RANDOM_SHIFT_DIR}/delay"
  flatten_mp4s "${RANDOM_SHIFT_DIR}/early"
fi
###############################################################################
# 5) Extracted audio WAVs (Rakancorle11/extracted_audio → extracted_audio/)
###############################################################################
log "[5/5] Extracted audio (sync eval)"
mkdir -p "${EXTRACTED_AUDIO_DIR}" "${HF_CACHE}/extracted_audio"
if has_wav_files "${EXTRACTED_AUDIO_DIR}"; then
  echo " Already present; skipping."
else
  "${HF_CLI}" download Rakancorle11/extracted_audio \
    --repo-type dataset \
    --include "extracted_audio.tar.gz" \
    --local-dir "${HF_CACHE}/extracted_audio"
  # The tarball expands to a top-level extracted_audio/ directory under DATA_ROOT.
  tar -xzf "${HF_CACHE}/extracted_audio/extracted_audio.tar.gz" -C "${DATA_ROOT}"
fi
###############################################################################
# Copy test JSONL into DATA_ROOT (so --test-jsonl can default)
###############################################################################
TEST_JSONL_SRC="${REPO_ROOT}/data/kto_training_data_v2_test.jsonl"
TEST_JSONL_DST="${DATA_ROOT}/kto_training_data_v2_test.jsonl"
if [[ -f "${TEST_JSONL_SRC}" ]] && [[ ! -f "${TEST_JSONL_DST}" ]]; then
cp "${TEST_JSONL_SRC}" "${TEST_JSONL_DST}"
echo "Copied test JSONL β ${TEST_JSONL_DST}"
fi
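# Optional sanity check (hypothetical invocation: eval_dpo_sync.py's location
# and any remaining flags are assumptions; --data-root and --test-jsonl are
# the defaults this layout is meant to satisfy):
#   python eval_dpo_sync.py \
#     --data-root "${DATA_ROOT}" \
#     --test-jsonl "${TEST_JSONL_DST}"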
###############################################################################
# Summary
###############################################################################
echo ""
echo "========================================"
echo " Data setup complete"
echo "========================================"
echo " Video-MME videos: ${VIDEOMME_DIR}/data/data/"
echo " LVBench videos: ${LVBENCH_DIR}/"
echo " Sync originals: ${ORIGINAL_OOPS_DIR}/"
echo " Sync random_shift: ${RANDOM_SHIFT_DIR}/{delay,early}/"
echo " Sync extracted_audio: ${EXTRACTED_AUDIO_DIR}/"
echo " Sync test JSONL: ${TEST_JSONL_DST}"
echo ""
echo " original mp4: $(find "${ORIGINAL_OOPS_DIR}" -name '*.mp4' 2>/dev/null | wc -l)"
echo " random_shift delay: $(find "${RANDOM_SHIFT_DIR}/delay" -name '*.mp4' 2>/dev/null | wc -l)"
echo " random_shift early: $(find "${RANDOM_SHIFT_DIR}/early" -name '*.mp4' 2>/dev/null | wc -l)"
echo " extracted_audio wav: $(find "${EXTRACTED_AUDIO_DIR}" -name '*.wav' 2>/dev/null | wc -l)"
echo ""
echo "FineVideo pipeline: run these manually (requires Gemini API keys):"
echo " python code-202604/finevideo/filter_finevideo.py --target-count 1000 --early-stop"
echo " python code-202604/finevideo/download_finevideo_subset.py --max-videos 500"
echo " python code-202604/finevideo/extract_audio_finevideo.py"
echo " python code-202604/finevideo/generate_finevideo_sft.py --api-key YOUR_KEY"
echo " python code-202604/finevideo/generate_finevideo_dpo.py"
echo "========================================"