# Commands (you run these yourself)
Assume you cloned the repo and ran `bash setup_env.sh` (installs Anaconda under `~/anaconda3` if conda is missing, then creates the env). On first login after install, run `source ~/anaconda3/etc/profile.d/conda.sh` before `conda activate`.
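A minimal guard for that first-login step (assumes the default `~/anaconda3` location from `setup_env.sh`):
```bash
# Source the conda hook only if `conda` isn't already on PATH.
command -v conda >/dev/null 2>&1 || source ~/anaconda3/etc/profile.d/conda.sh
```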
```bash
conda activate video # or whatever CONDA_ENV you used
export REPO_ROOT="$(pwd)" # top of CleverHans-Evaluation clone
export SCRIPTS="${REPO_ROOT}/scripts"
export SYNC_TEST="${REPO_ROOT}/data/kto_training_data_v2_test.jsonl"
# Layout (fixed across your machines):
# Data (videos, merged weights, sync media) → fast disk
# Eval JSONL / metrics / summaries → ubuntu home
export WORK_ROOT="${WORK_ROOT:-/opt/dlami/nvme}"
export EVAL_ROOT="${EVAL_ROOT:-/home/ubuntu/eval_results}"
export VIDEOMME_DIR="${WORK_ROOT}/videomme"
export VIDEOMME_VIDEOS="${WORK_ROOT}/videomme/data/data"
export LVBENCH_VIDEOS="${WORK_ROOT}/lvbench"
export MERGED_DIR="${WORK_ROOT}/merged_models"
export DATA_ROOT="${WORK_ROOT}/video_source"
# vLLM: the Qwen3-Omni audio encoder has 20 attention heads, so use a --tp value that divides 20 (e.g. 4, not 8).
export TP="${TP:-4}"
export GPUS="${GPUS:-0,1,2,3}"
```
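A quick sanity check you can paste before launching vLLM (the 20-head constraint is from the comment above; the valid values are just the divisors of 20):
```bash
# Warn early if TP won't evenly shard the 20 audio-attention heads.
if [ $(( 20 % TP )) -ne 0 ]; then
  echo "WARNING: TP=${TP} does not divide 20 (valid: 1, 2, 4, 5, 10, 20)" >&2
fi
```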
## 1) Download all data (once per machine)
```bash
bash setup_data.sh
# Downloads Video-MME, LVBench, sync videos + audio to /opt/dlami/nvme.
# Or override: WORK_ROOT=/my/disk bash setup_data.sh
```
Or download individually:
```bash
python "${SCRIPTS}/download_videomme.py" --output-dir "${VIDEOMME_DIR}"
python "${SCRIPTS}/download_lvbench.py" --output-dir "${LVBENCH_VIDEOS}"
```
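A quick spot-check that the downloads landed before spending GPU time (exact file counts and sizes will vary):
```bash
# Confirm the video directories exist and are non-trivial in size.
ls "${VIDEOMME_VIDEOS}" | head
ls "${LVBENCH_VIDEOS}" | head
du -sh "${VIDEOMME_DIR}" "${LVBENCH_VIDEOS}"
```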
## 2) Merge DPO LoRA → full model
Base model for all four merges:
```bash
export BASE_SFT="Rakancorle11/qwen3omni_full_sft_revised_thinker_key"
```
```bash
mkdir -p "${MERGED_DIR}"
python "${SCRIPTS}/merge_adapter.py" \
--base-model "${BASE_SFT}" \
--adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v2_8632 \
--output "${MERGED_DIR}/dpo_v2_8632"
python "${SCRIPTS}/merge_adapter.py" \
--base-model "${BASE_SFT}" \
--adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v3_8632 \
--output "${MERGED_DIR}/dpo_v3_8632"
python "${SCRIPTS}/merge_adapter.py" \
--base-model "${BASE_SFT}" \
--adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v4_8632 \
--output "${MERGED_DIR}/dpo_v4_8632"
python "${SCRIPTS}/merge_adapter.py" \
--base-model "${BASE_SFT}" \
--adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v5_12075 \
--output "${MERGED_DIR}/dpo_v5_12075"
```
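The four merges differ only in the adapter suffix, so an equivalent loop works too (same adapters and output names as above):
```bash
# Merge each DPO LoRA into a full checkpoint under ${MERGED_DIR}.
for ADAPTER in w_audio_v2_8632 w_audio_v3_8632 w_audio_v4_8632 w_audio_v5_12075; do
  python "${SCRIPTS}/merge_adapter.py" \
    --base-model "${BASE_SFT}" \
    --adapter "Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-${ADAPTER}" \
    --output "${MERGED_DIR}/dpo_${ADAPTER#w_audio_}"
done
```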
## 3) Video-MME — pick model + label
**vLLM (fast)** — `--base-model` must be a **merged** full checkpoint path or a full model id:
```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
--base-model Qwen/Qwen3-Omni-30B-A3B-Instruct \
--video-dir "${VIDEOMME_VIDEOS}" \
--output-dir "${EVAL_ROOT}/videomme" \
--vllm --tp "${TP}" \
--max-samples -1 --label vmme_vanilla
```
```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
--base-model "${BASE_SFT}" \
--video-dir "${VIDEOMME_VIDEOS}" \
--output-dir "${EVAL_ROOT}/videomme" \
--vllm --tp "${TP}" \
--max-samples -1 --label vmme_full_sft
```
```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
--base-model "${MERGED_DIR}/dpo_v4_8632" \
--video-dir "${VIDEOMME_VIDEOS}" \
--output-dir "${EVAL_ROOT}/videomme" \
--vllm --tp "${TP}" \
--max-samples -1 --label vmme_dpo_v4_8632
```
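To sweep all four merged checkpoints with the same flags (labels follow the `vmme_<name>` pattern used above):
```bash
# Evaluate every merged DPO checkpoint on Video-MME via vLLM.
for M in dpo_v2_8632 dpo_v3_8632 dpo_v4_8632 dpo_v5_12075; do
  CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
    --base-model "${MERGED_DIR}/${M}" \
    --video-dir "${VIDEOMME_VIDEOS}" \
    --output-dir "${EVAL_ROOT}/videomme" \
    --vllm --tp "${TP}" \
    --max-samples -1 --label "vmme_${M}"
done
```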
**Transformers only** (no `--vllm`):
```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
--base-model "${BASE_SFT}" \
--adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v4_8632 \
--video-dir "${VIDEOMME_VIDEOS}" \
--output-dir "${EVAL_ROOT}/videomme" \
--max-samples -1 --label vmme_dpo_v4_adapter
```
## 4) LVBench — same pattern
```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_lvbench.py" \
--base-model "${MERGED_DIR}/dpo_v4_8632" \
--video-dir "${LVBENCH_VIDEOS}" \
--output-dir "${EVAL_ROOT}/lvbench" \
--vllm --tp "${TP}" \
--max-samples -1 --label lvb_dpo_v4_8632
```
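Vanilla and full-SFT baselines follow the same pattern; for example (label is illustrative):
```bash
# Vanilla baseline on LVBench, mirroring the Video-MME invocation.
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_lvbench.py" \
  --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct \
  --video-dir "${LVBENCH_VIDEOS}" \
  --output-dir "${EVAL_ROOT}/lvbench" \
  --vllm --tp "${TP}" \
  --max-samples -1 --label lvb_vanilla
```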
## 5) In-domain sync — transformers (`--data-root` + `--test-jsonl`)
```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_dpo_sync.py" \
--data-root "${DATA_ROOT}" \
--base-model "${BASE_SFT}" \
--adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v4_8632 \
--test-jsonl "${SYNC_TEST}" \
--output-dir "${EVAL_ROOT}/sync" \
--label sync_dpo_v4_8632
```
Defaults you can rely on if you keep this layout:
- Omit `--video-dir` / `--output-dir` on Video-MME & LVBench: the scripts default to the nvme video paths and `/home/ubuntu/eval_results/...`.
- Omit `--test-jsonl` on sync if you copied the test file to `${DATA_ROOT}/kto_training_data_v2_test.jsonl`.
- Omit `--output-dir` on sync to use `/home/ubuntu/eval_results/sync`.
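With those defaults in place, the Video-MME invocation from step 3 shrinks to (assuming the stock layout on this machine):
```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
  --base-model "${MERGED_DIR}/dpo_v4_8632" \
  --vllm --tp "${TP}" \
  --max-samples -1 --label vmme_dpo_v4_8632
```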
Optional GPT judge for answer parsing:
```bash
export OPENAI_API_KEY=sk-...
python "${SCRIPTS}/eval_dpo_sync.py" \
--data-root "${DATA_ROOT}" \
--base-model "${BASE_SFT}" \
--test-jsonl "${SYNC_TEST}" \
--output-dir "${EVAL_ROOT}/sync" \
--label sync_full_sft \
--gpt-judge
```
## 6) Recompute Video-MME metrics from `eval_results.jsonl`
```bash
python "${SCRIPTS}/compute_videomme_metrics_from_jsonl.py" \
--jsonl "${EVAL_ROOT}/videomme/vmme_vanilla/eval_results.jsonl" \
--out "${EVAL_ROOT}/videomme/vmme_vanilla/metrics.json"
```
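To recompute metrics for every Video-MME run in one pass (assumes one directory per label, per the layout below):
```bash
# Rebuild metrics.json next to each run's eval_results.jsonl.
for D in "${EVAL_ROOT}/videomme"/*/; do
  [ -f "${D}eval_results.jsonl" ] || continue
  python "${SCRIPTS}/compute_videomme_metrics_from_jsonl.py" \
    --jsonl "${D}eval_results.jsonl" \
    --out "${D}metrics.json"
done
```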
Results for each run live under:
- `${EVAL_ROOT}/videomme/<label>/`
- `${EVAL_ROOT}/lvbench/<label>/`
- `${EVAL_ROOT}/sync/<label>/`
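To see at a glance which runs exist (directory names are the `--label` values):
```bash
# List every run directory across the three benchmarks.
ls -d "${EVAL_ROOT}"/{videomme,lvbench,sync}/*/ 2>/dev/null
```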