#!/usr/bin/env bash
# Fail on errors and unset variables, fail whole pipelines, trace commands.
set -euxo pipefail

# The directory holding this script doubles as the repository root.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="${SCRIPT_DIR}"
cd "${REPO_ROOT}"

# Make in-repo Python packages importable; a trailing ':' remains when
# PYTHONPATH started out empty, matching the original behavior.
PYTHONPATH="${REPO_ROOT}:${PYTHONPATH:-}"
export PYTHONPATH
# ---------------------------------------------------------------------------
# Defaults — every setting below is overridable from the environment.
# `: "${VAR:=default}"` assigns the default only when VAR is unset or empty,
# identical to the VAR=${VAR:-default} form.
# ---------------------------------------------------------------------------

# Upstream SGLang checkout consumed by the eval harness.
export UPSTREAM_SGL_ROOT="${UPSTREAM_SGL_ROOT:-/home/yf/snap/SGL}"

# Interpreter, model checkpoints, and output location.
: "${PYTHON_BIN:=python}"
: "${CHECKPOINT_ROOT:=${REPO_ROOT}/checkpoints}"
: "${GUIDE_CHECKPOINT:=${CHECKPOINT_ROOT}/models--OpenGVLab--InternVL2-2B}"
: "${LARGE_CHECKPOINT:=${CHECKPOINT_ROOT}/models--OpenGVLab--InternVL2-8B}"
: "${OUT_DIR:=${REPO_ROOT}/outputs/textvqa_shared_vision_2bguide_8btext}"

# Pruning / consistency knobs for the large model.
: "${PRUNE_LAYER:=0.0}"
: "${PRUNE_RATIO:=0.4}"
: "${CONSISTENCY_TOKEN_RATIO:=0.05}"

# Large-model reasoning options.
: "${REASONING_MODE:=none}"
: "${REASONING_MAX_NEW_TOKENS:=64}"
: "${REASONING_TEMPERATURE:=0.0}"
: "${SAVE_REASONING:=0}"

# Guide-model reasoning / attention options.
: "${GUIDE_REASONING_MODE:=none}"
: "${GUIDE_REASONING_MAX_NEW_TOKENS:=1024}"
: "${GUIDE_REASONING_TEMPERATURE:=0.0}"
: "${GUIDE_REASONING_FILTER_MODE:=none}"
: "${GUIDE_ATTENTION_SOURCE:=default}"
: "${GUIDE_ATTENTION_AGGREGATION_MODE:=raw}"
: "${GUIDE_REASONING_ATTENTION_WEIGHT:=1.0}"
: "${GUIDE_ANSWER_ATTENTION_WEIGHT:=1.0}"

# Guide-model short-text answer options.
: "${GUIDE_TEXT_MODE:=none}"
: "${GUIDE_TEXT_MAX_NEW_TOKENS:=12}"
: "${GUIDE_TEXT_TEMPERATURE:=0.0}"
# Assemble optional CLI flags. EXTRA_ARGS stays empty when every mode is
# left at "none" and SAVE_REASONING is not "1".
mkdir -p "${OUT_DIR}"
EXTRA_ARGS=()

# Large-model reasoning flags.
case "${REASONING_MODE}" in
  none) ;;
  *)
    EXTRA_ARGS+=(
      --reasoning-mode "${REASONING_MODE}"
      --reasoning-max-new-tokens "${REASONING_MAX_NEW_TOKENS}"
      --reasoning-temperature "${REASONING_TEMPERATURE}"
    )
    ;;
esac

# Persist reasoning traces when explicitly requested.
case "${SAVE_REASONING}" in
  1) EXTRA_ARGS+=(--save-reasoning) ;;
esac

# Guide-model reasoning and attention-guidance flags.
case "${GUIDE_REASONING_MODE}" in
  none) ;;
  *)
    EXTRA_ARGS+=(
      --guide-reasoning-mode "${GUIDE_REASONING_MODE}"
      --guide-reasoning-max-new-tokens "${GUIDE_REASONING_MAX_NEW_TOKENS}"
      --guide-reasoning-temperature "${GUIDE_REASONING_TEMPERATURE}"
      --guide-reasoning-filter-mode "${GUIDE_REASONING_FILTER_MODE}"
      --guide-attention-source "${GUIDE_ATTENTION_SOURCE}"
      --guide-attention-aggregation-mode "${GUIDE_ATTENTION_AGGREGATION_MODE}"
      --guide-reasoning-attention-weight "${GUIDE_REASONING_ATTENTION_WEIGHT}"
      --guide-answer-attention-weight "${GUIDE_ANSWER_ATTENTION_WEIGHT}"
    )
    ;;
esac

# Guide-model short-text answer flags.
case "${GUIDE_TEXT_MODE}" in
  none) ;;
  *)
    EXTRA_ARGS+=(
      --guide-text-mode "${GUIDE_TEXT_MODE}"
      --guide-text-max-new-tokens "${GUIDE_TEXT_MAX_NEW_TOKENS}"
      --guide-text-temperature "${GUIDE_TEXT_TEMPERATURE}"
    )
    ;;
esac
# Launch the shared-vision guided TextVQA evaluation.
#
# The final argument uses ${EXTRA_ARGS[@]+"${EXTRA_ARGS[@]}"} instead of a
# plain "${EXTRA_ARGS[@]}": under `set -u`, bash versions before 4.4 (e.g.
# macOS's /bin/bash 3.2) treat expanding an *empty* array as an unbound
# variable and abort. EXTRA_ARGS is empty in the default configuration
# (all *_MODE vars are "none" and SAVE_REASONING=0), so the guard is needed;
# the `+` form expands to nothing when the array is empty/unset and to the
# quoted elements otherwise.
"${PYTHON_BIN}" eval/vqa/run_shared_vision_guided_textvqa.py \
  --guide-checkpoint "${GUIDE_CHECKPOINT}" \
  --large-checkpoint "${LARGE_CHECKPOINT}" \
  --data-root "${REPO_ROOT}" \
  --dynamic \
  --out-dir "${OUT_DIR}" \
  --run-name textvqa_shared_vision_2bguide_8btext \
  --large-model-prune-layer "${PRUNE_LAYER}" \
  --large-model-prune-ratio "${PRUNE_RATIO}" \
  --consistency-token-ratio "${CONSISTENCY_TOKEN_RATIO}" \
  ${EXTRA_ARGS[@]+"${EXTRA_ARGS[@]}"}