#!/usr/bin/env bash
# Strict mode: abort on errors and on unset variables, propagate pipeline
# failures, and trace every command (handy when reading run.log afterwards).
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
|
|
# Resolve the directory holding this script and treat it as the repo root,
# then run everything from there so relative paths below are stable.
self_dir="$(dirname -- "${BASH_SOURCE[0]}")"
SCRIPT_DIR="$(cd -- "${self_dir}" && pwd)"
REPO_ROOT="${SCRIPT_DIR}"
cd "${REPO_ROOT}"
|
|
# Make the upstream SGL checkout, its VQA eval helpers, and this repo all
# importable. Highest-priority entries come first on PYTHONPATH; any
# pre-existing PYTHONPATH is preserved at the tail.
export UPSTREAM_SGL_ROOT="${UPSTREAM_SGL_ROOT:-/root/SGL}"
export PYTHONPATH="${UPSTREAM_SGL_ROOT}/eval/vqa:${UPSTREAM_SGL_ROOT}:${REPO_ROOT}:${PYTHONPATH:-}"
|
|
# Tunable configuration. Every knob can be overridden from the environment;
# `: "${VAR:=default}"` assigns the default only when VAR is unset or empty,
# which matches the previous `VAR=${VAR:-default}` form exactly.
: "${PYTHON_BIN:=/root/miniconda3/envs/sgl/bin/python}"
: "${DATA_ROOT:=/root/data}"
: "${TEXTVQA_ROOT:=${DATA_ROOT}/textvqa}"
: "${CHECKPOINT_ROOT:=${DATA_ROOT}}"
: "${GUIDE_CHECKPOINT:=${CHECKPOINT_ROOT}/InternVL2-1B}"
: "${LARGE_CHECKPOINT:=${CHECKPOINT_ROOT}/InternVL2-8B}"
: "${OUT_DIR:=${REPO_ROOT}/outputs/textvqa_shared_vision_1bguide_8btext}"
: "${RUN_NAME:=textvqa_shared_vision_1bguide_8btext}"
: "${LOG_PATH:=${OUT_DIR}/run.log}"
# Pruning / consistency knobs for the large model.
: "${PRUNE_LAYER:=0.0}"
: "${PRUNE_RATIO:=0.4}"
: "${CONSISTENCY_TOKEN_RATIO:=0.05}"
# Empty LIMIT means "evaluate the full split" (see the --limit handling below).
: "${LIMIT:=}"
# Reasoning controls for the large model.
: "${REASONING_MODE:=none}"
: "${REASONING_MAX_NEW_TOKENS:=64}"
: "${REASONING_TEMPERATURE:=0.0}"
: "${SAVE_REASONING:=0}"
# Reasoning / attention controls for the guide model.
: "${GUIDE_REASONING_MODE:=none}"
: "${GUIDE_REASONING_MAX_NEW_TOKENS:=1024}"
: "${GUIDE_REASONING_TEMPERATURE:=0.0}"
: "${GUIDE_REASONING_FILTER_MODE:=none}"
: "${GUIDE_ATTENTION_SOURCE:=default}"
: "${GUIDE_ATTENTION_AGGREGATION_MODE:=raw}"
: "${GUIDE_QUESTION_ATTENTION_WEIGHT:=1.0}"
: "${GUIDE_REASONING_ATTENTION_WEIGHT:=1.0}"
: "${GUIDE_ANSWER_ATTENTION_WEIGHT:=1.0}"
# Guide-text generation controls.
: "${GUIDE_TEXT_MODE:=none}"
: "${GUIDE_TEXT_MAX_NEW_TOKENS:=12}"
: "${GUIDE_TEXT_TEMPERATURE:=0.0}"
: "${PRUNE_SELECTION_MODE:=topk}"
: "${SEED:=0}"
|
|
# Ensure the output directory exists before anything (including the log
# redirection below) tries to write into it.
mkdir -p "${OUT_DIR}"


# Mirror all stdout/stderr into ${LOG_PATH} unless disabled with
# ENABLE_RUN_LOG=0. `tee -a` appends, so repeated runs accumulate in the
# same log file rather than truncating earlier history.
if [[ "${ENABLE_RUN_LOG:-1}" == "1" ]]; then
  exec > >(tee -a "${LOG_PATH}") 2>&1
fi
|
|
# Optional CLI flags, assembled from the mode toggles above. Flags are only
# emitted when the corresponding mode is enabled, so the Python CLI's own
# defaults apply otherwise.
EXTRA_ARGS=()
if [[ "${REASONING_MODE}" != "none" ]]; then
  EXTRA_ARGS+=(
    --reasoning-mode "${REASONING_MODE}"
    --reasoning-max-new-tokens "${REASONING_MAX_NEW_TOKENS}"
    --reasoning-temperature "${REASONING_TEMPERATURE}"
  )
fi
if [[ "${SAVE_REASONING}" == "1" ]]; then
  EXTRA_ARGS+=(--save-reasoning)
fi
if [[ "${GUIDE_REASONING_MODE}" != "none" ]]; then
  # FIX: --guide-answer-attention-weight was previously duplicated here; it is
  # always passed in the unconditional group below (same variable), so the
  # redundant copy is dropped to keep the command line unambiguous.
  EXTRA_ARGS+=(
    --guide-reasoning-mode "${GUIDE_REASONING_MODE}"
    --guide-reasoning-max-new-tokens "${GUIDE_REASONING_MAX_NEW_TOKENS}"
    --guide-reasoning-temperature "${GUIDE_REASONING_TEMPERATURE}"
    --guide-reasoning-filter-mode "${GUIDE_REASONING_FILTER_MODE}"
    --guide-attention-source "${GUIDE_ATTENTION_SOURCE}"
    --guide-attention-aggregation-mode "${GUIDE_ATTENTION_AGGREGATION_MODE}"
    --guide-reasoning-attention-weight "${GUIDE_REASONING_ATTENTION_WEIGHT}"
  )
fi
# Attention weights that apply regardless of the guide reasoning mode.
EXTRA_ARGS+=(
  --guide-question-attention-weight "${GUIDE_QUESTION_ATTENTION_WEIGHT}"
  --guide-answer-attention-weight "${GUIDE_ANSWER_ATTENTION_WEIGHT}"
)
if [[ "${GUIDE_TEXT_MODE}" != "none" ]]; then
  EXTRA_ARGS+=(
    --guide-text-mode "${GUIDE_TEXT_MODE}"
    --guide-text-max-new-tokens "${GUIDE_TEXT_MAX_NEW_TOKENS}"
    --guide-text-temperature "${GUIDE_TEXT_TEMPERATURE}"
  )
fi
|
|
# Record the effective configuration at the top of the run log so any result
# can be traced back to the exact settings that produced it.
run_config=(
  "start_time=$(date '+%Y-%m-%d %H:%M:%S')"
  "guide_checkpoint=${GUIDE_CHECKPOINT}"
  "large_checkpoint=${LARGE_CHECKPOINT}"
  "data_root=${DATA_ROOT}"
  "textvqa_root=${TEXTVQA_ROOT}"
  "out_dir=${OUT_DIR}"
  "run_name=${RUN_NAME}"
  "prune_layer=${PRUNE_LAYER}"
  "prune_ratio=${PRUNE_RATIO}"
  "prune_selection_mode=${PRUNE_SELECTION_MODE}"
  "consistency_token_ratio=${CONSISTENCY_TOKEN_RATIO}"
  "limit=${LIMIT:-full}"
  "seed=${SEED}"
  "guide_question_attention_weight=${GUIDE_QUESTION_ATTENTION_WEIGHT}"
  "guide_answer_attention_weight=${GUIDE_ANSWER_ATTENTION_WEIGHT}"
  "guide_reasoning_mode=${GUIDE_REASONING_MODE}"
  "guide_reasoning_filter_mode=${GUIDE_REASONING_FILTER_MODE}"
  "guide_attention_aggregation_mode=${GUIDE_ATTENTION_AGGREGATION_MODE}"
  "guide_text_mode=${GUIDE_TEXT_MODE}"
)
printf '%s\n' "${run_config[@]}"
printf '\n'
|
|
# Base command line: interpreter, entry script, and the always-required
# arguments. Optional mode-dependent flags are carried in EXTRA_ARGS.
CMD=("${PYTHON_BIN}" eval/vqa/run_shared_vision_guided_textvqa.py)
CMD+=(--guide-checkpoint "${GUIDE_CHECKPOINT}")
CMD+=(--large-checkpoint "${LARGE_CHECKPOINT}")
CMD+=(--data-root "${DATA_ROOT}")
CMD+=(--textvqa-root "${TEXTVQA_ROOT}")
CMD+=(--dynamic)
CMD+=(--out-dir "${OUT_DIR}")
CMD+=(--run-name "${RUN_NAME}")
CMD+=(--large-model-prune-layer "${PRUNE_LAYER}")
CMD+=(--large-model-prune-ratio "${PRUNE_RATIO}")
CMD+=(--large-model-prune-selection "${PRUNE_SELECTION_MODE}")
CMD+=(--consistency-token-ratio "${CONSISTENCY_TOKEN_RATIO}")
CMD+=(--seed "${SEED}")


# An empty LIMIT means "evaluate the full split", so --limit is only added
# when a cap was requested. (Kept as `if` — a bare `[[ ]] && append` would
# return non-zero on the empty case and trip `set -e`.)
if [[ -n "${LIMIT}" ]]; then
  CMD+=(--limit "${LIMIT}")
fi
|
|
# Launch the evaluation. CMD holds the required arguments; EXTRA_ARGS carries
# the optional, mode-dependent flags assembled above. Under `set -e` the
# script's exit status is the Python process's exit status.
"${CMD[@]}" \
  "${EXTRA_ARGS[@]}"
|
|