# sgl / run_textvqa_shared_vision_keep40_keep09.sh
# (upload-page residue, kept as comments: uploaded by xiaohaoWillX via
#  upload-large-folder tool, revision 2f48f4d verified)
#!/usr/bin/env bash
# Run the TextVQA shared-vision guided evaluation (2B guide / 8B text model)
# for two vision-token keep ratios (0.4 and 0.09). All knobs below are
# environment-overridable; defaults follow the ${VAR:-default} pattern.
set -euo pipefail

# Resolve the repository root from this script's own location and run there,
# so relative paths (eval/..., outputs/...) work regardless of caller's cwd.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="${SCRIPT_DIR}"
cd "${REPO_ROOT}"

# BUGFIX: append the inherited PYTHONPATH only when it is non-empty.
# The previous "${REPO_ROOT}:${PYTHONPATH:-}" left a trailing colon when
# PYTHONPATH was unset, and an empty PYTHONPATH entry puts the current
# directory on sys.path.
export PYTHONPATH="${REPO_ROOT}${PYTHONPATH:+:${PYTHONPATH}}"
export UPSTREAM_SGL_ROOT="${UPSTREAM_SGL_ROOT:-/home/yf/snap/SGL}"

# Interpreter and hardware selection.
PYTHON_BIN=${PYTHON_BIN:-/home/yf/.conda/envs/sgl/bin/python}
GPU_ID=${GPU_ID:-0}

# Model checkpoints and dataset locations.
GUIDE_CHECKPOINT=${GUIDE_CHECKPOINT:-/home/yf/snap/data/yf/InternVL2-2B}
LARGE_CHECKPOINT=${LARGE_CHECKPOINT:-/home/yf/snap/data/yf/InternVL2-8B}
DATA_ROOT=${DATA_ROOT:-/home/yf/snap/data/yf}
TEXTVQA_ROOT=${TEXTVQA_ROOT:-/home/yf/snap/data/yf/textvqa}
OUT_ROOT=${OUT_ROOT:-"${REPO_ROOT}/outputs/shared_vision_full"}

# Pruning / consistency configuration.
PRUNE_LAYER=${PRUNE_LAYER:-0.0}
CONSISTENCY_TOKEN_RATIO=${CONSISTENCY_TOKEN_RATIO:-0.05}

# Large-model reasoning options (disabled by default).
REASONING_MODE=${REASONING_MODE:-none}
REASONING_MAX_NEW_TOKENS=${REASONING_MAX_NEW_TOKENS:-64}
REASONING_TEMPERATURE=${REASONING_TEMPERATURE:-0.0}
SAVE_REASONING=${SAVE_REASONING:-0}

# Guide-model reasoning / attention options (disabled by default).
GUIDE_REASONING_MODE=${GUIDE_REASONING_MODE:-none}
GUIDE_REASONING_MAX_NEW_TOKENS=${GUIDE_REASONING_MAX_NEW_TOKENS:-1024}
GUIDE_REASONING_TEMPERATURE=${GUIDE_REASONING_TEMPERATURE:-0.0}
GUIDE_REASONING_FILTER_MODE=${GUIDE_REASONING_FILTER_MODE:-none}
GUIDE_ATTENTION_SOURCE=${GUIDE_ATTENTION_SOURCE:-default}
GUIDE_ATTENTION_AGGREGATION_MODE=${GUIDE_ATTENTION_AGGREGATION_MODE:-raw}
GUIDE_REASONING_ATTENTION_WEIGHT=${GUIDE_REASONING_ATTENTION_WEIGHT:-1.0}
GUIDE_ANSWER_ATTENTION_WEIGHT=${GUIDE_ANSWER_ATTENTION_WEIGHT:-1.0}

# Guide-model text options (disabled by default).
GUIDE_TEXT_MODE=${GUIDE_TEXT_MODE:-none}
GUIDE_TEXT_MAX_NEW_TOKENS=${GUIDE_TEXT_MAX_NEW_TOKENS:-12}
GUIDE_TEXT_TEMPERATURE=${GUIDE_TEXT_TEMPERATURE:-0.0}

# Token-selection strategy and RNG seed.
PRUNE_SELECTION_MODE=${PRUNE_SELECTION_MODE:-topk}
SEED=${SEED:-0}
#######################################
# Run one TextVQA shared-vision evaluation configuration.
# Globals:   all the *_MODE / *_CHECKPOINT / OUT_ROOT / ... knobs (read)
# Arguments: $1 - label for the output directory and run name (e.g. keep40)
#            $2 - value passed to --large-model-prune-ratio
# Outputs:   appends a parameter header plus the evaluator's stdout/stderr
#            to "<out_dir>/<run_name>.log"
#######################################
run_one() {
  local label="$1"
  local prune_ratio="$2"
  local out_dir="${OUT_ROOT}/${label}"
  local run_name="textvqa_shared_vision_2bguide_8btext_${label}"
  # Non-default selection modes get their own directory / run-name suffix
  # so results never overwrite the topk baseline.
  if [[ "${PRUNE_SELECTION_MODE}" != "topk" ]]; then
    out_dir="${OUT_ROOT}/${label}_${PRUNE_SELECTION_MODE}"
    run_name="${run_name}_${PRUNE_SELECTION_MODE}"
  fi
  local log_path="${out_dir}/${run_name}.log"
  mkdir -p "${out_dir}"
  # Reproducibility header; '>>' preserves the output of earlier runs.
  {
    echo "start_time=$(date '+%Y-%m-%d %H:%M:%S')"
    echo "gpu_id=${GPU_ID}"
    echo "prune_layer=${PRUNE_LAYER}"
    echo "prune_ratio=${prune_ratio}"
    echo "consistency_token_ratio=${CONSISTENCY_TOKEN_RATIO}"
    echo "reasoning_mode=${REASONING_MODE}"
    echo "reasoning_max_new_tokens=${REASONING_MAX_NEW_TOKENS}"
    echo "reasoning_temperature=${REASONING_TEMPERATURE}"
    echo "save_reasoning=${SAVE_REASONING}"
    echo "guide_reasoning_mode=${GUIDE_REASONING_MODE}"
    echo "guide_reasoning_max_new_tokens=${GUIDE_REASONING_MAX_NEW_TOKENS}"
    echo "guide_reasoning_temperature=${GUIDE_REASONING_TEMPERATURE}"
    echo "guide_reasoning_filter_mode=${GUIDE_REASONING_FILTER_MODE}"
    echo "guide_attention_source=${GUIDE_ATTENTION_SOURCE}"
    echo "guide_attention_aggregation_mode=${GUIDE_ATTENTION_AGGREGATION_MODE}"
    echo "guide_reasoning_attention_weight=${GUIDE_REASONING_ATTENTION_WEIGHT}"
    echo "guide_answer_attention_weight=${GUIDE_ANSWER_ATTENTION_WEIGHT}"
    echo "guide_text_mode=${GUIDE_TEXT_MODE}"
    echo "guide_text_max_new_tokens=${GUIDE_TEXT_MAX_NEW_TOKENS}"
    echo "guide_text_temperature=${GUIDE_TEXT_TEMPERATURE}"
    echo "prune_selection_mode=${PRUNE_SELECTION_MODE}"
    echo "seed=${SEED}"
    echo "guide_checkpoint=${GUIDE_CHECKPOINT}"
    echo "large_checkpoint=${LARGE_CHECKPOINT}"
    echo "data_root=${DATA_ROOT}"
    echo "textvqa_root=${TEXTVQA_ROOT}"
    echo
  } >> "${log_path}"
  # Optional flag groups are only forwarded when their mode is enabled.
  local extra_args=()
  if [[ "${REASONING_MODE}" != "none" ]]; then
    extra_args+=(
      --reasoning-mode "${REASONING_MODE}"
      --reasoning-max-new-tokens "${REASONING_MAX_NEW_TOKENS}"
      --reasoning-temperature "${REASONING_TEMPERATURE}"
    )
  fi
  if [[ "${SAVE_REASONING}" == "1" ]]; then
    extra_args+=(--save-reasoning)
  fi
  if [[ "${GUIDE_REASONING_MODE}" != "none" ]]; then
    extra_args+=(
      --guide-reasoning-mode "${GUIDE_REASONING_MODE}"
      --guide-reasoning-max-new-tokens "${GUIDE_REASONING_MAX_NEW_TOKENS}"
      --guide-reasoning-temperature "${GUIDE_REASONING_TEMPERATURE}"
      --guide-reasoning-filter-mode "${GUIDE_REASONING_FILTER_MODE}"
      --guide-attention-source "${GUIDE_ATTENTION_SOURCE}"
      --guide-attention-aggregation-mode "${GUIDE_ATTENTION_AGGREGATION_MODE}"
      --guide-reasoning-attention-weight "${GUIDE_REASONING_ATTENTION_WEIGHT}"
      --guide-answer-attention-weight "${GUIDE_ANSWER_ATTENTION_WEIGHT}"
    )
  fi
  if [[ "${GUIDE_TEXT_MODE}" != "none" ]]; then
    extra_args+=(
      --guide-text-mode "${GUIDE_TEXT_MODE}"
      --guide-text-max-new-tokens "${GUIDE_TEXT_MAX_NEW_TOKENS}"
      --guide-text-temperature "${GUIDE_TEXT_TEMPERATURE}"
    )
  fi
  # BUGFIX: with all modes at their "none" defaults extra_args is empty, and
  # bare "${extra_args[@]}" on an empty array aborts under `set -u` in
  # bash < 4.4. The ${arr[@]+...} guard expands to nothing in that case.
  CUDA_VISIBLE_DEVICES="${GPU_ID}" \
    "${PYTHON_BIN}" "${REPO_ROOT}/eval/vqa/run_shared_vision_guided_textvqa.py" \
    --guide-checkpoint "${GUIDE_CHECKPOINT}" \
    --large-checkpoint "${LARGE_CHECKPOINT}" \
    --data-root "${DATA_ROOT}" \
    --textvqa-root "${TEXTVQA_ROOT}" \
    --out-dir "${out_dir}" \
    --run-name "${run_name}" \
    --dynamic \
    --large-model-prune-layer "${PRUNE_LAYER}" \
    --large-model-prune-ratio "${prune_ratio}" \
    --large-model-prune-selection "${PRUNE_SELECTION_MODE}" \
    --consistency-token-ratio "${CONSISTENCY_TOKEN_RATIO}" \
    --seed "${SEED}" \
    ${extra_args[@]+"${extra_args[@]}"} \
    >> "${log_path}" 2>&1
}
# Evaluate both keep-ratio configurations: retain 40% then 9% of tokens.
for run_spec in "keep40:0.4" "keep09:0.09"; do
  run_one "${run_spec%%:*}" "${run_spec#*:}"
done