#!/usr/bin/env bash
# Sweep the shared-vision guided TextVQA evaluation (InternVL2-2B guide +
# InternVL2-8B large model) over several visual-token prune ratios.
# Every knob below can be overridden via an environment variable; the value
# shown is the default.
set -euo pipefail

# Treat the directory containing this script as the repository root.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="${SCRIPT_DIR}"
cd "${REPO_ROOT}"

export PYTHONPATH="${REPO_ROOT}:${PYTHONPATH:-}"
export UPSTREAM_SGL_ROOT="${UPSTREAM_SGL_ROOT:-/home/yf/snap/SGL}"

# --- Execution environment ---------------------------------------------------
PYTHON_BIN="${PYTHON_BIN:-/home/yf/.conda/envs/sgl/bin/python}"
GPU_ID="${GPU_ID:-0}"

# --- Checkpoints, data, outputs ----------------------------------------------
GUIDE_CHECKPOINT="${GUIDE_CHECKPOINT:-/home/yf/snap/data/yf/InternVL2-2B}"
LARGE_CHECKPOINT="${LARGE_CHECKPOINT:-/home/yf/snap/data/yf/InternVL2-8B}"
DATA_ROOT="${DATA_ROOT:-/home/yf/snap/data/yf}"
TEXTVQA_ROOT="${TEXTVQA_ROOT:-/home/yf/snap/data/yf/textvqa}"
OUT_ROOT="${OUT_ROOT:-${REPO_ROOT}/outputs/shared_vision_full}"

# --- Pruning / consistency ---------------------------------------------------
PRUNE_LAYER="${PRUNE_LAYER:-0.0}"
CONSISTENCY_TOKEN_RATIO="${CONSISTENCY_TOKEN_RATIO:-0.05}"
PRUNE_SELECTION_MODE="${PRUNE_SELECTION_MODE:-topk}"

# --- Large-model reasoning ---------------------------------------------------
REASONING_MODE="${REASONING_MODE:-none}"
REASONING_MAX_NEW_TOKENS="${REASONING_MAX_NEW_TOKENS:-64}"
REASONING_TEMPERATURE="${REASONING_TEMPERATURE:-0.0}"
SAVE_REASONING="${SAVE_REASONING:-0}"

# --- Guide-model reasoning / attention ---------------------------------------
GUIDE_REASONING_MODE="${GUIDE_REASONING_MODE:-none}"
GUIDE_REASONING_MAX_NEW_TOKENS="${GUIDE_REASONING_MAX_NEW_TOKENS:-1024}"
GUIDE_REASONING_TEMPERATURE="${GUIDE_REASONING_TEMPERATURE:-0.0}"
GUIDE_REASONING_FILTER_MODE="${GUIDE_REASONING_FILTER_MODE:-none}"
GUIDE_ATTENTION_SOURCE="${GUIDE_ATTENTION_SOURCE:-default}"
GUIDE_ATTENTION_AGGREGATION_MODE="${GUIDE_ATTENTION_AGGREGATION_MODE:-raw}"
GUIDE_REASONING_ATTENTION_WEIGHT="${GUIDE_REASONING_ATTENTION_WEIGHT:-1.0}"
GUIDE_ANSWER_ATTENTION_WEIGHT="${GUIDE_ANSWER_ATTENTION_WEIGHT:-1.0}"

# --- Guide-model text hints --------------------------------------------------
GUIDE_TEXT_MODE="${GUIDE_TEXT_MODE:-none}"
GUIDE_TEXT_MAX_NEW_TOKENS="${GUIDE_TEXT_MAX_NEW_TOKENS:-12}"
GUIDE_TEXT_TEMPERATURE="${GUIDE_TEXT_TEMPERATURE:-0.0}"

SEED="${SEED:-0}"

#######################################
# Run one guided-TextVQA evaluation at a given prune ratio.
# Globals:   reads every UPPER_SNAKE configuration variable defined at the
#            top of this script (OUT_ROOT, PRUNE_SELECTION_MODE, GPU_ID, ...).
# Arguments: $1 - label for the output sub-directory / run name (e.g. keep40)
#            $2 - large-model visual-token prune ratio (e.g. 0.4)
# Outputs:   appends a config header plus the evaluator's stdout/stderr to
#            ${OUT_ROOT}/<label>[_<selection>]/<run_name>.log
#######################################
run_one() {
    local label="$1"
    local prune_ratio="$2"
    local out_dir="${OUT_ROOT}/${label}"
    local run_name="textvqa_shared_vision_2bguide_8btext_${label}"
    # Non-default selection modes get their own directory and run-name suffix
    # so their results never overwrite the top-k runs.
    if [[ "${PRUNE_SELECTION_MODE}" != "topk" ]]; then
        out_dir="${OUT_ROOT}/${label}_${PRUNE_SELECTION_MODE}"
        run_name="${run_name}_${PRUNE_SELECTION_MODE}"
    fi
    local log_path="${out_dir}/${run_name}.log"

    mkdir -p "${out_dir}"

    # Record the full configuration at the top of the log. Appending (>>) is
    # deliberate: repeated runs keep their history in the same file.
    {
        echo "start_time=$(date '+%Y-%m-%d %H:%M:%S')"
        echo "gpu_id=${GPU_ID}"
        echo "prune_layer=${PRUNE_LAYER}"
        echo "prune_ratio=${prune_ratio}"
        echo "consistency_token_ratio=${CONSISTENCY_TOKEN_RATIO}"
        echo "reasoning_mode=${REASONING_MODE}"
        echo "reasoning_max_new_tokens=${REASONING_MAX_NEW_TOKENS}"
        echo "reasoning_temperature=${REASONING_TEMPERATURE}"
        echo "save_reasoning=${SAVE_REASONING}"
        echo "guide_reasoning_mode=${GUIDE_REASONING_MODE}"
        echo "guide_reasoning_max_new_tokens=${GUIDE_REASONING_MAX_NEW_TOKENS}"
        echo "guide_reasoning_temperature=${GUIDE_REASONING_TEMPERATURE}"
        echo "guide_reasoning_filter_mode=${GUIDE_REASONING_FILTER_MODE}"
        echo "guide_attention_source=${GUIDE_ATTENTION_SOURCE}"
        echo "guide_attention_aggregation_mode=${GUIDE_ATTENTION_AGGREGATION_MODE}"
        echo "guide_reasoning_attention_weight=${GUIDE_REASONING_ATTENTION_WEIGHT}"
        echo "guide_answer_attention_weight=${GUIDE_ANSWER_ATTENTION_WEIGHT}"
        echo "guide_text_mode=${GUIDE_TEXT_MODE}"
        echo "guide_text_max_new_tokens=${GUIDE_TEXT_MAX_NEW_TOKENS}"
        echo "guide_text_temperature=${GUIDE_TEXT_TEMPERATURE}"
        echo "prune_selection_mode=${PRUNE_SELECTION_MODE}"
        echo "seed=${SEED}"
        echo "guide_checkpoint=${GUIDE_CHECKPOINT}"
        echo "large_checkpoint=${LARGE_CHECKPOINT}"
        echo "data_root=${DATA_ROOT}"
        echo "textvqa_root=${TEXTVQA_ROOT}"
        echo
    } >> "${log_path}"

    # Optional flag groups; only emitted when the corresponding mode is on.
    local -a extra_args=()
    if [[ "${REASONING_MODE}" != "none" ]]; then
        extra_args+=(
            --reasoning-mode "${REASONING_MODE}"
            --reasoning-max-new-tokens "${REASONING_MAX_NEW_TOKENS}"
            --reasoning-temperature "${REASONING_TEMPERATURE}"
        )
    fi
    if [[ "${SAVE_REASONING}" == "1" ]]; then
        extra_args+=(--save-reasoning)
    fi
    if [[ "${GUIDE_REASONING_MODE}" != "none" ]]; then
        extra_args+=(
            --guide-reasoning-mode "${GUIDE_REASONING_MODE}"
            --guide-reasoning-max-new-tokens "${GUIDE_REASONING_MAX_NEW_TOKENS}"
            --guide-reasoning-temperature "${GUIDE_REASONING_TEMPERATURE}"
            --guide-reasoning-filter-mode "${GUIDE_REASONING_FILTER_MODE}"
            --guide-attention-source "${GUIDE_ATTENTION_SOURCE}"
            --guide-attention-aggregation-mode "${GUIDE_ATTENTION_AGGREGATION_MODE}"
            --guide-reasoning-attention-weight "${GUIDE_REASONING_ATTENTION_WEIGHT}"
            --guide-answer-attention-weight "${GUIDE_ANSWER_ATTENTION_WEIGHT}"
        )
    fi
    if [[ "${GUIDE_TEXT_MODE}" != "none" ]]; then
        extra_args+=(
            --guide-text-mode "${GUIDE_TEXT_MODE}"
            --guide-text-max-new-tokens "${GUIDE_TEXT_MAX_NEW_TOKENS}"
            --guide-text-temperature "${GUIDE_TEXT_TEMPERATURE}"
        )
    fi

    # NB: expanding an EMPTY array as "${arr[@]}" is an "unbound variable"
    # error under `set -u` on bash < 4.4 (e.g. macOS /bin/bash 3.2), which
    # happens whenever all optional modes are left at their defaults. The
    # ${arr[@]+"${arr[@]}"} guard expands to nothing when the array is empty
    # and to the quoted elements otherwise.
    CUDA_VISIBLE_DEVICES="${GPU_ID}" \
    "${PYTHON_BIN}" "${REPO_ROOT}/eval/vqa/run_shared_vision_guided_textvqa.py" \
        --guide-checkpoint "${GUIDE_CHECKPOINT}" \
        --large-checkpoint "${LARGE_CHECKPOINT}" \
        --data-root "${DATA_ROOT}" \
        --textvqa-root "${TEXTVQA_ROOT}" \
        --out-dir "${out_dir}" \
        --run-name "${run_name}" \
        --dynamic \
        --large-model-prune-layer "${PRUNE_LAYER}" \
        --large-model-prune-ratio "${prune_ratio}" \
        --large-model-prune-selection "${PRUNE_SELECTION_MODE}" \
        --consistency-token-ratio "${CONSISTENCY_TOKEN_RATIO}" \
        --seed "${SEED}" \
        ${extra_args[@]+"${extra_args[@]}"} \
        >> "${log_path}" 2>&1
}

# Sweep: keep 40% of visual tokens, then keep 9%.
while read -r sweep_label sweep_ratio; do
    run_one "${sweep_label}" "${sweep_ratio}"
done <<'EOF'
keep40 0.4
keep09 0.09
EOF