# experiments/probe_distance_prompt.py
# (uploaded via upload-large-folder tool, commit 3404d44)
#!/usr/bin/env python3
"""
probe_distance_prompt.py
========================
Test script that compares every model's responses while varying the
far / close question prompt.

Edit QUESTION_TEMPLATES (below), then run:
    python probe_distance_prompt.py --data_type real --model_type nvila
    python probe_distance_prompt.py --data_type synthetic --model_type molmo
    python probe_distance_prompt.py --data_type real --model_type nvila molmo qwen

Per-environment usage:
    vila env  -> nvila models
    molmo env -> molmo models
    system    -> qwen models

Output: logs/probe_distance/<data_type>_<model_tag>_YYYYMMDD_HHMMSS.log
"""
import os, sys, json, random, argparse, gc
from datetime import datetime
from io import BytesIO
import base64
# --- sys.path setup so the sibling `swap_analysis` package is importable -----
_HERE = os.path.dirname(os.path.abspath(__file__))
_SA_DIR = os.path.join(_HERE, 'swap_analysis')
sys.path.insert(0, _SA_DIR)
# =============================================================================
# {subj} = object whose distance is being judged
# {ref}  = reference object used as the comparison baseline
# =============================================================================
# Alternate the choice order per sample to cancel A/B position bias:
#   far_first   (even samples): (A) far   (B) close
#   close_first (odd samples):  (A) close (B) far
_Q_BASE = "Compared to {ref}, is {subj} far or close from you? "
_Q_TAIL = "Answer with a single letter A or B."
QUESTION_TEMPLATES = {
    'far_first': _Q_BASE + "(A) far (B) close " + _Q_TAIL,
    'close_first': _Q_BASE + "(A) close (B) far " + _Q_TAIL,
}
# =============================================================================
N_SAMPLES = 10  # number of samples per category (far / close)
SEED = 42
REAL_TSV = '/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv'
SYNTH_DIR = '/data/shared/Qwen/synthetic/2body'
OUTPUT_ROOT = os.path.join(_HERE, 'logs', 'probe_distance')
# ์ •๋‹ต ์ธ์‹ ๋™์˜์–ด (MCQ "(a)"/"(b)" ํŒจํ„ด์€ check()์—์„œ variant๋ณ„๋กœ ๋™์  ์ฒ˜๋ฆฌ)
_SYNONYMS = {
'far': ['farther', 'further', 'distant', 'far away', 'further away'],
'close': ['closer', 'near', 'nearby', 'nearer'],
}
# MCQ ์„ ํƒ์ง€ ๋งคํ•‘ (variant๋ณ„): ์–ด๋А template์„ ์ผ๋Š”์ง€์— ๋”ฐ๋ผ A/B๊ฐ€ ๋‹ฌ๋ผ์ง
_MCQ_LETTER = {
'far_first': {'far': 'a', 'close': 'b'},
'close_first': {'close': 'a', 'far': 'b'},
}
OPPOSITE = {'far': 'close', 'close': 'far'}
# =============================================================================
# ์ •๋‹ต ์ฒดํฌ
# =============================================================================
def check(pred: str, expected: str, mcq_map: dict) -> bool:
"""expected ๋‹จ์–ด ๋˜๋Š” MCQ ์„ ํƒ์ง€(A/B)๊ฐ€ opposite๋ณด๋‹ค ๋จผ์ € ๋‚˜์˜ค๋ฉด ์ •๋‹ต.
mcq_map: variant๋ณ„ letter ๋งคํ•‘ (์˜ˆ: {'far': 'a', 'close': 'b'})
"""
t = pred.strip().lower()
opposite = OPPOSITE[expected]
exp_letter = mcq_map[expected]
opp_letter = mcq_map[opposite]
# ๋‹จ๋… ์•ŒํŒŒ๋ฒณ ์„ ํƒ์ง€ ์‘๋‹ต ์ฒ˜๋ฆฌ (e.g. "A", "A.", "A)", "a")
if t in (exp_letter, exp_letter + '.', exp_letter + ')', exp_letter + ','):
return True
if t in (opp_letter, opp_letter + '.', opp_letter + ')', opp_letter + ','):
return False
# MCQ ์ธ๋ผ์ธ ํŒจํ„ด "(a)"/"(b)"์€ variant์— ๋”ฐ๋ผ ๋™์ ์œผ๋กœ ๊ฒฐ์ •
mcq_exp = f'({exp_letter})'
mcq_opp = f'({opp_letter})'
def earliest(word, extra=()):
pos = t.find(word)
for syn in list(_SYNONYMS.get(word, [])) + list(extra):
q = t.find(syn)
if q != -1:
pos = q if pos == -1 else min(pos, q)
return pos
pe = earliest(expected, (mcq_exp,))
po = earliest(opposite, (mcq_opp,))
return pe != -1 and (po == -1 or pe < po)
# =============================================================================
# ๋ฐ์ดํ„ฐ ๋กœ๋”ฉ
# =============================================================================
def load_real_samples(n: int, seed: int):
    """Return n 'far' and n 'close' samples from the EmbSpatial-Bench TSV."""
    import swap_analysis as sa
    rng = random.Random(seed)
    pairs = sa.load_swap_pairs(REAL_TSV, seed=seed)

    def usable(pair):
        # Skip pairs whose object names are empty or placeholders.
        for key in ('target_object', 'reference_object'):
            name = pair.get(key, '')
            if not name or name.strip().lower() in ('unknown', 'n/a', ''):
                return False
        return True

    buckets = {
        cat: [p for p in pairs if p['category'] == cat and usable(p)]
        for cat in ('far', 'close')
    }
    samples = []
    for cat in ('far', 'close'):
        rng.shuffle(buckets[cat])
        for pair in buckets[cat][:n]:
            samples.append({
                'category': cat,
                'subj': pair['target_object'],
                'ref': pair['reference_object'],
                'image_base64': pair['image_base64'],
            })
    return samples
def load_synthetic_samples(n: int, seed: int):
    """Return n 'far' and n 'close' samples from synthetic/2body/{far,close}."""
    rng = random.Random(seed)
    samples = []
    for cat in ('far', 'close'):
        folder_path = os.path.join(SYNTH_DIR, cat)
        with open(os.path.join(folder_path, 'vqa.json')) as f:
            entries = json.load(f)
        rng.shuffle(entries)
        for entry in entries[:n]:
            with open(os.path.join(folder_path, entry['image']), 'rb') as img_f:
                encoded = base64.b64encode(img_f.read()).decode('utf-8')
            first, second = entry['obj1'], entry['obj2']
            samples.append({
                'category': cat,
                'subj': f"{first['color']} {first['shape']}",
                'ref': f"{second['color']} {second['shape']}",
                'image_base64': encoded,
            })
    return samples
# =============================================================================
# ๋ชจ๋ธ ์ŠคํŽ™ ๋ชฉ๋ก
# =============================================================================
def get_model_specs(model_type_filter=None):
    """Build (model_type, scale, path, cls_name) specs from sa.MODEL_CONFIGS.

    model_type_filter: optional collection of model families to keep;
    None keeps every family that has a known extractor class.
    """
    import swap_analysis as sa
    known_extractors = {
        'molmo': 'MolmoExtractor',
        'nvila': 'NVILAExtractor',
        'qwen': 'Qwen25VLExtractor',
        'nvila_synthetic': 'NVILAExtractor',
    }
    specs = []
    for family, scales in sa.MODEL_CONFIGS.items():
        if model_type_filter and family not in model_type_filter:
            continue
        default_cls = known_extractors.get(family)
        if default_cls is None:
            continue
        for scale, path in scales.items():
            # The nvila 'roborefer' scale ships with its own extractor class.
            if family == 'nvila' and scale == 'roborefer':
                cls_name = 'RoboReferExtractor'
            else:
                cls_name = default_cls
            specs.append({
                'model_type': family,
                'scale': scale,
                'path': path,
                'cls_name': cls_name,
            })
    return specs
# =============================================================================
# ํ”„๋กœ๋ธŒ ์‹คํ–‰
# =============================================================================
def probe_model(spec, samples, device, log_fh):
    """Run inference on every sample with one model/scale and log the results.

    spec    : dict with model_type / scale / path / cls_name (see get_model_specs)
    samples : list of sample dicts (category, subj, ref, image_base64)
    device  : device string handed to the extractor (e.g. 'cuda')
    log_fh  : open text file handle; all output is mirrored there via _log

    The question variant alternates per sample (even index -> far_first,
    odd index -> close_first) so A/B position bias cancels across the set.
    Load or inference failures are logged and the model is skipped rather
    than aborting the whole run.
    """
    import swap_analysis as sa
    from PIL import Image

    def _img(b64):
        # Decode a base64 payload into an RGB PIL image.
        return Image.open(BytesIO(base64.b64decode(b64))).convert('RGB')

    header = (f"\n{'โ”'*62}\n"
              f" MODEL : {spec['model_type']} / {spec['scale']}\n"
              f" PATH : {spec['path']}\n"
              f"{'โ”'*62}")
    _log(header, log_fh)
    # Load the model; on any failure, log and skip this spec.
    try:
        cls = sa.EXTRACTOR_CLASSES.get(spec['cls_name'])
        if cls is None:
            _log(f" [SKIP] ์•Œ ์ˆ˜ ์—†๋Š” extractor: {spec['cls_name']}", log_fh)
            return
        # target_layers=[] -> no hidden-state collection, prediction only
        extractor = cls(spec['path'], device=device, target_layers=[])
    except Exception as e:
        _log(f" [SKIP] ๋ชจ๋ธ ๋กœ๋“œ ์‹คํŒจ: {e}", log_fh)
        return
    correct = {'far': 0, 'close': 0}
    total = {'far': 0, 'close': 0}
    # Alternate variants: even=far_first, odd=close_first -> cancels A/B bias.
    for i, s in enumerate(samples):
        variant = 'far_first' if i % 2 == 0 else 'close_first'
        mcq_map = _MCQ_LETTER[variant]
        question = QUESTION_TEMPLATES[variant].format(subj=s['subj'], ref=s['ref'])
        try:
            image = _img(s['image_base64'])
            _, pred = extractor.extract_and_predict(image, question)
        except Exception as e:
            # Per-sample failure: record the error text as the "prediction".
            pred = f"[ERROR: {e}]"
        ok = check(pred, s['category'], mcq_map)
        mark = 'โœ…' if ok else 'โŒ'
        if ok:
            correct[s['category']] += 1
        total[s['category']] += 1
        line = (f" [{s['category']:5}][{variant}] subj={s['subj']!r:25s} ref={s['ref']!r}\n"
                f" Q: {question}\n"
                f" A: {pred!r} {mark}")
        _log(line, log_fh)
    nf, nc = total['far'], total['close']
    summary = (f"\n โ”€โ”€ SUMMARY โ”€โ”€ "
               f"far={correct['far']}/{nf} "
               f"close={correct['close']}/{nc} "
               f"total={correct['far']+correct['close']}/{nf+nc}\n")
    _log(summary, log_fh)
    # Free memory before the next model is loaded.
    extractor.cleanup()
    gc.collect()
    try:
        import torch
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    except ImportError:
        # torch not installed in this environment; nothing to release.
        pass
def _log(msg, fh):
    """Echo msg to stdout and append it, newline-terminated, to fh (flushed)."""
    print(msg)
    fh.write(f"{msg}\n")
    fh.flush()
# =============================================================================
# Main
# =============================================================================
def main():
    """CLI entry point: load samples, probe each model spec, write a log file."""
    parser = argparse.ArgumentParser(
        description='far/close ํ”„๋กฌํ”„ํŠธ ์—”์ง€๋‹ˆ์–ด๋ง ํ…Œ์ŠคํŠธ ์Šคํฌ๋ฆฝํŠธ')
    parser.add_argument('--data_type', choices=['real', 'synthetic'], required=True,
                        help='real=EmbSpatialBench, synthetic=synthetic/2body')
    parser.add_argument('--model_type', nargs='+', default=None,
                        metavar='TYPE',
                        help='ํ…Œ์ŠคํŠธํ•  ๋ชจ๋ธ ํŒจ๋ฐ€๋ฆฌ (molmo / nvila / qwen). '
                             '๊ธฐ๋ณธ๊ฐ’: ์„ค์ •๋œ ์ „์ฒด ๋ชจ๋ธ. ํ™˜๊ฒฝ์— ์—†๋Š” ๋ชจ๋ธ์€ ์ž๋™ ์Šคํ‚ต.')
    parser.add_argument('--n_samples', type=int, default=N_SAMPLES,
                        help=f'์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ์ƒ˜ํ”Œ ์ˆ˜ (๊ธฐ๋ณธ๊ฐ’: {N_SAMPLES})')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=SEED)
    args = parser.parse_args()
    os.makedirs(OUTPUT_ROOT, exist_ok=True)
    ts = datetime.now().strftime('%Y%m%d_%H%M%S')
    model_tag = '_'.join(args.model_type) if args.model_type else 'all'
    log_path = os.path.join(OUTPUT_ROOT, f'{args.data_type}_{model_tag}_{ts}.log')
    # Load the samples for the requested data source.
    if args.data_type == 'real':
        samples = load_real_samples(args.n_samples, args.seed)
    else:
        samples = load_synthetic_samples(args.n_samples, args.seed)
    # Resolve the model specs to probe; exit if the filter matches nothing.
    specs = get_model_specs(args.model_type)
    if not specs:
        print(f"[ERROR] ํ•ด๋‹นํ•˜๋Š” ๋ชจ๋ธ์ด ์—†์Šต๋‹ˆ๋‹ค: {args.model_type}")
        sys.exit(1)
    with open(log_path, 'w', encoding='utf-8') as log_fh:
        models_str = ', '.join(f"{s['model_type']}/{s['scale']}" for s in specs)
        banner = (
            f"{'='*62}\n"
            f"PROMPT TEMPLATES (A/B order alternated per sample):\n"
            f" far_first : {QUESTION_TEMPLATES['far_first']}\n"
            f" close_first: {QUESTION_TEMPLATES['close_first']}\n\n"
            f"DATA : {args.data_type} | {args.n_samples} far + {args.n_samples} close "
            f"(seed={args.seed})\n"
            f"MODELS: {models_str}\n"
            f"TIME : {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
            f"{'='*62}"
        )
        _log(banner, log_fh)
        for spec in specs:
            probe_model(spec, samples, args.device, log_fh)
        footer = (f"\n{'='*62}\n"
                  f"๋กœ๊ทธ ์ €์žฅ ์™„๋ฃŒ: {log_path}\n"
                  f"{'='*62}\n")
        _log(footer, log_fh)


if __name__ == '__main__':
    main()