#!/usr/bin/env python3 """ probe_distance_prompt.py ======================== far / close 질문 프롬프트를 바꿔가며 모든 모델의 응답을 비교하는 테스트 스크립트. QUESTION_TEMPLATE (아래)을 수정 후 실행: python probe_distance_prompt.py --data_type real --model_type nvila python probe_distance_prompt.py --data_type synthetic --model_type molmo python probe_distance_prompt.py --data_type real --model_type nvila molmo qwen 환경별 실행: vila env → nvila 모델 molmo env → molmo 모델 system → qwen 모델 결과: logs/probe_distance/YYYYMMDD_HHMMSS_.log """ import os, sys, json, random, argparse, gc from datetime import datetime from io import BytesIO import base64 # ─── sys.path (swap_analysis import) ───────────────────────────────────────── _HERE = os.path.dirname(os.path.abspath(__file__)) _SA_DIR = os.path.join(_HERE, 'swap_analysis') sys.path.insert(0, _SA_DIR) # ============================================================================= # {subj} = 거리를 평가할 대상 객체 # {ref} = 기준이 되는 참조 객체 # ============================================================================= # 선택지 순서를 샘플마다 교대해 A/B position bias를 상쇄 # far_first (짝수 샘플): (A) far (B) close # close_first (홀수 샘플): (A) close (B) far _Q_BASE = "Compared to {ref}, is {subj} far or close from you? " _Q_TAIL = "Answer with a single letter A or B." 
QUESTION_TEMPLATES = {
    'far_first': _Q_BASE + "(A) far (B) close " + _Q_TAIL,
    'close_first': _Q_BASE + "(A) close (B) far " + _Q_TAIL,
}

# =============================================================================
N_SAMPLES = 10  # samples per category (far / close)
SEED = 42
REAL_TSV = '/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv'
SYNTH_DIR = '/data/shared/Qwen/synthetic/2body'
OUTPUT_ROOT = os.path.join(_HERE, 'logs', 'probe_distance')

# Accepted answer synonyms (the MCQ "(a)"/"(b)" patterns are handled
# per-variant inside check()).
_SYNONYMS = {
    'far': ['farther', 'further', 'distant', 'far away', 'further away'],
    'close': ['closer', 'near', 'nearby', 'nearer'],
}

# Per-variant MCQ letter mapping: which letter means far/close depends on
# which template variant was used for the question.
_MCQ_LETTER = {
    'far_first': {'far': 'a', 'close': 'b'},
    'close_first': {'close': 'a', 'far': 'b'},
}

OPPOSITE = {'far': 'close', 'close': 'far'}


# =============================================================================
# Answer checking
# =============================================================================
def check(pred: str, expected: str, mcq_map: dict) -> bool:
    """Return True when the expected word or its MCQ letter appears before the opposite.

    mcq_map: per-variant letter mapping (e.g. {'far': 'a', 'close': 'b'}).
    """
    text = pred.strip().lower()
    other = OPPOSITE[expected]
    want_letter = mcq_map[expected]
    other_letter = mcq_map[other]

    # Bare single-letter replies (e.g. "A", "A.", "A)", "a,")
    for letter, verdict in ((want_letter, True), (other_letter, False)):
        if text in (letter, letter + '.', letter + ')', letter + ','):
            return verdict

    # Inline "(a)"/"(b)" patterns are resolved per variant.
    def first_hit(word, extras=()):
        # Earliest occurrence of the word, any synonym, or an extra pattern; -1 if none.
        candidates = [word, *_SYNONYMS.get(word, []), *extras]
        positions = [p for p in (text.find(c) for c in candidates) if p != -1]
        return min(positions) if positions else -1

    want_pos = first_hit(expected, (f'({want_letter})',))
    other_pos = first_hit(other, (f'({other_letter})',))
    return want_pos != -1 and (other_pos == -1 or want_pos < other_pos)


# =============================================================================
# Data loading
# =============================================================================
def load_real_samples(n: int, seed: int):
    """Return n far + n close samples drawn from the EmbSpatialBench TSV."""
    import swap_analysis as sa

    rng = random.Random(seed)
    pairs = sa.load_swap_pairs(REAL_TSV, seed=seed)

    def _usable(pair):
        # Drop pairs whose target/reference object is empty or 'Unknown'.
        for key in ('target_object', 'reference_object'):
            value = pair.get(key, '')
            if not value or value.strip().lower() in ('unknown', 'n/a', ''):
                return False
        return True

    picked = {}
    for cat in ('far', 'close'):  # shuffle far first, then close (rng order matters)
        bucket = [p for p in pairs if p['category'] == cat and _usable(p)]
        rng.shuffle(bucket)
        picked[cat] = bucket[:n]

    return [
        {
            'category': cat,
            'subj': p['target_object'],
            'ref': p['reference_object'],
            'image_base64': p['image_base64'],
        }
        for cat in ('far', 'close')
        for p in picked[cat]
    ]


def load_synthetic_samples(n: int, seed: int):
    """Return n samples each from synthetic/2body/{far,close}."""
    rng = random.Random(seed)
    samples = []
    for folder, cat in [('far', 'far'), ('close', 'close')]:
        folder_path = os.path.join(SYNTH_DIR, folder)
        with open(os.path.join(folder_path, 'vqa.json')) as fh:
            entries = json.load(fh)
        rng.shuffle(entries)
        for entry in entries[:n]:
            with open(os.path.join(folder_path, entry['image']), 'rb') as fh:
                encoded = base64.b64encode(fh.read()).decode('utf-8')
            first, second = entry['obj1'], entry['obj2']
            samples.append({
                'category': cat,
                'subj': f"{first['color']} {first['shape']}",
                'ref': f"{second['color']} {second['shape']}",
                'image_base64': encoded,
            })
    return samples


# =============================================================================
# Model spec listing
# =============================================================================
def get_model_specs(model_type_filter=None):
    """Return (model_type, scale, path, cls_name) specs built from MODEL_CONFIGS."""
    import swap_analysis as sa

    extractor_cls = {
        'molmo': 'MolmoExtractor',
        'nvila': 'NVILAExtractor',
        'qwen': 'Qwen25VLExtractor',
        'nvila_synthetic': 'NVILAExtractor',
    }
    specs = []
    for mtype, scales in sa.MODEL_CONFIGS.items():
        if model_type_filter and mtype not in model_type_filter:
            continue
        default_cls = extractor_cls.get(mtype)
        if default_cls is None:
            continue
        for scale, path in scales.items():
            # The RoboRefer checkpoint needs its own extractor despite the nvila family.
            if mtype == 'nvila' and scale == 'roborefer':
                cls_name = 'RoboReferExtractor'
            else:
                cls_name = default_cls
            specs.append({
                'model_type': mtype,
                'scale': scale,
                'path': path,
                'cls_name': cls_name,
            })
    return specs


# =============================================================================
# Probe execution
# =============================================================================
def probe_model(spec, samples, device, log_fh):
    """Run inference on all samples for one model/scale and log the results."""
    import swap_analysis as sa
    from PIL import Image

    def _decode(b64):
        return Image.open(BytesIO(base64.b64decode(b64))).convert('RGB')

    _log(f"\n{'━'*62}\n"
         f" MODEL : {spec['model_type']} / {spec['scale']}\n"
         f" PATH : {spec['path']}\n"
         f"{'━'*62}", log_fh)

    # Load the model; any failure merely skips this spec.
    try:
        cls = sa.EXTRACTOR_CLASSES.get(spec['cls_name'])
        if cls is None:
            _log(f" [SKIP] 알 수 없는 extractor: {spec['cls_name']}", log_fh)
            return
        # target_layers=[] → predictions only, no hidden-state collection
        extractor = cls(spec['path'], device=device, target_layers=[])
    except Exception as e:
        _log(f" [SKIP] 모델 로드 실패: {e}", log_fh)
        return

    correct = {'far': 0, 'close': 0}
    total = {'far': 0, 'close': 0}

    # Alternate variants (even=far_first, odd=close_first) to cancel A/B position bias.
    for idx, sample in enumerate(samples):
        variant = 'far_first' if idx % 2 == 0 else 'close_first'
        mcq_map = _MCQ_LETTER[variant]
        question = QUESTION_TEMPLATES[variant].format(subj=sample['subj'],
                                                      ref=sample['ref'])
        try:
            _, pred = extractor.extract_and_predict(
                _decode(sample['image_base64']), question)
        except Exception as e:
            pred = f"[ERROR: {e}]"

        ok = check(pred, sample['category'], mcq_map)
        if ok:
            correct[sample['category']] += 1
        total[sample['category']] += 1
        mark = '✅' if ok else '❌'
        _log(f" [{sample['category']:5}][{variant}] subj={sample['subj']!r:25s} ref={sample['ref']!r}\n"
             f" Q: {question}\n"
             f" A: {pred!r} {mark}", log_fh)

    nf, nc = total['far'], total['close']
    _log(f"\n ── SUMMARY ── "
         f"far={correct['far']}/{nf} "
         f"close={correct['close']}/{nc} "
         f"total={correct['far']+correct['close']}/{nf+nc}\n", log_fh)

    # Release model memory before the next spec.
    extractor.cleanup()
    gc.collect()
    try:
        import torch
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    except ImportError:
        pass


def _log(msg, fh):
    """Echo msg to stdout and append it to the log file immediately."""
    print(msg)
    fh.write(msg + '\n')
    fh.flush()


# =============================================================================
# Main
# =============================================================================
def main():
    parser = argparse.ArgumentParser(
        description='far/close 프롬프트 엔지니어링 테스트 스크립트')
    parser.add_argument('--data_type', choices=['real', 'synthetic'], required=True,
                        help='real=EmbSpatialBench, synthetic=synthetic/2body')
    parser.add_argument('--model_type', nargs='+', default=None, metavar='TYPE',
                        help='테스트할 모델 패밀리 (molmo / nvila / qwen). '
                             '기본값: 설정된 전체 모델. 환경에 없는 모델은 자동 스킵.')
    parser.add_argument('--n_samples', type=int, default=N_SAMPLES,
                        help=f'카테고리별 샘플 수 (기본값: {N_SAMPLES})')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=SEED)
    args = parser.parse_args()

    os.makedirs(OUTPUT_ROOT, exist_ok=True)
    ts = datetime.now().strftime('%Y%m%d_%H%M%S')
    model_tag = '_'.join(args.model_type) if args.model_type else 'all'
    log_path = os.path.join(OUTPUT_ROOT, f'{args.data_type}_{model_tag}_{ts}.log')

    # Sample loading
    loader = load_real_samples if args.data_type == 'real' else load_synthetic_samples
    samples = loader(args.n_samples, args.seed)

    # Model specs
    specs = get_model_specs(args.model_type)
    if not specs:
        print(f"[ERROR] 해당하는 모델이 없습니다: {args.model_type}")
        sys.exit(1)

    with open(log_path, 'w', encoding='utf-8') as log_fh:
        models_str = ', '.join(f"{s['model_type']}/{s['scale']}" for s in specs)
        _log(
            f"{'='*62}\n"
            f"PROMPT TEMPLATES (A/B order alternated per sample):\n"
            f" far_first : {QUESTION_TEMPLATES['far_first']}\n"
            f" close_first: {QUESTION_TEMPLATES['close_first']}\n\n"
            f"DATA : {args.data_type} | {args.n_samples} far + {args.n_samples} close "
            f"(seed={args.seed})\n"
            f"MODELS: {models_str}\n"
            f"TIME : {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
            f"{'='*62}",
            log_fh,
        )

        for spec in specs:
            probe_model(spec, samples, args.device, log_fh)

        _log(f"\n{'='*62}\n"
             f"로그 저장 완료: {log_path}\n"
             f"{'='*62}\n", log_fh)


if __name__ == '__main__':
    main()