| |
| """ |
| probe_distance_prompt.py |
| ======================== |
| far / close ์ง๋ฌธ ํ๋กฌํํธ๋ฅผ ๋ฐ๊ฟ๊ฐ๋ฉฐ ๋ชจ๋ ๋ชจ๋ธ์ ์๋ต์ ๋น๊ตํ๋ ํ
์คํธ ์คํฌ๋ฆฝํธ. |
| |
| QUESTION_TEMPLATE (์๋)์ ์์ ํ ์คํ: |
| python probe_distance_prompt.py --data_type real --model_type nvila |
| python probe_distance_prompt.py --data_type synthetic --model_type molmo |
| python probe_distance_prompt.py --data_type real --model_type nvila molmo qwen |
| |
| ํ๊ฒฝ๋ณ ์คํ: |
| vila env โ nvila ๋ชจ๋ธ |
| molmo env โ molmo ๋ชจ๋ธ |
| system โ qwen ๋ชจ๋ธ |
| |
๊ฒฐ๊ณผ: logs/probe_distance/<data_type>_<model_type>_YYYYMMDD_HHMMSS.log
| """ |
|
|
| import os, sys, json, random, argparse, gc |
| from datetime import datetime |
| from io import BytesIO |
| import base64 |
|
|
| |
| _HERE = os.path.dirname(os.path.abspath(__file__)) |
| _SA_DIR = os.path.join(_HERE, 'swap_analysis') |
| sys.path.insert(0, _SA_DIR) |
|
|
| |
| |
| |
| |
| |
| |
| |
| _Q_BASE = "Compared to {ref}, is {subj} far or close from you? " |
| _Q_TAIL = "Answer with a single letter A or B." |
| QUESTION_TEMPLATES = { |
| 'far_first': _Q_BASE + "(A) far (B) close " + _Q_TAIL, |
| 'close_first': _Q_BASE + "(A) close (B) far " + _Q_TAIL, |
| } |
| |
|
|
| N_SAMPLES = 10 |
| SEED = 42 |
| REAL_TSV = '/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv' |
| SYNTH_DIR = '/data/shared/Qwen/synthetic/2body' |
| OUTPUT_ROOT = os.path.join(_HERE, 'logs', 'probe_distance') |
|
|
| |
| _SYNONYMS = { |
| 'far': ['farther', 'further', 'distant', 'far away', 'further away'], |
| 'close': ['closer', 'near', 'nearby', 'nearer'], |
| } |
|
|
| |
| _MCQ_LETTER = { |
| 'far_first': {'far': 'a', 'close': 'b'}, |
| 'close_first': {'close': 'a', 'far': 'b'}, |
| } |
|
|
| OPPOSITE = {'far': 'close', 'close': 'far'} |
|
|
|
|
| |
| |
| |
|
|
| def check(pred: str, expected: str, mcq_map: dict) -> bool: |
| """expected ๋จ์ด ๋๋ MCQ ์ ํ์ง(A/B)๊ฐ opposite๋ณด๋ค ๋จผ์ ๋์ค๋ฉด ์ ๋ต. |
| mcq_map: variant๋ณ letter ๋งคํ (์: {'far': 'a', 'close': 'b'}) |
| """ |
| t = pred.strip().lower() |
| opposite = OPPOSITE[expected] |
|
|
| exp_letter = mcq_map[expected] |
| opp_letter = mcq_map[opposite] |
|
|
| |
| if t in (exp_letter, exp_letter + '.', exp_letter + ')', exp_letter + ','): |
| return True |
| if t in (opp_letter, opp_letter + '.', opp_letter + ')', opp_letter + ','): |
| return False |
|
|
| |
| mcq_exp = f'({exp_letter})' |
| mcq_opp = f'({opp_letter})' |
|
|
| def earliest(word, extra=()): |
| pos = t.find(word) |
| for syn in list(_SYNONYMS.get(word, [])) + list(extra): |
| q = t.find(syn) |
| if q != -1: |
| pos = q if pos == -1 else min(pos, q) |
| return pos |
|
|
| pe = earliest(expected, (mcq_exp,)) |
| po = earliest(opposite, (mcq_opp,)) |
| return pe != -1 and (po == -1 or pe < po) |
|
|
|
|
| |
| |
| |
|
|
def load_real_samples(n: int, seed: int):
    """Return up to n 'far' plus n 'close' samples from the EmbSpatialBench TSV.

    Each sample dict carries 'category', 'subj', 'ref' and 'image_base64'.
    Sampling is deterministic for a given seed.
    """
    import swap_analysis as sa
    rng = random.Random(seed)
    pairs = sa.load_swap_pairs(REAL_TSV, seed=seed)

    def _usable(pair):
        # Reject pairs whose object labels are missing or placeholder values.
        for key in ('target_object', 'reference_object'):
            label = pair.get(key, '')
            if not label or label.strip().lower() in ('unknown', 'n/a', ''):
                return False
        return True

    samples = []
    for category in ('far', 'close'):
        bucket = [p for p in pairs if p['category'] == category and _usable(p)]
        rng.shuffle(bucket)
        samples.extend(
            {
                'category': category,
                'subj': p['target_object'],
                'ref': p['reference_object'],
                'image_base64': p['image_base64'],
            }
            for p in bucket[:n]
        )
    return samples
|
|
|
|
def load_synthetic_samples(n: int, seed: int):
    """Return up to n samples each from synthetic/2body/far and .../close.

    Images are read from disk and re-encoded as base64 so downstream code can
    treat real and synthetic samples identically.
    """
    rng = random.Random(seed)
    out = []
    for folder, cat in (('far', 'far'), ('close', 'close')):
        base = os.path.join(SYNTH_DIR, folder)
        with open(os.path.join(base, 'vqa.json')) as fh:
            entries = json.load(fh)
        rng.shuffle(entries)
        for item in entries[:n]:
            with open(os.path.join(base, item['image']), 'rb') as img_fh:
                encoded = base64.b64encode(img_fh.read()).decode('utf-8')
            first, second = item['obj1'], item['obj2']
            out.append({
                'category': cat,
                'subj': f"{first['color']} {first['shape']}",
                'ref': f"{second['color']} {second['shape']}",
                'image_base64': encoded,
            })
    return out
|
|
|
|
| |
| |
| |
|
|
def get_model_specs(model_type_filter=None):
    """Build (model_type, scale, path, cls_name) spec dicts from sa.MODEL_CONFIGS.

    model_type_filter: optional collection of model families to keep
    (e.g. ['nvila', 'molmo']); None keeps every configured family.
    Families without a known extractor class are silently dropped.
    """
    import swap_analysis as sa

    default_cls = {
        'molmo': 'MolmoExtractor',
        'nvila': 'NVILAExtractor',
        'qwen': 'Qwen25VLExtractor',
        'nvila_synthetic': 'NVILAExtractor',
    }
    specs = []
    for family, scales in sa.MODEL_CONFIGS.items():
        if model_type_filter and family not in model_type_filter:
            continue
        base_cls = default_cls.get(family)
        if base_cls is None:
            continue
        for scale, path in scales.items():
            # The roborefer scale of nvila uses a dedicated extractor class.
            if family == 'nvila' and scale == 'roborefer':
                cls_name = 'RoboReferExtractor'
            else:
                cls_name = base_cls
            specs.append({
                'model_type': family,
                'scale': scale,
                'path': path,
                'cls_name': cls_name,
            })
    return specs
|
|
|
|
| |
| |
| |
|
|
def probe_model(spec, samples, device, log_fh):
    """Run every sample through one model/scale and log per-sample results.

    spec    : dict with 'model_type', 'scale', 'path', 'cls_name'
              (as produced by get_model_specs).
    samples : list of dicts with 'category', 'subj', 'ref', 'image_base64'.
    device  : device string handed to the extractor constructor (e.g. 'cuda').
    log_fh  : open text file handle; all output goes through _log.

    Model-load failures skip the model; per-sample failures are scored as
    misses. The extractor is cleaned up before returning.
    """
    import swap_analysis as sa
    from PIL import Image

    def _img(b64):
        # Decode a base64 image payload into an RGB PIL image.
        return Image.open(BytesIO(base64.b64decode(b64))).convert('RGB')

    header = (f"\n{'โ'*62}\n"
              f" MODEL : {spec['model_type']} / {spec['scale']}\n"
              f" PATH : {spec['path']}\n"
              f"{'โ'*62}")
    _log(header, log_fh)

    # Load the extractor; skip this model if the class name is unknown or the
    # checkpoint fails to load (e.g. wrong environment for this model family).
    try:
        cls = sa.EXTRACTOR_CLASSES.get(spec['cls_name'])
        if cls is None:
            _log(f" [SKIP] ์ ์ ์๋ extractor: {spec['cls_name']}", log_fh)
            return
        extractor = cls(spec['path'], device=device, target_layers=[])
    except Exception as e:
        _log(f" [SKIP] ๋ชจ๋ธ ๋ก๋ ์คํจ: {e}", log_fh)
        return

    correct = {'far': 0, 'close': 0}
    total = {'far': 0, 'close': 0}

    # Alternate the A/B option order per sample so letter bias is observable.
    for i, s in enumerate(samples):
        variant = 'far_first' if i % 2 == 0 else 'close_first'
        mcq_map = _MCQ_LETTER[variant]
        question = QUESTION_TEMPLATES[variant].format(subj=s['subj'], ref=s['ref'])
        try:
            image = _img(s['image_base64'])
            _, pred = extractor.extract_and_predict(image, question)
        except Exception as e:
            # Keep going on per-sample failures; the error text scores as a miss.
            pred = f"[ERROR: {e}]"

        ok = check(pred, s['category'], mcq_map)
        mark = 'โ' if ok else 'โ'
        if ok:
            correct[s['category']] += 1
        total[s['category']] += 1

        line = (f" [{s['category']:5}][{variant}] subj={s['subj']!r:25s} ref={s['ref']!r}\n"
                f" Q: {question}\n"
                f" A: {pred!r} {mark}")
        _log(line, log_fh)

    nf, nc = total['far'], total['close']
    summary = (f"\n โโ SUMMARY โโ "
               f"far={correct['far']}/{nf} "
               f"close={correct['close']}/{nc} "
               f"total={correct['far']+correct['close']}/{nf+nc}\n")
    _log(summary, log_fh)

    # Release model memory before the next model is loaded.
    extractor.cleanup()
    gc.collect()
    try:
        import torch
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    except ImportError:
        pass
|
|
|
|
| def _log(msg, fh): |
| print(msg) |
| fh.write(msg + '\n') |
| fh.flush() |
|
|
|
|
| |
| |
| |
|
|
def main():
    """CLI entry point: sample data, probe every selected model, write a log."""
    parser = argparse.ArgumentParser(
        description='far/close ํ๋กฌํํธ ์์ง๋์ด๋ง ํ์คํธ ์คํฌ๋ฆฝํธ')
    parser.add_argument('--data_type', choices=['real', 'synthetic'], required=True,
                        help='real=EmbSpatialBench, synthetic=synthetic/2body')
    parser.add_argument('--model_type', nargs='+', default=None,
                        metavar='TYPE',
                        help='ํ์คํธํ ๋ชจ๋ธ ํจ๋ฐ๋ฆฌ (molmo / nvila / qwen). '
                             '๊ธฐ๋ณธ๊ฐ: ์ค์ ๋ ์ ์ฒด ๋ชจ๋ธ. ํ๊ฒฝ์ ์๋ ๋ชจ๋ธ์ ์๋ ์คํต.')
    parser.add_argument('--n_samples', type=int, default=N_SAMPLES,
                        help=f'์นดํ๊ณ ๋ฆฌ๋ณ ์ํ ์ (๊ธฐ๋ณธ๊ฐ: {N_SAMPLES})')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=SEED)
    args = parser.parse_args()

    # Log file path: logs/probe_distance/<data_type>_<models>_<timestamp>.log
    os.makedirs(OUTPUT_ROOT, exist_ok=True)
    ts = datetime.now().strftime('%Y%m%d_%H%M%S')
    model_tag = '_'.join(args.model_type) if args.model_type else 'all'
    log_path = os.path.join(OUTPUT_ROOT, f'{args.data_type}_{model_tag}_{ts}.log')

    # Draw the same fixed-seed sample set for every model so runs are comparable.
    if args.data_type == 'real':
        samples = load_real_samples(args.n_samples, args.seed)
    else:
        samples = load_synthetic_samples(args.n_samples, args.seed)

    specs = get_model_specs(args.model_type)
    if not specs:
        print(f"[ERROR] ํด๋นํ๋ ๋ชจ๋ธ์ด ์์ต๋๋ค: {args.model_type}")
        sys.exit(1)

    with open(log_path, 'w', encoding='utf-8') as log_fh:
        models_str = ', '.join(f"{s['model_type']}/{s['scale']}" for s in specs)
        # Banner records the exact prompts, data setup and model list for the run.
        banner = (
            f"{'='*62}\n"
            f"PROMPT TEMPLATES (A/B order alternated per sample):\n"
            f" far_first : {QUESTION_TEMPLATES['far_first']}\n"
            f" close_first: {QUESTION_TEMPLATES['close_first']}\n\n"
            f"DATA : {args.data_type} | {args.n_samples} far + {args.n_samples} close "
            f"(seed={args.seed})\n"
            f"MODELS: {models_str}\n"
            f"TIME : {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
            f"{'='*62}"
        )
        _log(banner, log_fh)

        # Probe models sequentially; each probe_model call frees its model.
        for spec in specs:
            probe_model(spec, samples, args.device, log_fh)

        footer = (f"\n{'='*62}\n"
                  f"๋ก๊ทธ ์ ์ฅ ์๋ฃ: {log_path}\n"
                  f"{'='*62}\n")
        _log(footer, log_fh)


if __name__ == '__main__':
    main()
|
|