"""
Answer/Prediction bias analysis script.

1. GT answer distribution: how the ground-truth answers are spread over A, B, C, D.
2. Model prediction distribution: whether the model favors particular choices.

Usage:
    python experiments/analyze_answer_bias.py <model_result.xlsx> [--subset far_close]
    python experiments/analyze_answer_bias.py --compare <file1.xlsx> <file2.xlsx> ...
"""
|
|
| import argparse |
| import sys |
| import pandas as pd |
| import numpy as np |
| from pathlib import Path |
| from typing import Dict, List |
| from collections import Counter |
|
|
|
|
class TeeWriter:
    """File-like writer that mirrors output to the terminal and a file.

    Intended to be installed as ``sys.stdout`` so that report output is
    shown on screen and saved to ``filepath`` at the same time.  Also
    usable as a context manager; on exit the log file is closed.
    """

    def __init__(self, filepath):
        # Original stream is kept so close() can hand it back to the caller.
        self.terminal = sys.stdout
        self.file = open(filepath, 'w', encoding='utf-8')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        # Always release the file handle; never suppress exceptions.
        self.close()
        return False

    def write(self, message):
        """Write *message* to both the terminal and the log file."""
        self.terminal.write(message)
        self.file.write(message)

    def flush(self):
        """Flush both underlying streams."""
        self.terminal.flush()
        self.file.flush()

    def close(self):
        """Close the log file and return the original terminal stream.

        The return value lets callers restore stdout with
        ``sys.stdout = tee.close()``.
        """
        self.file.close()
        return self.terminal
|
|
|
|
def extract_answer_letter(val) -> str:
    """Extract the choice letter A/B/C/D from a prediction value.

    E.g. ``'D. basket'`` -> ``'D'``.  Missing, empty, or unparsable
    values yield ``'INVALID'``.
    """
    if pd.isna(val):
        return 'INVALID'
    text = str(val).strip()
    if not text:
        return 'INVALID'
    letter = text[0].upper()
    return letter if letter in 'ABCD' else 'INVALID'
|
|
|
|
def analyze_bias(df: pd.DataFrame, subset_name: str = "ALL") -> Dict:
    """Compute answer/prediction bias statistics for one result subset.

    Expects ``df`` to have columns ``answer`` (GT letter), ``prediction``
    (raw model output), and ``hit`` (1/0 correctness flag).

    Returns a dict containing:
      - GT and prediction counts/percentages over A-D (predictions that do
        not start with A-D are counted as INVALID and excluded from the
        prediction percentages),
      - accuracy conditioned on the GT position and on the predicted
        position,
      - overall accuracy (in %).
    """
    choices = ['A', 'B', 'C', 'D']

    # Ground-truth answer distribution.
    gt_dist = Counter(df['answer'])
    gt_total = sum(gt_dist.values())

    # Prediction distribution, keyed by the extracted first letter.
    pred_letters = df['prediction'].apply(extract_answer_letter)
    pred_dist = Counter(pred_letters)
    pred_total = sum(v for k, v in pred_dist.items() if k != 'INVALID')

    def _hit_rate_by(letters: pd.Series) -> Dict[str, float]:
        # Mean hit rate (%) over rows where `letters` equals each choice;
        # 0 when a choice never occurs.  `letters` shares df's index, so
        # boolean masking stays aligned.
        rates = {}
        for c in choices:
            rows = df[letters == c]
            rates[c] = rows['hit'].mean() * 100 if len(rows) > 0 else 0
        return rates

    # Accuracy conditioned on where the GT answer sits.
    acc_by_pos = _hit_rate_by(df['answer'])
    # Hit rate conditioned on which position the model picked.
    hit_by_pred = _hit_rate_by(pred_letters)

    def _pct(count: int, total: int) -> float:
        # Safe percentage; 0 when the denominator is empty.
        return count / total * 100 if total > 0 else 0

    return {
        'subset': subset_name,
        'total': len(df),
        'gt_dist': {k: gt_dist.get(k, 0) for k in choices},
        'gt_pct': {k: _pct(gt_dist.get(k, 0), gt_total) for k in choices},
        'pred_dist': {k: pred_dist.get(k, 0) for k in choices},
        'pred_pct': {k: _pct(pred_dist.get(k, 0), pred_total) for k in choices},
        'acc_by_gt_pos': acc_by_pos,
        'hit_by_pred_pos': hit_by_pred,
        'overall_acc': df['hit'].mean() * 100
    }
|
|
|
|
def print_bias_report(xlsx_path: str, results: List[Dict]):
    """Print a detailed bias report for one model's results."""
    positions = ['A', 'B', 'C', 'D']

    # Short model name derived from the result filename; truncate long names.
    model_name = Path(xlsx_path).stem.replace('_EmbSpatialBench_openai_result', '')
    model_name = model_name if len(model_name) <= 50 else model_name[:47] + "..."

    print(f"\n{'='*80}")
    print(f"Model: {model_name}")
    print(f"{'='*80}")

    for stats in results:
        print(f"\n--- {stats['subset']} (n={stats['total']}) ---")

        # Ground-truth answer distribution with per-position accuracy.
        print(f"\n GT Answer Distribution:")
        print(f" {'Pos':<5} {'Count':<8} {'Pct':<8} {'Acc when GT':<12}")
        print(f" {'-'*35}")
        for pos in positions:
            print(f" {pos:<5} {stats['gt_dist'][pos]:<8} {stats['gt_pct'][pos]:.1f}%{'':<4} {stats['acc_by_gt_pos'][pos]:.1f}%")

        # Model prediction distribution with per-position hit rate.
        print(f"\n Model Prediction Distribution:")
        print(f" {'Pos':<5} {'Count':<8} {'Pct':<8} {'Acc when Pred':<12}")
        print(f" {'-'*35}")
        for pos in positions:
            print(f" {pos:<5} {stats['pred_dist'][pos]:<8} {stats['pred_pct'][pos]:.1f}%{'':<4} {stats['hit_by_pred_pos'][pos]:.1f}%")

        # Spread of the distributions: larger std == stronger bias.
        gt_std = np.std([stats['gt_pct'][p] for p in positions])
        pred_std = np.std([stats['pred_pct'][p] for p in positions])

        print(f"\n Bias Indicators:")
        print(f" GT Distribution Std: {gt_std:.2f}%p (uniform=0)")
        print(f" Pred Distribution Std: {pred_std:.2f}%p (uniform=0)")
        print(f" Overall Accuracy: {stats['overall_acc']:.1f}%")
|
|
|
|
def analyze_model(xlsx_path: str, include_subsets: bool = True) -> List[Dict]:
    """Load one model's result xlsx and compute bias stats per subset."""
    df = pd.read_excel(xlsx_path)

    # Always analyze the full result set first.
    results = [analyze_bias(df, "ALL")]

    if include_subsets:
        # Combined far+close subset.
        combined = df[df['category'].isin(['far', 'close'])]
        if len(combined) > 0:
            results.append(analyze_bias(combined, "FAR+CLOSE"))

        # Each category on its own.
        for category in ('far', 'close'):
            single = df[df['category'] == category]
            if len(single) > 0:
                results.append(analyze_bias(single, category.upper()))

    return results
|
|
|
|
def compare_models_bias(xlsx_paths: List[str]):
    """Print a summary table comparing bias indicators across models."""
    positions = ['A', 'B', 'C', 'D']

    print(f"\n{'='*100}")
    print("MODEL BIAS COMPARISON SUMMARY")
    print(f"{'='*100}")

    # Table header.
    print(f"\n{'Model':<45} {'Subset':<12} {'GT Std':<10} {'Pred Std':<10} {'Pred Max':<12} {'Acc':<8}")
    print("-" * 97)

    for xlsx_path in xlsx_paths:
        model_name = Path(xlsx_path).stem.replace('_EmbSpatialBench_openai_result', '')
        if len(model_name) > 43:
            model_name = model_name[:40] + "..."

        for r in analyze_model(xlsx_path, include_subsets=True):
            gt_std = np.std([r['gt_pct'][p] for p in positions])
            pred_std = np.std([r['pred_pct'][p] for p in positions])

            # Most frequently predicted position and its share.
            max_pred_pos = max(r['pred_pct'], key=r['pred_pct'].get)
            max_pred_pct = r['pred_pct'][max_pred_pos]

            # Only the first (ALL) row of each model carries the model name.
            label = model_name if r['subset'] == 'ALL' else ''
            print(f"{label:<45} {r['subset']:<12} {gt_std:.1f}%p{'':<4} {pred_std:.1f}%p{'':<4} {max_pred_pos}({max_pred_pct:.1f}%){'':<2} {r['overall_acc']:.1f}%")
|
|
|
|
# Root directory where VLMEvalKit writes evaluation result files.
EVAL_OUTPUT_DIR = 'VLMEvalKit/outputs'

# Default result paths (relative to EVAL_OUTPUT_DIR) analyzed when no xlsx
# files are given on the command line.  Each entry is 'model_dir/model_name';
# the '_EmbSpatialBench_openai_result.xlsx' suffix is appended later.
DEFAULT_MODELS = [
    # Molmo-7B baseline and data-scale variants.
    'molmo-7B-O-0924/molmo-7B-O-0924',
    'molmo-7B-O-0924-data_scale_exp_80k/molmo-7B-O-0924-data_scale_exp_80k',
    'molmo-7B-O-0924-data_scale_exp_400k/molmo-7B-O-0924-data_scale_exp_400k',
    'molmo-7B-O-0924-data_scale_exp_800k/molmo-7B-O-0924-data_scale_exp_800k',
    'molmo-7B-O-0924-data_scale_exp_2m/molmo-7B-O-0924-data_scale_exp_2m',
    # NVILA-Lite-2B baseline, data-scale variants, and RoboRefer SFT.
    'NVILA-Lite-2B/NVILA-Lite-2B',
    'NVILA-Lite-2B-data-scale-exp-80k/NVILA-Lite-2B-data-scale-exp-80k',
    'NVILA-Lite-2B-data-scale-exp-400k/NVILA-Lite-2B-data-scale-exp-400k',
    'NVILA-Lite-2B-data-scale-exp-800k/NVILA-Lite-2B-data-scale-exp-800k',
    'NVILA-Lite-2B-data-scale-exp-2m/NVILA-Lite-2B-data-scale-exp-2m',
    'RoboRefer-2B-SFT/RoboRefer-2B-SFT',
    # Qwen2.5-VL-3B baseline and data-scale variants.
    'Qwen2.5-VL-3B-Instruct/Qwen2.5-VL-3B-Instruct',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_80k/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_400k/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_800k/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_2m/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m',
]
|
|
|
|
def get_default_xlsx_paths():
    """Build full result-file paths for every model in DEFAULT_MODELS."""
    suffix = '_EmbSpatialBench_openai_result.xlsx'
    return [f'{EVAL_OUTPUT_DIR}/{model}{suffix}' for model in DEFAULT_MODELS]
|
|
|
|
def main():
    """CLI entry point: parse arguments and run the bias analysis.

    With ``--output``, stdout is teed to a file for the duration of the
    analysis; a try/finally guarantees the file is closed and stdout
    restored even when the analysis raises.
    """
    parser = argparse.ArgumentParser(description='Answer/Prediction bias analysis')
    parser.add_argument('xlsx_files', nargs='*',
                        help='Model result xlsx files (defaults to the built-in model list when empty)')
    parser.add_argument('--compare', action='store_true', help='Compare multiple models (summary only)')
    parser.add_argument('--detail', action='store_true', help='Show detailed report for each model')
    parser.add_argument('--output', '-o', type=str, help='Save results to file')

    args = parser.parse_args()

    xlsx_files = args.xlsx_files if args.xlsx_files else get_default_xlsx_paths()

    # Optionally tee stdout to the requested log file.
    tee = None
    if args.output:
        tee = TeeWriter(args.output)
        sys.stdout = tee

    try:
        if args.compare and not args.detail:
            compare_models_bias(xlsx_files)
        else:
            for xlsx_path in xlsx_files:
                results = analyze_model(xlsx_path)
                print_bias_report(xlsx_path, results)

            # Append the comparison summary when analyzing several models.
            if len(xlsx_files) > 1:
                compare_models_bias(xlsx_files)
    finally:
        # Restore stdout and close the log file even if the analysis failed.
        if tee is not None:
            sys.stdout = tee.close()

    if args.output:
        print(f"Results saved to {args.output}")
|
|
|
|
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
|