ch-min committed on
Commit
3404d44
·
verified ·
1 Parent(s): b96543f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. analyze_answer_bias.py +255 -0
  2. analyze_counter_consistent.py +627 -0
  3. analyze_heuristic_position.py +256 -0
  4. answer_bias_results.txt +1607 -0
  5. correct_filter/results/nvila/correct_only/csv/similarity_2m_L0.csv +7 -0
  6. correct_filter/results/nvila/correct_only/csv/similarity_2m_L10.csv +7 -0
  7. correct_filter/results/nvila/correct_only/csv/similarity_2m_L12.csv +7 -0
  8. correct_filter/results/nvila/correct_only/csv/similarity_2m_L15.csv +7 -0
  9. correct_filter/results/nvila/correct_only/csv/similarity_2m_L20.csv +7 -0
  10. correct_filter/results/nvila/correct_only/csv/similarity_2m_L21.csv +7 -0
  11. correct_filter/results/nvila/correct_only/csv/similarity_2m_L24.csv +7 -0
  12. correct_filter/results/nvila/correct_only/csv/similarity_2m_L4.csv +7 -0
  13. correct_filter/results/nvila/correct_only/csv/similarity_400k_L0.csv +7 -0
  14. correct_filter/results/nvila/correct_only/csv/similarity_400k_L10.csv +7 -0
  15. correct_filter/results/nvila/correct_only/csv/similarity_400k_L12.csv +7 -0
  16. correct_filter/results/nvila/correct_only/csv/similarity_400k_L13.csv +7 -0
  17. correct_filter/results/nvila/correct_only/csv/similarity_400k_L14.csv +7 -0
  18. correct_filter/results/nvila/correct_only/csv/similarity_400k_L15.csv +7 -0
  19. correct_filter/results/nvila/correct_only/csv/similarity_400k_L20.csv +7 -0
  20. correct_filter/results/nvila/correct_only/csv/similarity_400k_L21.csv +7 -0
  21. correct_filter/results/nvila/correct_only/csv/similarity_400k_L22.csv +7 -0
  22. correct_filter/results/nvila/correct_only/csv/similarity_400k_L27.csv +7 -0
  23. correct_filter/results/nvila/correct_only/csv/similarity_400k_L5.csv +7 -0
  24. correct_filter/results/nvila/correct_only/csv/similarity_400k_L6.csv +7 -0
  25. correct_filter/results/nvila/correct_only/csv/similarity_400k_L7.csv +7 -0
  26. correct_filter/results/nvila/correct_only/csv/similarity_800k_L10.csv +7 -0
  27. correct_filter/results/nvila/correct_only/csv/similarity_800k_L15.csv +7 -0
  28. correct_filter/results/nvila/correct_only/csv/similarity_800k_L16.csv +7 -0
  29. correct_filter/results/nvila/correct_only/csv/similarity_800k_L18.csv +7 -0
  30. correct_filter/results/nvila/correct_only/csv/similarity_800k_L19.csv +7 -0
  31. correct_filter/results/nvila/correct_only/csv/similarity_800k_L20.csv +7 -0
  32. correct_filter/results/nvila/correct_only/csv/similarity_800k_L27.csv +7 -0
  33. correct_filter/results/nvila/correct_only/csv/similarity_800k_L4.csv +7 -0
  34. correct_filter/results/nvila/correct_only/csv/similarity_800k_L7.csv +7 -0
  35. correct_filter/results/nvila/correct_only/csv/similarity_800k_L8.csv +7 -0
  36. correct_filter/results/nvila/correct_only/csv/similarity_80k_L0.csv +7 -0
  37. correct_filter/results/nvila/correct_only/csv/similarity_80k_L1.csv +7 -0
  38. correct_filter/results/nvila/correct_only/csv/similarity_80k_L12.csv +7 -0
  39. correct_filter/results/nvila/correct_only/csv/similarity_80k_L13.csv +7 -0
  40. correct_filter/results/nvila/correct_only/csv/similarity_80k_L18.csv +7 -0
  41. correct_filter/results/nvila/correct_only/csv/similarity_80k_L2.csv +7 -0
  42. correct_filter/results/nvila/correct_only/csv/similarity_80k_L20.csv +7 -0
  43. correct_filter/results/nvila/correct_only/csv/similarity_80k_L24.csv +7 -0
  44. correct_filter/results/nvila/correct_only/csv/similarity_80k_L26.csv +7 -0
  45. correct_filter/results/nvila/correct_only/csv/similarity_80k_L5.csv +7 -0
  46. correct_filter/results/nvila/correct_only/csv/similarity_80k_L7.csv +7 -0
  47. correct_filter/results/nvila/correct_only/csv/similarity_80k_L9.csv +7 -0
  48. correct_filter/results/nvila/correct_only/csv/similarity_roborefer_L12.csv +7 -0
  49. correct_filter/results/nvila/correct_only/csv/similarity_roborefer_L13.csv +7 -0
  50. correct_filter/results/nvila/correct_only/csv/similarity_roborefer_L16.csv +7 -0
analyze_answer_bias.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Answer/Prediction Bias 분석 스크립트
4
+
5
+ 1. GT Answer Distribution: 정답이 A, B, C, D 중 어디에 편중되어 있는지
6
+ 2. Model Prediction Distribution: 모델이 특정 선택지를 더 많이 선택하는지
7
+
8
+ Usage:
9
+ python experiments/analyze_answer_bias.py <model_result.xlsx> [--subset far_close]
10
+ python experiments/analyze_answer_bias.py --compare <file1.xlsx> <file2.xlsx> ...
11
+ """
12
+
13
+ import argparse
14
+ import sys
15
+ import pandas as pd
16
+ import numpy as np
17
+ from pathlib import Path
18
+ from typing import Dict, List
19
+ from collections import Counter
20
+
21
+
22
class TeeWriter:
    """Mirror everything written to stdout into a log file as well.

    Install with ``sys.stdout = TeeWriter(path)``; ``close()`` closes the
    log file and returns the saved terminal stream so the caller can
    restore ``sys.stdout``.
    """
    def __init__(self, filepath):
        # Capture the current stdout so output still reaches the terminal.
        self.terminal = sys.stdout
        self.file = open(filepath, 'w', encoding='utf-8')

    def write(self, message):
        self.terminal.write(message)
        self.file.write(message)
        # Flush per write (matches the TeeWriter in analyze_counter_consistent.py)
        # so the log file is complete even if the run crashes mid-way.
        self.file.flush()

    def flush(self):
        self.terminal.flush()
        self.file.flush()

    def close(self):
        # Close the log and hand back the original terminal stream.
        self.file.close()
        return self.terminal
39
+
40
+
41
def extract_answer_letter(val) -> str:
    """Pull the leading choice letter A/B/C/D out of a prediction value.

    Example: 'D. basket' -> 'D'. NaN, empty/whitespace strings, and values
    whose first character is not one of A-D all map to 'INVALID'.
    """
    if pd.isna(val):
        return 'INVALID'
    text = str(val).strip()
    if not text:
        return 'INVALID'
    letter = text[0].upper()
    return letter if letter in ('A', 'B', 'C', 'D') else 'INVALID'
52
+
53
+
54
def analyze_bias(df: pd.DataFrame, subset_name: str = "ALL") -> Dict:
    """Compute GT/prediction position-bias statistics for one subset.

    Returns a dict with counts and percentages of GT answers and model
    predictions over A-D, accuracy broken down by GT position and by
    predicted position, and the overall accuracy.
    """
    letters = ['A', 'B', 'C', 'D']

    def _letter(val) -> str:
        # Same contract as extract_answer_letter: first char A-D, else INVALID.
        if pd.isna(val):
            return 'INVALID'
        text = str(val).strip()
        if not text:
            return 'INVALID'
        ch = text[0].upper()
        return ch if ch in letters else 'INVALID'

    # GT answer distribution (already stored as A/B/C/D).
    gt_dist = Counter(df['answer'])
    gt_total = sum(gt_dist.values())

    # Prediction distribution — extract the leading letter per row.
    pred_letters = df['prediction'].apply(_letter)
    pred_dist = Counter(pred_letters)
    # INVALID predictions are excluded from the percentage denominator.
    pred_total = sum(cnt for letter, cnt in pred_dist.items() if letter != 'INVALID')

    # Accuracy conditioned on where the GT answer sits.
    acc_by_pos = {}
    for ans in letters:
        rows = df[df['answer'] == ans]
        acc_by_pos[ans] = rows['hit'].mean() * 100 if len(rows) > 0 else 0

    # Accuracy conditioned on which option the model picked.
    tagged = df.copy()
    tagged['pred_letter'] = pred_letters.values
    hit_by_pred = {}
    for pred in letters:
        rows = tagged[tagged['pred_letter'] == pred]
        hit_by_pred[pred] = rows['hit'].mean() * 100 if len(rows) > 0 else 0

    return {
        'subset': subset_name,
        'total': len(df),
        'gt_dist': {k: gt_dist.get(k, 0) for k in letters},
        'gt_pct': {k: gt_dist.get(k, 0) / gt_total * 100 if gt_total > 0 else 0 for k in letters},
        'pred_dist': {k: pred_dist.get(k, 0) for k in letters},
        'pred_pct': {k: pred_dist.get(k, 0) / pred_total * 100 if pred_total > 0 else 0 for k in letters},
        'acc_by_gt_pos': acc_by_pos,
        'hit_by_pred_pos': hit_by_pred,
        'overall_acc': df['hit'].mean() * 100
    }
99
+
100
+
101
def print_bias_report(xlsx_path: str, results: List[Dict]):
    """Print the bias analysis report for one model.

    Args:
        xlsx_path: model result xlsx path (used only to derive the model name).
        results: list of dicts produced by analyze_bias(), one per subset.
    """
    model_name = Path(xlsx_path).stem.replace('_EmbSpatialBench_openai_result', '')
    # Truncate very long model names so the header stays readable.
    if len(model_name) > 50:
        model_name = model_name[:47] + "..."

    print(f"\n{'='*80}")
    print(f"Model: {model_name}")
    print(f"{'='*80}")

    for r in results:
        print(f"\n--- {r['subset']} (n={r['total']}) ---")

        # GT answer distribution + accuracy conditioned on GT position.
        print(f"\n GT Answer Distribution:")
        print(f" {'Pos':<5} {'Count':<8} {'Pct':<8} {'Acc when GT':<12}")
        print(f" {'-'*35}")
        for pos in ['A', 'B', 'C', 'D']:
            print(f" {pos:<5} {r['gt_dist'][pos]:<8} {r['gt_pct'][pos]:.1f}%{'':<4} {r['acc_by_gt_pos'][pos]:.1f}%")

        # Prediction distribution + accuracy conditioned on predicted position.
        print(f"\n Model Prediction Distribution:")
        print(f" {'Pos':<5} {'Count':<8} {'Pct':<8} {'Acc when Pred':<12}")
        print(f" {'-'*35}")
        for pos in ['A', 'B', 'C', 'D']:
            print(f" {pos:<5} {r['pred_dist'][pos]:<8} {r['pred_pct'][pos]:.1f}%{'':<4} {r['hit_by_pred_pos'][pos]:.1f}%")

        # Bias indicators: std of the A-D percentage distributions (0 = uniform).
        gt_std = np.std([r['gt_pct'][p] for p in ['A', 'B', 'C', 'D']])
        pred_std = np.std([r['pred_pct'][p] for p in ['A', 'B', 'C', 'D']])

        print(f"\n Bias Indicators:")
        print(f" GT Distribution Std: {gt_std:.2f}%p (uniform=0)")
        print(f" Pred Distribution Std: {pred_std:.2f}%p (uniform=0)")
        print(f" Overall Accuracy: {r['overall_acc']:.1f}%")
137
+
138
+
139
def analyze_model(xlsx_path: str, include_subsets: bool = True) -> List[Dict]:
    """Load one model's result xlsx and run the bias analysis.

    Always analyzes the full set; when include_subsets is True, also the
    combined FAR+CLOSE subset and each of 'far' / 'close' individually
    (empty subsets are skipped).
    """
    df = pd.read_excel(xlsx_path)

    analyses = [analyze_bias(df, "ALL")]

    if include_subsets:
        # Combined far+close subset.
        combined = df[df['category'].isin(['far', 'close'])]
        if len(combined) > 0:
            analyses.append(analyze_bias(combined, "FAR+CLOSE"))

        # Per-category breakdown.
        for cat in ('far', 'close'):
            per_cat = df[df['category'] == cat]
            if len(per_cat) > 0:
                analyses.append(analyze_bias(per_cat, cat.upper()))

    return analyses
161
+
162
+
163
def compare_models_bias(xlsx_paths: List[str]):
    """Print a side-by-side bias summary table for multiple models.

    Args:
        xlsx_paths: result-xlsx paths; each is analyzed via analyze_model().
    """

    print(f"\n{'='*100}")
    print("MODEL BIAS COMPARISON SUMMARY")
    print(f"{'='*100}")

    # Header
    print(f"\n{'Model':<45} {'Subset':<12} {'GT Std':<10} {'Pred Std':<10} {'Pred Max':<12} {'Acc':<8}")
    print("-" * 97)

    for xlsx_path in xlsx_paths:
        model_name = Path(xlsx_path).stem.replace('_EmbSpatialBench_openai_result', '')
        # Truncate long names so table columns stay aligned.
        if len(model_name) > 43:
            model_name = model_name[:40] + "..."

        results = analyze_model(xlsx_path, include_subsets=True)

        for r in results:
            # Std of the A-D percentage distributions: 0 means perfectly uniform.
            gt_std = np.std([r['gt_pct'][p] for p in ['A', 'B', 'C', 'D']])
            pred_std = np.std([r['pred_pct'][p] for p in ['A', 'B', 'C', 'D']])

            # Most frequently predicted option and its share.
            max_pred_pos = max(r['pred_pct'], key=r['pred_pct'].get)
            max_pred_pct = r['pred_pct'][max_pred_pos]

            # Model name is printed only on the first (ALL) row of each model.
            if r['subset'] == 'ALL':
                print(f"{model_name:<45} {r['subset']:<12} {gt_std:.1f}%p{'':<4} {pred_std:.1f}%p{'':<4} {max_pred_pos}({max_pred_pct:.1f}%){'':<2} {r['overall_acc']:.1f}%")
            else:
                print(f"{'':<45} {r['subset']:<12} {gt_std:.1f}%p{'':<4} {pred_std:.1f}%p{'':<4} {max_pred_pos}({max_pred_pct:.1f}%){'':<2} {r['overall_acc']:.1f}%")
193
+
194
+
195
# Root of the VLMEvalKit output tree containing per-model result files.
EVAL_OUTPUT_DIR = 'VLMEvalKit/outputs'

# Models analyzed when no xlsx paths are passed on the CLI. Each entry is
# "<run-dir>/<file-stem>" relative to EVAL_OUTPUT_DIR; the data_scale_exp_*
# variants are the 80k/400k/800k/2m data-scaling runs of each base model.
DEFAULT_MODELS = [
    # Molmo-7B
    'molmo-7B-O-0924/molmo-7B-O-0924',
    'molmo-7B-O-0924-data_scale_exp_80k/molmo-7B-O-0924-data_scale_exp_80k',
    'molmo-7B-O-0924-data_scale_exp_400k/molmo-7B-O-0924-data_scale_exp_400k',
    'molmo-7B-O-0924-data_scale_exp_800k/molmo-7B-O-0924-data_scale_exp_800k',
    'molmo-7B-O-0924-data_scale_exp_2m/molmo-7B-O-0924-data_scale_exp_2m',
    # NVILA-Lite-2B
    'NVILA-Lite-2B/NVILA-Lite-2B',
    'NVILA-Lite-2B-data-scale-exp-80k/NVILA-Lite-2B-data-scale-exp-80k',
    'NVILA-Lite-2B-data-scale-exp-400k/NVILA-Lite-2B-data-scale-exp-400k',
    'NVILA-Lite-2B-data-scale-exp-800k/NVILA-Lite-2B-data-scale-exp-800k',
    'NVILA-Lite-2B-data-scale-exp-2m/NVILA-Lite-2B-data-scale-exp-2m',
    'RoboRefer-2B-SFT/RoboRefer-2B-SFT',
    # Qwen2.5-VL-3B
    'Qwen2.5-VL-3B-Instruct/Qwen2.5-VL-3B-Instruct',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_80k/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_400k/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_800k/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_2m/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m',
]
218
+
219
+
220
def get_default_xlsx_paths():
    """Build the default result-xlsx path for every model in DEFAULT_MODELS."""
    paths = []
    for model in DEFAULT_MODELS:
        paths.append(f'{EVAL_OUTPUT_DIR}/{model}_EmbSpatialBench_openai_result.xlsx')
    return paths
222
+
223
+
224
def main():
    """CLI entry point: parse arguments, run per-model bias reports and/or
    the multi-model comparison, optionally teeing output to a file."""
    parser = argparse.ArgumentParser(description='Answer/Prediction Bias 분석')
    parser.add_argument('xlsx_files', nargs='*', help='Model result xlsx files (없으면 기본 모델 사용)')
    parser.add_argument('--compare', action='store_true', help='Compare multiple models (summary only)')
    parser.add_argument('--detail', action='store_true', help='Show detailed report for each model')
    parser.add_argument('--output', '-o', type=str, help='Save results to file')

    args = parser.parse_args()

    xlsx_files = args.xlsx_files if args.xlsx_files else get_default_xlsx_paths()

    tee = None
    if args.output:
        tee = TeeWriter(args.output)
        sys.stdout = tee

    try:
        if args.compare and not args.detail:
            compare_models_bias(xlsx_files)
        else:
            for xlsx_path in xlsx_files:
                results = analyze_model(xlsx_path)
                print_bias_report(xlsx_path, results)

            if len(xlsx_files) > 1:
                compare_models_bias(xlsx_files)
    finally:
        # Restore stdout even if the analysis raises, so the log file is
        # closed and any traceback goes to the real terminal.
        if tee is not None:
            sys.stdout = tee.close()
            print(f"Results saved to {args.output}")
252
+
253
+
254
# Run as a standalone script.
if __name__ == '__main__':
    main()
analyze_counter_consistent.py ADDED
@@ -0,0 +1,627 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Counter vs Consistent Example Analysis Script
4
+
5
+ 2D Heuristic (shared across datasets):
6
+ Upper part of image (small y) = farther from camera
7
+ Lower part of image (large y) = closer to camera
8
+
9
+ Datasets:
10
+ embspatial (default):
11
+ FAR/CLOSE questions in EmbSpatial-Bench
12
+ Consistent: GT answer agrees with the 2D heuristic (Height-Depth Entanglement)
13
+ Counter: GT answer contradicts the 2D heuristic
14
+
15
+ cvbench3d:
16
+ Depth questions: "Which object is closer to the camera?"
17
+ Consistent: GT object (closer) has larger center_y (lower in image)
18
+ Counter: GT object (closer) has smaller center_y (higher in image)
19
+ Distance questions: "Which object is closer to [reference]?"
20
+ 2D heuristic: smaller pixel distance to reference = closer in 3D
21
+ Consistent: GT candidate has smaller 2D pixel distance to reference
22
+ Counter: GT candidate has larger 2D pixel distance to reference
23
+
24
+ Usage:
25
+ python experiments/analyze_counter_consistent.py <model_result.xlsx> [--verbose]
26
+ python experiments/analyze_counter_consistent.py --compare <file1.xlsx> <file2.xlsx> ...
27
+ python experiments/analyze_counter_consistent.py --dataset cvbench3d <result.xlsx>
28
+ python experiments/analyze_counter_consistent.py --dataset cvbench3d --compare <file1.xlsx> ...
29
+ """
30
+
31
+ import argparse
32
+ import ast
33
+ import pandas as pd
34
+ import numpy as np
35
+ from datasets import load_dataset
36
+ from pathlib import Path
37
+ from typing import Dict, List, Tuple, Optional
38
+ import json
39
+ import sys
40
+
41
+
42
class TeeWriter:
    """Duplicate everything written to stdout into a log file as well.

    Install with ``sys.stdout = TeeWriter(path)``; ``close()`` closes the
    log file and returns the saved terminal stream for restoring stdout.
    """
    def __init__(self, filepath):
        self.terminal = sys.stdout  # keep the real stream for restoration
        self.file = open(filepath, 'w', encoding='utf-8')

    def write(self, message):
        # Mirror to both sinks; flush the file eagerly so the log survives
        # a crash mid-run.
        for sink in (self.terminal, self.file):
            sink.write(message)
        self.file.flush()

    def flush(self):
        for sink in (self.terminal, self.file):
            sink.flush()

    def close(self):
        """Close the log file and hand back the original stdout stream."""
        self.file.close()
        return self.terminal
60
+
61
+
62
+ # =============================================================================
63
+ # EmbSpatial-Bench
64
+ # =============================================================================
65
+
66
def get_bbox_center_y(bbox: List[int], source: str = None) -> float:
    """Return the vertical center of a bbox; the layout depends on source.

    ScanNet / MP3D store [x1, y1, w, h]   -> y1 + h / 2
    AI2Thor stores       [x1, y1, x2, y2] -> (y1 + y2) / 2
    """
    y1, last = bbox[1], bbox[3]
    if source == 'ai2thor':
        return (y1 + last) / 2  # last is y2
    return y1 + last / 2        # last is the box height
76
+
77
+
78
def classify_sample(relation: str, objects: Dict, gt_answer_idx: int,
                    answer_options: List[str] = None,
                    image_height: int = None, threshold_ratio: float = 0.05,
                    data_source: str = None) -> Tuple[str, Dict]:
    """
    Classify a sample as Consistent / Counter / Ambiguous.

    A sample is 'consistent' when the GT answer agrees with the 2D
    height-depth heuristic (higher in image = farther), 'counter' when it
    contradicts it, 'ambiguous' when the vertical separation is below the
    threshold.

    Args:
        relation: 'far' or 'close' (anything else -> 'not_applicable')
        objects: {'bbox': [...], 'name': [...]}
        gt_answer_idx: GT answer index (0-based, relative to answer_options)
        answer_options: list of answer choices (used to match bbox by name)
        image_height: image height for threshold normalization (pass PIL image.size[1])
        threshold_ratio: ambiguous decision threshold as a fraction of image height
        data_source: 'scannet' | 'mp3d' | 'ai2thor' (selects bbox format)

    Returns:
        classification: 'consistent', 'counter', 'ambiguous', or one of the
            sentinel strings 'not_applicable' / 'insufficient_objects' /
            'unknown_object' / 'index_out_of_range' (with empty details)
        details: dict with classification details (y values, threshold, objects)
    """
    if relation not in ['far', 'close']:
        return 'not_applicable', {}

    bboxes = objects['bbox']
    names = objects['name']

    # Need at least two objects to compare vertical positions.
    if len(bboxes) < 2:
        return 'insufficient_objects', {}

    # answer_options and objects['name'] may differ (e.g. 'Unknown')
    # resolve GT answer index against objects['name'].
    # NOTE(review): names.index() picks the FIRST occurrence if the GT name
    # appears multiple times — confirm object names are unique per sample.
    if answer_options is not None and gt_answer_idx < len(answer_options):
        gt_answer_name = answer_options[gt_answer_idx]
        if gt_answer_name in names:
            gt_answer_idx = names.index(gt_answer_name)
        elif gt_answer_name == 'Unknown' or gt_answer_idx >= len(bboxes):
            return 'unknown_object', {}

    # bounds check (gt_answer_idx may still index past bboxes after remap)
    if gt_answer_idx >= len(bboxes):
        return 'index_out_of_range', {}

    # compute center y per object using source-specific bbox format
    center_ys = [get_bbox_center_y(bbox, source=data_source) for bbox in bboxes]

    # GT object center y
    gt_center_y = center_ys[gt_answer_idx]

    # mean center y of all other objects
    other_ys = [y for i, y in enumerate(center_ys) if i != gt_answer_idx]
    other_avg_y = np.mean(other_ys)

    # y difference: positive means the GT object sits lower in the image
    y_diff = gt_center_y - other_avg_y

    # threshold normalized by image height
    if image_height:
        threshold = image_height * threshold_ratio
    else:
        threshold = 20  # fallback: 20 pixels

    details = {
        'gt_object': names[gt_answer_idx],
        'gt_center_y': gt_center_y,
        'other_avg_y': other_avg_y,
        'y_diff': y_diff,
        'threshold': threshold,
        'all_objects': list(zip(names, center_ys))
    }

    # ambiguous if difference is too small
    if abs(y_diff) < threshold:
        return 'ambiguous', details

    # FAR: consistent if GT is higher (smaller y)
    if relation == 'far':
        if gt_center_y < other_avg_y:
            return 'consistent', details
        else:
            return 'counter', details

    # CLOSE: consistent if GT is lower (larger y)
    else:
        if gt_center_y > other_avg_y:
            return 'consistent', details
        else:
            return 'counter', details
165
+
166
+
167
def get_image_height_by_source(data_source: str) -> int:
    """Fallback image height per data source (when no PIL image is available)."""
    if data_source == 'ai2thor':
        return 300
    if data_source == 'scannet':
        return 968
    # mp3d and any unknown source share the 480px fallback.
    return 480
175
+
176
+
177
def build_classification_cache(verbose: bool = False) -> Dict[str, Dict]:
    """
    Build a counter/consistent classification cache for the full EmbSpatial-Bench dataset.

    Returns:
        dict mapping question_id -> {'classification', 'relation',
        'data_source', 'details'}; rows whose relation is not far/close get
        only {'classification': 'not_applicable', 'relation': ...}.
    """
    print("Loading EmbSpatial-Bench dataset...")
    ds = load_dataset('FlagEval/EmbSpatial-Bench', split='test')

    cache = {}
    # Per-relation tallies, filled only for the three classified outcomes.
    stats = {'far': {'consistent': 0, 'counter': 0, 'ambiguous': 0},
             'close': {'consistent': 0, 'counter': 0, 'ambiguous': 0}}

    for item in ds:
        question_id = item['question_id']
        relation = item['relation']

        if relation not in ['far', 'close']:
            cache[question_id] = {'classification': 'not_applicable', 'relation': relation}
            continue

        objects = item['objects']
        gt_answer_idx = item['answer']  # 0-based index
        answer_options = item['answer_options']
        data_source = item['data_source']

        # use actual image height from PIL image (image.size -> (width, height));
        # fall back to the per-source constant if the image is missing.
        pil_image = item.get('image')
        if pil_image is not None and hasattr(pil_image, 'size'):
            image_height = pil_image.size[1]
        else:
            image_height = get_image_height_by_source(data_source)

        classification, details = classify_sample(
            relation, objects, gt_answer_idx, answer_options, image_height,
            data_source=data_source
        )

        cache[question_id] = {
            'classification': classification,
            'relation': relation,
            'data_source': item['data_source'],
            'details': details
        }

        # Sentinel outcomes (unknown_object etc.) are cached but not counted.
        if relation in stats and classification in stats[relation]:
            stats[relation][classification] += 1

    if verbose:
        print("\n=== Classification Statistics ===")
        for rel in ['far', 'close']:
            total = sum(stats[rel].values())
            print(f"\n{rel.upper()} (n={total}):")
            for cls, cnt in stats[rel].items():
                pct = cnt / total * 100 if total > 0 else 0
                print(f" {cls}: {cnt} ({pct:.1f}%)")

    return cache
233
+
234
+
235
def analyze_embspatial_results(xlsx_path: str, cache: Dict[str, Dict],
                               verbose: bool = False) -> Tuple[Dict, List[Dict]]:
    """Analyze a model result xlsx file against the EmbSpatialBench classification cache.

    Returns per-(relation, classification) correct/total counts plus the
    list of counter examples with their cached per-row details.
    """
    df = pd.read_excel(xlsx_path)

    def _fresh_buckets():
        # correct/total tally per classification outcome.
        return {cls: {'correct': 0, 'total': 0}
                for cls in ('consistent', 'counter', 'ambiguous')}

    results = {'far': _fresh_buckets(), 'close': _fresh_buckets()}
    counter_examples = []

    for _, row in df.iterrows():
        qid = row['question_id']
        category = row['category']

        # Skip rows outside far/close or missing from the cache.
        if category not in ('far', 'close') or qid not in cache:
            continue

        info = cache[qid]
        cls = info['classification']
        if cls not in ('consistent', 'counter', 'ambiguous'):
            continue

        hit = row['hit']
        bucket = results[category][cls]
        bucket['total'] += 1
        if hit == 1:
            bucket['correct'] += 1

        if cls == 'counter':
            counter_examples.append({
                'question_id': qid,
                'relation': category,
                'hit': hit,
                'prediction': row['prediction'],
                'answer': row['answer'],
                'data_source': info['data_source'],
                'details': info.get('details', {})
            })

    return results, counter_examples
288
+
289
+
290
+ # =============================================================================
291
+ # CV-Bench-3D
292
+ # =============================================================================
293
+
294
# Known image heights per source dataset (used for threshold normalization).
# Omni3D_SUNRGBD has variable sizes; classify_cvbench3d_row falls back to a
# max-bbox-y2 estimate for sources not listed here.
_CVBENCH3D_SOURCE_HEIGHTS = {
    'Omni3D_Hypersim': 768,
    'Omni3D_nuScenes': 900,
}
300
+
301
+
302
def classify_cvbench3d_row(row, depth_threshold_ratio: float = 0.05) -> Tuple[str, Dict]:
    """
    Classify a single CV-Bench-3D row as consistent / counter / ambiguous.

    Only Depth questions are classified — they share the same height-depth
    entanglement heuristic as EmbSpatial-Bench:
        2D heuristic: lower in image (larger center_y) = closer to camera
        Consistent:   GT object (closer to camera) has larger center_y
        Counter:      GT object (closer to camera) has smaller center_y

    Distance questions ask "which object is closer to [reference] in 3D
    real-world distance?" — inter-object 3D distance has no equivalent 2D
    projection heuristic, so those rows are marked 'not_applicable'.

    Returns:
        (classification, details) where classification is one of
        'consistent' / 'counter' / 'ambiguous' / 'not_applicable' /
        'invalid_bbox' / 'invalid_answer'.
    """
    # Fixed-resolution sources; anything else is estimated from bbox extents.
    source_heights = {'Omni3D_Hypersim': 768, 'Omni3D_nuScenes': 900}

    category = row['category']
    if category != 'Depth':
        return 'not_applicable', {}

    # Normalize the GT letter so both 'A' and '(A)'-style answers work.
    # NOTE(review): the original treated any answer != 'A' as B, silently
    # misclassifying malformed answers; reject them explicitly instead.
    answer_letter = str(row['answer']).strip().strip('()').strip().upper()
    if answer_letter not in ('A', 'B'):
        return 'invalid_answer', {}

    try:
        bbox_list = ast.literal_eval(row['bbox'])
    except (ValueError, SyntaxError):
        return 'invalid_bbox', {}

    # Depth questions compare exactly two objects.
    if len(bbox_list) != 2:
        return 'invalid_bbox', {}

    # bbox format here is [x1, y1, x2, y2] -> vertical center (y1 + y2) / 2.
    cy_A = (bbox_list[0][1] + bbox_list[0][3]) / 2
    cy_B = (bbox_list[1][1] + bbox_list[1][3]) / 2

    gt_y = cy_A if answer_letter == 'A' else cy_B
    other_y = cy_B if answer_letter == 'A' else cy_A
    y_diff = gt_y - other_y  # positive = GT is lower in image

    # Estimate image height: prefer known source height, fall back to max bbox y2
    source_dataset = str(row.get('source_dataset', ''))
    known_h = source_heights.get(source_dataset, 0)
    est_h = max(bb[3] for bb in bbox_list)
    image_height = max(known_h, est_h)
    threshold = image_height * depth_threshold_ratio

    details = {
        'answer': answer_letter,
        'center_y_A': cy_A,
        'center_y_B': cy_B,
        'y_diff': y_diff,
        'threshold': threshold,
        'image_height_est': image_height,
        'source_dataset': source_dataset,
    }

    if abs(y_diff) < threshold:
        return 'ambiguous', details
    # Consistent: GT (closer to camera) is lower in image (larger y)
    return ('consistent' if gt_y > other_y else 'counter'), details
359
+
360
+
361
def analyze_cvbench3d_results(xlsx_path: str, verbose: bool = False,
                              depth_threshold_ratio: float = 0.05) -> Tuple[Dict, List[Dict]]:
    """
    Analyze a CV-Bench-3D result xlsx file.

    Only the Depth category is classified into consistent / counter /
    ambiguous, because it shares the height-depth entanglement heuristic
    with EmbSpatial-Bench. Distance (inter-object 3D distance) has no
    analogous 2D projection heuristic and is excluded from the analysis.
    """
    df = pd.read_excel(xlsx_path)

    depth_buckets = {cls: {'correct': 0, 'total': 0}
                     for cls in ('consistent', 'counter', 'ambiguous')}
    # Distance: excluded — no height-depth entanglement heuristic for inter-object distance
    results = {'Depth': depth_buckets}
    counter_examples = []

    for _, row in df.iterrows():
        if row['category'] != 'Depth':
            continue

        classification, details = classify_cvbench3d_row(row, depth_threshold_ratio)
        if classification not in ('consistent', 'counter', 'ambiguous'):
            continue

        hit = row['hit']
        depth_buckets[classification]['total'] += 1
        if hit == 1:
            depth_buckets[classification]['correct'] += 1

        if classification == 'counter':
            counter_examples.append({
                'index': row['index'],
                'category': row['category'],
                'hit': hit,
                'prediction': row['prediction'],
                'answer': row['answer'],
                'source_dataset': row.get('source_dataset', ''),
                'details': details,
            })

    if verbose:
        print("\n=== CV-Bench-3D Depth Classification Statistics ===")
        total = sum(depth_buckets[c]['total'] for c in ('consistent', 'counter', 'ambiguous'))
        print(f"Depth (n={total}):")
        for cls in ('consistent', 'counter', 'ambiguous'):
            n = depth_buckets[cls]['total']
            pct = n / total * 100 if total > 0 else 0
            print(f" {cls}: {n} ({pct:.1f}%)")
        print("(Distance excluded: no 2D heuristic applies for inter-object 3D distance)")

    return results, counter_examples
421
+
422
+
423
+ # =============================================================================
424
+ # Generic report / compare (works for both datasets)
425
+ # =============================================================================
426
+
427
# Result-file stem suffixes to strip, per dataset, when recovering a model
# name from an xlsx path (see extract_model_name).
_XLSX_SUFFIXES = {
    'embspatial': [
        '_EmbSpatialBench_openai_result',
        '_EmbSpatialBench_exact_matching_result',
    ],
    'cvbench3d': [
        '_CV-Bench-3D_chatgpt-0125_result',
        '_CV-Bench-3D_exact_matching_result',
    ],
}
437
+
438
+
439
def extract_model_name(xlsx_path: str, dataset: str) -> str:
    """Derive the model name from a result-xlsx path by stripping the
    dataset-specific result suffixes (unknown datasets strip nothing)."""
    name = Path(xlsx_path).stem
    for suffix in _XLSX_SUFFIXES.get(dataset, []):
        name = name.replace(suffix, '')
    return name
444
+
445
+
446
def print_analysis_report(xlsx_path: str, results: Dict, counter_examples: List[Dict],
                          dataset: str) -> Dict:
    """Print analysis report for a single model (works for any dataset).

    Args:
        xlsx_path: result xlsx path (used to derive the model name).
        results: {category: {classification: {'correct', 'total'}}} counts.
        counter_examples: counter rows collected by the analyzer.
        dataset: 'embspatial' or 'cvbench3d' (suffix stripping).

    Returns:
        Summary dict with model_name, consistent/counter accuracies and totals.
    """
    model_name = extract_model_name(xlsx_path, dataset)

    print(f"\n{'='*70}")
    print(f"Model: {model_name}")
    print(f"{'='*70}")

    print(f"\n{'Category':<12} {'Type':<12} {'Correct':<10} {'Total':<10} {'Accuracy':<10}")
    print("-" * 54)

    # Aggregates across all categories, per classification type.
    total_consistent = {'correct': 0, 'total': 0}
    total_counter = {'correct': 0, 'total': 0}

    for category in results:
        for cls_type in ['consistent', 'counter', 'ambiguous']:
            data = results[category][cls_type]
            if data['total'] > 0:
                acc = data['correct'] / data['total'] * 100
                print(f"{category:<12} {cls_type:<12} {data['correct']:<10} {data['total']:<10} {acc:.1f}%")

            if cls_type == 'consistent':
                total_consistent['correct'] += data['correct']
                total_consistent['total'] += data['total']
            elif cls_type == 'counter':
                total_counter['correct'] += data['correct']
                total_counter['total'] += data['total']

    print("-" * 54)
    if total_consistent['total'] > 0:
        acc = total_consistent['correct'] / total_consistent['total'] * 100
        print(f"{'TOTAL':<12} {'consistent':<12} {total_consistent['correct']:<10} {total_consistent['total']:<10} {acc:.1f}%")
    if total_counter['total'] > 0:
        acc = total_counter['correct'] / total_counter['total'] * 100
        print(f"{'TOTAL':<12} {'counter':<12} {total_counter['correct']:<10} {total_counter['total']:<10} {acc:.1f}%")

    # Gap between consistent and counter accuracy: the key bias indicator.
    if total_consistent['total'] > 0 and total_counter['total'] > 0:
        consistent_acc = total_consistent['correct'] / total_consistent['total'] * 100
        counter_acc = total_counter['correct'] / total_counter['total'] * 100
        gap = consistent_acc - counter_acc
        print(f"\nAccuracy Gap (Consistent - Counter): {gap:.1f}%p")
        print(f" -> Larger gap indicates stronger reliance on the 2D heuristic")

    # How many counter examples the model got wrong.
    counter_wrong = [ex for ex in counter_examples if ex['hit'] == 0]
    if len(counter_wrong) > 0:
        print(f"\n🔍 Counter examples wrong: {len(counter_wrong)} / {len(counter_examples)}")

    return {
        'model_name': model_name,
        'consistent_acc': total_consistent['correct'] / total_consistent['total'] * 100 if total_consistent['total'] > 0 else 0,
        'counter_acc': total_counter['correct'] / total_counter['total'] * 100 if total_counter['total'] > 0 else 0,
        'consistent_total': total_consistent['total'],
        'counter_total': total_counter['total'],
    }
501
+
502
+
503
def _run_analysis(xlsx_path: str, dataset: str, cache: Optional[Dict] = None,
                  verbose: bool = False,
                  depth_threshold_ratio: float = 0.05) -> Tuple[Dict, List[Dict]]:
    """Dispatch to the dataset-specific analyzer.

    Returns the (results, counter_examples) pair produced by either
    analyze_cvbench3d_results or analyze_embspatial_results.
    """
    if dataset != 'cvbench3d':
        # EmbSpatialBench path: uses the classification cache, no depth threshold.
        return analyze_embspatial_results(xlsx_path, cache, verbose=verbose)
    return analyze_cvbench3d_results(xlsx_path, verbose=verbose,
                                     depth_threshold_ratio=depth_threshold_ratio)
511
+
512
+
513
def compare_models(xlsx_paths: List[str], dataset: str, cache: Optional[Dict] = None):
    """Compare multiple models side by side.

    Prints a full per-model report (via print_analysis_report) for each path,
    then a summary table of consistent/counter accuracies and their gap.

    Args:
        xlsx_paths: result-xlsx files, one per model.
        dataset: 'embspatial' or 'cvbench3d'.
        cache: optional EmbSpatialBench classification cache.
    """
    summaries = []

    for xlsx_path in xlsx_paths:
        results, counter_examples = _run_analysis(xlsx_path, dataset, cache)
        summary = print_analysis_report(xlsx_path, results, counter_examples, dataset)
        summaries.append(summary)

    # Nothing to tabulate; also prevents max() raising ValueError on an
    # empty sequence when xlsx_paths is empty.
    if not summaries:
        return

    # Name column adapts to the longest model name (minimum width 40).
    max_name_len = max(len(s['model_name']) for s in summaries)
    col_w = max(max_name_len + 2, 40)
    total_w = col_w + 12 + 12 + 10
    print(f"\n{'='*total_w}")
    print("MODEL COMPARISON")
    print(f"{'='*total_w}")
    print(f"{'Model':<{col_w}} {'Consistent':<12} {'Counter':<12} {'Gap':<10}")
    print("-" * total_w)

    for s in summaries:
        gap = s['consistent_acc'] - s['counter_acc']
        print(f"{s['model_name']:<{col_w}} {s['consistent_acc']:.1f}%{'':<6} {s['counter_acc']:.1f}%{'':<6} {gap:+.1f}%p")
534
+
535
+
536
# Root directory containing VLMEvalKit evaluation outputs.
EVAL_OUTPUT_DIR = 'VLMEvalKit/outputs'

# Default '<subdir>/<file-stem>' entries analyzed when no xlsx files are given
# on the command line; get_default_xlsx_paths appends the dataset suffix.
DEFAULT_MODELS = [
    # Molmo-7B
    'molmo-7B-O-0924/molmo-7B-O-0924',
    'molmo-7B-O-0924-data_scale_exp_80k/molmo-7B-O-0924-data_scale_exp_80k',
    'molmo-7B-O-0924-data_scale_exp_400k/molmo-7B-O-0924-data_scale_exp_400k',
    'molmo-7B-O-0924-data_scale_exp_800k/molmo-7B-O-0924-data_scale_exp_800k',
    'molmo-7B-O-0924-data_scale_exp_2m/molmo-7B-O-0924-data_scale_exp_2m',
    # NVILA-Lite-2B
    'NVILA-Lite-2B/NVILA-Lite-2B',
    'NVILA-Lite-2B-data-scale-exp-80k/NVILA-Lite-2B-data-scale-exp-80k',
    'NVILA-Lite-2B-data-scale-exp-400k/NVILA-Lite-2B-data-scale-exp-400k',
    'NVILA-Lite-2B-data-scale-exp-800k/NVILA-Lite-2B-data-scale-exp-800k',
    'NVILA-Lite-2B-data-scale-exp-2m/NVILA-Lite-2B-data-scale-exp-2m',
    'NVILA-Lite-2B-ST-80k-5pct/NVILA-Lite-2B-ST-80k-5pct',
    'NVILA-Lite-2B-ST-400k-5pct/NVILA-Lite-2B-ST-400k-5pct',
    'NVILA-Lite-2B-ST-800k-5pct/NVILA-Lite-2B-ST-800k-5pct',
    'RoboRefer-2B-SFT/RoboRefer-2B-SFT',
    # Qwen2.5-VL-3B
    'Qwen2.5-VL-3B-Instruct/Qwen2.5-VL-3B-Instruct',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_80k/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_400k/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_800k/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_2m/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m',
    'Qwen3-VL-235B-A22B-Instruct/Qwen3-VL-235B-A22B-Instruct'
]
563
+
564
+
565
def get_default_xlsx_paths(dataset: str) -> List[str]:
    """Build the default result-xlsx path for every model in DEFAULT_MODELS."""
    suffix = ('_CV-Bench-3D_chatgpt-0125_result.xlsx' if dataset == 'cvbench3d'
              else '_EmbSpatialBench_openai_result.xlsx')
    return [f'{EVAL_OUTPUT_DIR}/{model}{suffix}' for model in DEFAULT_MODELS]
572
+
573
+
574
def main():
    """CLI entry point: parse args, build/load the cache, run the analysis.

    With multiple xlsx files (or --compare) a side-by-side comparison is
    printed; otherwise a single-model report. --output tees all stdout to a
    file via TeeWriter.
    """
    parser = argparse.ArgumentParser(description='Counter vs Consistent Example Analysis')
    parser.add_argument('xlsx_files', nargs='*',
                        help='Model result xlsx files (uses default model list if omitted)')
    parser.add_argument('--dataset', choices=['embspatial', 'cvbench3d'], default='embspatial',
                        help='Benchmark dataset to analyze (default: embspatial)')
    parser.add_argument('--compare', action='store_true', help='Compare multiple models')
    parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
    parser.add_argument('--output', '-o', type=str, help='Save results to file')
    parser.add_argument('--save-cache', type=str,
                        help='Save EmbSpatialBench classification cache to JSON')
    parser.add_argument('--load-cache', type=str,
                        help='Load EmbSpatialBench classification cache from JSON')

    args = parser.parse_args()

    # Build/load cache (EmbSpatialBench only; CV-Bench-3D reads bbox from xlsx directly)
    cache = None
    if args.dataset == 'embspatial':
        if args.load_cache and Path(args.load_cache).exists():
            print(f"Loading cache from {args.load_cache}...")
            with open(args.load_cache, 'r') as f:
                cache = json.load(f)
        else:
            # No usable cache on disk: classify from scratch.
            cache = build_classification_cache(verbose=args.verbose)

        if args.save_cache:
            print(f"Saving cache to {args.save_cache}...")
            with open(args.save_cache, 'w') as f:
                json.dump(cache, f, indent=2)

    # Fall back to the built-in model list when no files were passed.
    xlsx_files = args.xlsx_files if args.xlsx_files else get_default_xlsx_paths(args.dataset)

    # Optionally tee stdout to a file for the duration of the analysis.
    tee = None
    if args.output:
        tee = TeeWriter(args.output)
        sys.stdout = tee

    try:
        if args.compare or len(xlsx_files) > 1:
            compare_models(xlsx_files, args.dataset, cache)
        else:
            results, counter_examples = _run_analysis(
                xlsx_files[0], args.dataset, cache, args.verbose
            )
            print_analysis_report(xlsx_files[0], results, counter_examples, args.dataset)
    finally:
        # Restore the real stdout even if the analysis raised.
        if tee is not None:
            sys.stdout = tee.close()
            print(f"Results saved to {args.output}")
624
+
625
+
626
# Script entry point.
if __name__ == '__main__':
    main()
analyze_heuristic_position.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Distribution analysis of the 2D-heuristic answer's option position (A/B/C/D).

Hypothesis: if, for FAR questions, the 2D-heuristic answer (top of the image
= farthest object) is concentrated at one option position (e.g. D), that
would explain why models with a D bias show a stronger bias on FAR.

Definition of the 2D-heuristic answer:
- FAR: the object with the smallest center_y (top of image = "farthest")
- CLOSE: the object with the largest center_y (bottom of image = "closest")

Usage:
    python experiments/analyze_heuristic_position.py
    python experiments/analyze_heuristic_position.py -o experiments/heuristic_position_results.txt
"""

import argparse
import numpy as np
from datasets import load_dataset
from collections import Counter, defaultdict
import sys
24
+
25
class TeeWriter:
    """Mirror everything written to stdout into a UTF-8 log file as well."""

    def __init__(self, filepath):
        # Remember the stream that was stdout at construction time so
        # close() can hand it back to the caller.
        self.terminal = sys.stdout
        self.file = open(filepath, 'w', encoding='utf-8')

    def write(self, message):
        # Fan each chunk out to both sinks.
        for sink in (self.terminal, self.file):
            sink.write(message)

    def flush(self):
        for sink in (self.terminal, self.file):
            sink.flush()

    def close(self):
        """Close the log file and return the original stdout stream."""
        self.file.close()
        return self.terminal
42
+
43
+
44
def get_bbox_center_y(bbox):
    """Vertical center of a [x, y, width, height] bounding box."""
    top, height = bbox[1], bbox[3]
    return top + height / 2
47
+
48
+
49
def find_heuristic_answer(relation, objects, answer_options):
    """Return the option the 2D heuristic would select.

    FAR: the object with the smallest bbox center_y (top of image = "farthest").
    CLOSE: the object with the largest bbox center_y (bottom of image = "closest").

    Returns:
        (position, name): position is 0-3 (A-D), or None when there are fewer
        than two objects or the chosen object is absent from answer_options;
        name is the chosen object's name (None only in the <2-objects case).
    """
    boxes = objects['bbox']
    labels = objects['name']

    if len(boxes) < 2:
        return None, None

    # Vertical center of each [x, y, w, h] bbox: y + h/2.
    centers = [box[1] + box[3] / 2 for box in boxes]

    # FAR picks the topmost object (min center_y); CLOSE the bottommost (max).
    choose = min if relation == 'far' else max
    winner = choose(range(len(boxes)), key=centers.__getitem__)
    winner_name = labels[winner]

    # Map the chosen object onto its A/B/C/D slot, if present.
    if winner_name in answer_options:
        return answer_options.index(winner_name), winner_name
    return None, winner_name
84
+
85
+
86
def main():
    """Analyze where the 2D-heuristic answer lands among the A-D options.

    Streams the EmbSpatial-Bench test split, keeps only far/close relations,
    computes the heuristic answer per item, then prints:
    position distributions (heuristic vs GT), a heuristic-vs-GT cross table,
    and a summary of whether the heuristic answer is concentrated at D.
    """
    parser = argparse.ArgumentParser(description='2D Heuristic 답의 선택지 위치 분포 분석')
    parser.add_argument('-o', '--output', type=str, help='Save results to file')
    args = parser.parse_args()

    if args.output:
        tee = TeeWriter(args.output)
        sys.stdout = tee

    print("Loading EmbSpatial-Bench dataset...")
    ds = load_dataset('FlagEval/EmbSpatial-Bench', split='test')

    position_labels = ['A', 'B', 'C', 'D']

    # Aggregate statistics
    heuristic_pos_far = []  # positions of the heuristic answer for FAR
    heuristic_pos_close = []  # positions of the heuristic answer for CLOSE
    gt_pos_far = []  # positions of the GT answer for FAR
    gt_pos_close = []  # positions of the GT answer for CLOSE

    # Whether heuristic == GT
    heuristic_is_gt_far = 0
    heuristic_is_gt_close = 0
    total_far = 0
    total_close = 0

    # For detailed analysis
    not_found_count = 0

    for item in ds:
        relation = item['relation']
        if relation not in ['far', 'close']:
            continue

        objects = item['objects']
        answer_options = item['answer_options']
        gt_answer_idx = item['answer']

        heuristic_pos, heuristic_name = find_heuristic_answer(relation, objects, answer_options)

        # Heuristic object missing from the options (or <2 objects): skip.
        if heuristic_pos is None:
            not_found_count += 1
            continue

        if relation == 'far':
            total_far += 1
            heuristic_pos_far.append(heuristic_pos)
            gt_pos_far.append(gt_answer_idx)
            if heuristic_pos == gt_answer_idx:
                heuristic_is_gt_far += 1
        else:
            total_close += 1
            heuristic_pos_close.append(heuristic_pos)
            gt_pos_close.append(gt_answer_idx)
            if heuristic_pos == gt_answer_idx:
                heuristic_is_gt_close += 1

    # ===== Print results =====
    print(f"\n{'='*70}")
    print("2D Heuristic 답의 선택지 위치(A/B/C/D) 분포 분석")
    print(f"{'='*70}")
    print(f"\nHeuristic 정의:")
    print(f" FAR: center_y가 가장 작은 물체 (이미지 위쪽 = '가장 멀다')")
    print(f" CLOSE: center_y가 가장 큰 물체 (이미지 아래쪽 = '가장 가깝다')")
    print(f"\n매칭 실패: {not_found_count}개 (answer_options에 heuristic 물체가 없음)")

    # Report FAR, CLOSE, and their union in turn.
    for label, h_positions, g_positions, total, h_is_gt in [
        ('FAR', heuristic_pos_far, gt_pos_far, total_far, heuristic_is_gt_far),
        ('CLOSE', heuristic_pos_close, gt_pos_close, total_close, heuristic_is_gt_close),
        ('FAR+CLOSE', heuristic_pos_far + heuristic_pos_close,
         gt_pos_far + gt_pos_close, total_far + total_close,
         heuristic_is_gt_far + heuristic_is_gt_close),
    ]:
        print(f"\n{'─'*60}")
        print(f" {label} (n={total})")
        print(f"{'─'*60}")

        # Distribution of heuristic-answer positions
        h_counter = Counter(h_positions)
        print(f"\n [Heuristic 답의 위치 분포]")
        print(f" {'Position':<10} {'Count':<10} {'Ratio':<10}")
        for i, pl in enumerate(position_labels):
            cnt = h_counter.get(i, 0)
            ratio = cnt / total * 100 if total > 0 else 0
            print(f" {pl:<10} {cnt:<10} {ratio:.1f}%")
        # Std of the four percentages: 0 would mean a perfectly uniform spread.
        h_std = np.std([h_counter.get(i, 0) / total * 100 for i in range(4)])
        print(f" Std: {h_std:.1f}%p")

        # Distribution of GT-answer positions (for reference)
        g_counter = Counter(g_positions)
        print(f"\n [GT 답의 위치 분포]")
        print(f" {'Position':<10} {'Count':<10} {'Ratio':<10}")
        for i, pl in enumerate(position_labels):
            cnt = g_counter.get(i, 0)
            ratio = cnt / total * 100 if total > 0 else 0
            print(f" {pl:<10} {cnt:<10} {ratio:.1f}%")
        g_std = np.std([g_counter.get(i, 0) / total * 100 for i in range(4)])
        print(f" Std: {g_std:.1f}%p")

        # Fraction where heuristic == GT
        # NOTE(review): h_is_gt_total is assigned but never used; the print
        # below uses h_is_gt, which already equals the sum for 'FAR+CLOSE'.
        h_is_gt_total = heuristic_is_gt_far + heuristic_is_gt_close if label == 'FAR+CLOSE' else h_is_gt
        print(f"\n Heuristic == GT: {h_is_gt}/{total} ({h_is_gt/total*100:.1f}%)")
        print(f" → 이 비율이 Consistent 샘플 비율과 유사해야 함")

    # ===== Cross analysis: heuristic position vs GT position =====
    print(f"\n{'='*70}")
    print("Heuristic 답 위치 vs GT 답 위치 교차 분석")
    print(f"{'='*70}")

    for label, h_positions, g_positions, total in [
        ('FAR', heuristic_pos_far, gt_pos_far, total_far),
        ('CLOSE', heuristic_pos_close, gt_pos_close, total_close),
    ]:
        print(f"\n {label}: Heuristic 위치별 GT 위치 분포")
        print(f" (행: Heuristic 위치, 열: GT 위치)")
        print(f"\n {'Heur\\GT':<10}", end='')
        for pl in position_labels:
            print(f"{pl:<10}", end='')
        print(f"{'Total':<10}")
        print(f" {'─'*50}")

        # cross[heuristic_pos][gt_pos] = count
        cross = defaultdict(lambda: defaultdict(int))
        for h, g in zip(h_positions, g_positions):
            cross[h][g] += 1

        for hi, hpl in enumerate(position_labels):
            row_total = sum(cross[hi].values())
            if row_total == 0:
                continue
            print(f" {hpl:<10}", end='')
            for gi in range(4):
                cnt = cross[hi][gi]
                pct = cnt / row_total * 100 if row_total > 0 else 0
                print(f"{cnt}({pct:.0f}%){'':<2}", end='')
            print(f"{row_total}")

    # ===== Key summary =====
    print(f"\n{'='*70}")
    print("핵심 요약")
    print(f"{'='*70}")

    far_h_counter = Counter(heuristic_pos_far)
    close_h_counter = Counter(heuristic_pos_close)

    # Most frequent heuristic-answer position per relation.
    far_max_pos = max(range(4), key=lambda i: far_h_counter.get(i, 0))
    far_max_pct = far_h_counter.get(far_max_pos, 0) / total_far * 100
    close_max_pos = max(range(4), key=lambda i: close_h_counter.get(i, 0))
    close_max_pct = close_h_counter.get(close_max_pos, 0) / total_close * 100

    print(f"\n FAR heuristic 답 최다 위치: {position_labels[far_max_pos]} ({far_max_pct:.1f}%)")
    print(f" CLOSE heuristic 답 최다 위치: {position_labels[close_max_pos]} ({close_max_pct:.1f}%)")

    # Share of heuristic answers landing on option D (index 3).
    far_d_pct = far_h_counter.get(3, 0) / total_far * 100
    close_d_pct = close_h_counter.get(3, 0) / total_close * 100
    print(f"\n FAR heuristic 답이 D 위치: {far_h_counter.get(3, 0)}/{total_far} ({far_d_pct:.1f}%)")
    print(f" CLOSE heuristic 답이 D 위치: {close_h_counter.get(3, 0)}/{total_close} ({close_d_pct:.1f}%)")

    # 30% threshold flags a D concentration well above the uniform 25%.
    if far_d_pct > 30:
        print(f"\n ⚠ FAR에서 heuristic 답이 D에 {far_d_pct:.1f}% 편중!")
        print(f" → D bias 모델이 FAR에서 heuristic에 따라 D를 선택하는 경향 설명 가능")
    else:
        print(f"\n FAR heuristic 답의 D 위치 비율이 균등({far_d_pct:.1f}%)이므로,")
        print(f" D bias가 FAR에서 더 심한 것은 선택지 배치 때문이 아닌 다른 요인일 가능성")

    if args.output:
        sys.stdout = tee.close()
        print(f"Results saved to {args.output}")
252
+ print(f"Results saved to {args.output}")
253
+
254
+
255
# Script entry point.
if __name__ == '__main__':
    main()
answer_bias_results.txt ADDED
@@ -0,0 +1,1607 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ================================================================================
3
+ Model: molmo-7B-O-0924
4
+ ================================================================================
5
+
6
+ --- ALL (n=3640) ---
7
+
8
+ GT Answer Distribution:
9
+ Pos Count Pct Acc when GT
10
+ -----------------------------------
11
+ A 958 26.3% 68.6%
12
+ B 912 25.1% 66.9%
13
+ C 852 23.4% 56.7%
14
+ D 918 25.2% 50.2%
15
+
16
+ Model Prediction Distribution:
17
+ Pos Count Pct Acc when Pred
18
+ -----------------------------------
19
+ A 1102 30.3% 59.6%
20
+ B 1022 28.1% 59.7%
21
+ C 798 21.9% 60.5%
22
+ D 718 19.7% 64.2%
23
+
24
+ Bias Indicators:
25
+ GT Distribution Std: 1.04%p (uniform=0)
26
+ Pred Distribution Std: 4.32%p (uniform=0)
27
+ Overall Accuracy: 60.7%
28
+
29
+ --- FAR+CLOSE (n=1206) ---
30
+
31
+ GT Answer Distribution:
32
+ Pos Count Pct Acc when GT
33
+ -----------------------------------
34
+ A 319 26.5% 63.0%
35
+ B 290 24.0% 59.7%
36
+ C 289 24.0% 54.7%
37
+ D 308 25.5% 58.1%
38
+
39
+ Model Prediction Distribution:
40
+ Pos Count Pct Acc when Pred
41
+ -----------------------------------
42
+ A 329 27.3% 61.1%
43
+ B 286 23.7% 60.5%
44
+ C 274 22.7% 57.7%
45
+ D 317 26.3% 56.5%
46
+
47
+ Bias Indicators:
48
+ GT Distribution Std: 1.05%p (uniform=0)
49
+ Pred Distribution Std: 1.85%p (uniform=0)
50
+ Overall Accuracy: 59.0%
51
+
52
+ --- FAR (n=594) ---
53
+
54
+ GT Answer Distribution:
55
+ Pos Count Pct Acc when GT
56
+ -----------------------------------
57
+ A 159 26.8% 59.1%
58
+ B 156 26.3% 62.2%
59
+ C 130 21.9% 63.1%
60
+ D 149 25.1% 62.4%
61
+
62
+ Model Prediction Distribution:
63
+ Pos Count Pct Acc when Pred
64
+ -----------------------------------
65
+ A 141 23.7% 66.7%
66
+ B 146 24.6% 66.4%
67
+ C 141 23.7% 58.2%
68
+ D 166 27.9% 56.0%
69
+
70
+ Bias Indicators:
71
+ GT Distribution Std: 1.90%p (uniform=0)
72
+ Pred Distribution Std: 1.74%p (uniform=0)
73
+ Overall Accuracy: 61.6%
74
+
75
+ --- CLOSE (n=612) ---
76
+
77
+ GT Answer Distribution:
78
+ Pos Count Pct Acc when GT
79
+ -----------------------------------
80
+ A 160 26.1% 66.9%
81
+ B 134 21.9% 56.7%
82
+ C 159 26.0% 47.8%
83
+ D 159 26.0% 54.1%
84
+
85
+ Model Prediction Distribution:
86
+ Pos Count Pct Acc when Pred
87
+ -----------------------------------
88
+ A 188 30.7% 56.9%
89
+ B 140 22.9% 54.3%
90
+ C 133 21.7% 57.1%
91
+ D 151 24.7% 57.0%
92
+
93
+ Bias Indicators:
94
+ GT Distribution Std: 1.79%p (uniform=0)
95
+ Pred Distribution Std: 3.46%p (uniform=0)
96
+ Overall Accuracy: 56.4%
97
+
98
+ ================================================================================
99
+ Model: molmo-7B-O-0924-data_scale_exp_80k
100
+ ================================================================================
101
+
102
+ --- ALL (n=3640) ---
103
+
104
+ GT Answer Distribution:
105
+ Pos Count Pct Acc when GT
106
+ -----------------------------------
107
+ A 958 26.3% 58.7%
108
+ B 912 25.1% 51.6%
109
+ C 852 23.4% 60.4%
110
+ D 918 25.2% 41.2%
111
+
112
+ Model Prediction Distribution:
113
+ Pos Count Pct Acc when Pred
114
+ -----------------------------------
115
+ A 1021 28.0% 55.0%
116
+ B 876 24.1% 53.8%
117
+ C 1035 28.4% 49.8%
118
+ D 708 19.5% 53.4%
119
+
120
+ Bias Indicators:
121
+ GT Distribution Std: 1.04%p (uniform=0)
122
+ Pred Distribution Std: 3.63%p (uniform=0)
123
+ Overall Accuracy: 52.9%
124
+
125
+ --- FAR+CLOSE (n=1206) ---
126
+
127
+ GT Answer Distribution:
128
+ Pos Count Pct Acc when GT
129
+ -----------------------------------
130
+ A 319 26.5% 58.3%
131
+ B 290 24.0% 53.4%
132
+ C 289 24.0% 56.4%
133
+ D 308 25.5% 52.6%
134
+
135
+ Model Prediction Distribution:
136
+ Pos Count Pct Acc when Pred
137
+ -----------------------------------
138
+ A 315 26.1% 59.0%
139
+ B 287 23.8% 54.0%
140
+ C 285 23.6% 57.2%
141
+ D 319 26.5% 50.8%
142
+
143
+ Bias Indicators:
144
+ GT Distribution Std: 1.05%p (uniform=0)
145
+ Pred Distribution Std: 1.29%p (uniform=0)
146
+ Overall Accuracy: 55.2%
147
+
148
+ --- FAR (n=594) ---
149
+
150
+ GT Answer Distribution:
151
+ Pos Count Pct Acc when GT
152
+ -----------------------------------
153
+ A 159 26.8% 49.7%
154
+ B 156 26.3% 55.8%
155
+ C 130 21.9% 53.1%
156
+ D 149 25.1% 49.7%
157
+
158
+ Model Prediction Distribution:
159
+ Pos Count Pct Acc when Pred
160
+ -----------------------------------
161
+ A 141 23.7% 56.0%
162
+ B 163 27.4% 53.4%
163
+ C 128 21.5% 53.9%
164
+ D 162 27.3% 45.7%
165
+
166
+ Bias Indicators:
167
+ GT Distribution Std: 1.90%p (uniform=0)
168
+ Pred Distribution Std: 2.48%p (uniform=0)
169
+ Overall Accuracy: 52.0%
170
+
171
+ --- CLOSE (n=612) ---
172
+
173
+ GT Answer Distribution:
174
+ Pos Count Pct Acc when GT
175
+ -----------------------------------
176
+ A 160 26.1% 66.9%
177
+ B 134 21.9% 50.7%
178
+ C 159 26.0% 59.1%
179
+ D 159 26.0% 55.3%
180
+
181
+ Model Prediction Distribution:
182
+ Pos Count Pct Acc when Pred
183
+ -----------------------------------
184
+ A 174 28.4% 61.5%
185
+ B 124 20.3% 54.8%
186
+ C 157 25.7% 59.9%
187
+ D 157 25.7% 56.1%
188
+
189
+ Bias Indicators:
190
+ GT Distribution Std: 1.79%p (uniform=0)
191
+ Pred Distribution Std: 2.96%p (uniform=0)
192
+ Overall Accuracy: 58.3%
193
+
194
+ ================================================================================
195
+ Model: molmo-7B-O-0924-data_scale_exp_400k
196
+ ================================================================================
197
+
198
+ --- ALL (n=3640) ---
199
+
200
+ GT Answer Distribution:
201
+ Pos Count Pct Acc when GT
202
+ -----------------------------------
203
+ A 958 26.3% 60.9%
204
+ B 912 25.1% 63.4%
205
+ C 852 23.4% 68.5%
206
+ D 918 25.2% 67.1%
207
+
208
+ Model Prediction Distribution:
209
+ Pos Count Pct Acc when Pred
210
+ -----------------------------------
211
+ A 862 23.7% 67.6%
212
+ B 860 23.6% 67.2%
213
+ C 968 26.6% 60.3%
214
+ D 950 26.1% 64.8%
215
+
216
+ Bias Indicators:
217
+ GT Distribution Std: 1.04%p (uniform=0)
218
+ Pred Distribution Std: 1.36%p (uniform=0)
219
+ Overall Accuracy: 64.9%
220
+
221
+ --- FAR+CLOSE (n=1206) ---
222
+
223
+ GT Answer Distribution:
224
+ Pos Count Pct Acc when GT
225
+ -----------------------------------
226
+ A 319 26.5% 52.4%
227
+ B 290 24.0% 56.6%
228
+ C 289 24.0% 61.9%
229
+ D 308 25.5% 55.8%
230
+
231
+ Model Prediction Distribution:
232
+ Pos Count Pct Acc when Pred
233
+ -----------------------------------
234
+ A 290 24.0% 57.6%
235
+ B 271 22.5% 60.5%
236
+ C 344 28.5% 52.0%
237
+ D 301 25.0% 57.1%
238
+
239
+ Bias Indicators:
240
+ GT Distribution Std: 1.05%p (uniform=0)
241
+ Pred Distribution Std: 2.22%p (uniform=0)
242
+ Overall Accuracy: 56.6%
243
+
244
+ --- FAR (n=594) ---
245
+
246
+ GT Answer Distribution:
247
+ Pos Count Pct Acc when GT
248
+ -----------------------------------
249
+ A 159 26.8% 53.5%
250
+ B 156 26.3% 53.8%
251
+ C 130 21.9% 66.2%
252
+ D 149 25.1% 63.1%
253
+
254
+ Model Prediction Distribution:
255
+ Pos Count Pct Acc when Pred
256
+ -----------------------------------
257
+ A 138 23.2% 61.6%
258
+ B 129 21.7% 65.1%
259
+ C 171 28.8% 50.3%
260
+ D 156 26.3% 60.3%
261
+
262
+ Bias Indicators:
263
+ GT Distribution Std: 1.90%p (uniform=0)
264
+ Pred Distribution Std: 2.73%p (uniform=0)
265
+ Overall Accuracy: 58.8%
266
+
267
+ --- CLOSE (n=612) ---
268
+
269
+ GT Answer Distribution:
270
+ Pos Count Pct Acc when GT
271
+ -----------------------------------
272
+ A 160 26.1% 51.2%
273
+ B 134 21.9% 59.7%
274
+ C 159 26.0% 58.5%
275
+ D 159 26.0% 49.1%
276
+
277
+ Model Prediction Distribution:
278
+ Pos Count Pct Acc when Pred
279
+ -----------------------------------
280
+ A 152 24.8% 53.9%
281
+ B 142 23.2% 56.3%
282
+ C 173 28.3% 53.8%
283
+ D 145 23.7% 53.8%
284
+
285
+ Bias Indicators:
286
+ GT Distribution Std: 1.79%p (uniform=0)
287
+ Pred Distribution Std: 1.98%p (uniform=0)
288
+ Overall Accuracy: 54.4%
289
+
290
+ ================================================================================
291
+ Model: molmo-7B-O-0924-data_scale_exp_800k
292
+ ================================================================================
293
+
294
+ --- ALL (n=3640) ---
295
+
296
+ GT Answer Distribution:
297
+ Pos Count Pct Acc when GT
298
+ -----------------------------------
299
+ A 958 26.3% 59.3%
300
+ B 912 25.1% 63.8%
301
+ C 852 23.4% 75.2%
302
+ D 918 25.2% 78.9%
303
+
304
+ Model Prediction Distribution:
305
+ Pos Count Pct Acc when Pred
306
+ -----------------------------------
307
+ A 723 19.9% 78.6%
308
+ B 769 21.1% 75.7%
309
+ C 984 27.0% 65.1%
310
+ D 1164 32.0% 62.2%
311
+
312
+ Bias Indicators:
313
+ GT Distribution Std: 1.04%p (uniform=0)
314
+ Pred Distribution Std: 4.85%p (uniform=0)
315
+ Overall Accuracy: 69.1%
316
+
317
+ --- FAR+CLOSE (n=1206) ---
318
+
319
+ GT Answer Distribution:
320
+ Pos Count Pct Acc when GT
321
+ -----------------------------------
322
+ A 319 26.5% 51.1%
323
+ B 290 24.0% 45.5%
324
+ C 289 24.0% 61.9%
325
+ D 308 25.5% 80.5%
326
+
327
+ Model Prediction Distribution:
328
+ Pos Count Pct Acc when Pred
329
+ -----------------------------------
330
+ A 227 18.8% 71.8%
331
+ B 185 15.3% 71.4%
332
+ C 302 25.0% 59.3%
333
+ D 492 40.8% 50.4%
334
+
335
+ Bias Indicators:
336
+ GT Distribution Std: 1.05%p (uniform=0)
337
+ Pred Distribution Std: 9.76%p (uniform=0)
338
+ Overall Accuracy: 59.9%
339
+
340
+ --- FAR (n=594) ---
341
+
342
+ GT Answer Distribution:
343
+ Pos Count Pct Acc when GT
344
+ -----------------------------------
345
+ A 159 26.8% 52.8%
346
+ B 156 26.3% 41.7%
347
+ C 130 21.9% 63.1%
348
+ D 149 25.1% 86.6%
349
+
350
+ Model Prediction Distribution:
351
+ Pos Count Pct Acc when Pred
352
+ -----------------------------------
353
+ A 114 19.2% 73.7%
354
+ B 81 13.6% 80.2%
355
+ C 135 22.7% 60.7%
356
+ D 264 44.4% 48.9%
357
+
358
+ Bias Indicators:
359
+ GT Distribution Std: 1.90%p (uniform=0)
360
+ Pred Distribution Std: 11.68%p (uniform=0)
361
+ Overall Accuracy: 60.6%
362
+
363
+ --- CLOSE (n=612) ---
364
+
365
+ GT Answer Distribution:
366
+ Pos Count Pct Acc when GT
367
+ -----------------------------------
368
+ A 160 26.1% 49.4%
369
+ B 134 21.9% 50.0%
370
+ C 159 26.0% 61.0%
371
+ D 159 26.0% 74.8%
372
+
373
+ Model Prediction Distribution:
374
+ Pos Count Pct Acc when Pred
375
+ -----------------------------------
376
+ A 113 18.5% 69.9%
377
+ B 104 17.0% 64.4%
378
+ C 167 27.3% 58.1%
379
+ D 228 37.3% 52.2%
380
+
381
+ Bias Indicators:
382
+ GT Distribution Std: 1.79%p (uniform=0)
383
+ Pred Distribution Std: 8.10%p (uniform=0)
384
+ Overall Accuracy: 59.2%
385
+
386
+ ================================================================================
387
+ Model: molmo-7B-O-0924-data_scale_exp_2m
388
+ ================================================================================
389
+
390
+ --- ALL (n=3640) ---
391
+
392
+ GT Answer Distribution:
393
+ Pos Count Pct Acc when GT
394
+ -----------------------------------
395
+ A 958 26.3% 70.9%
396
+ B 912 25.1% 72.4%
397
+ C 852 23.4% 72.1%
398
+ D 918 25.2% 81.8%
399
+
400
+ Model Prediction Distribution:
401
+ Pos Count Pct Acc when Pred
402
+ -----------------------------------
403
+ A 860 23.6% 79.0%
404
+ B 824 22.6% 80.1%
405
+ C 807 22.2% 76.1%
406
+ D 1149 31.6% 65.4%
407
+
408
+ Bias Indicators:
409
+ GT Distribution Std: 1.04%p (uniform=0)
410
+ Pred Distribution Std: 3.83%p (uniform=0)
411
+ Overall Accuracy: 74.3%
412
+
413
+ --- FAR+CLOSE (n=1206) ---
414
+
415
+ GT Answer Distribution:
416
+ Pos Count Pct Acc when GT
417
+ -----------------------------------
418
+ A 319 26.5% 56.4%
419
+ B 290 24.0% 54.5%
420
+ C 289 24.0% 59.5%
421
+ D 308 25.5% 72.1%
422
+
423
+ Model Prediction Distribution:
424
+ Pos Count Pct Acc when Pred
425
+ -----------------------------------
426
+ A 276 22.9% 65.2%
427
+ B 227 18.8% 69.6%
428
+ C 276 22.9% 62.3%
429
+ D 427 35.4% 52.0%
430
+
431
+ Bias Indicators:
432
+ GT Distribution Std: 1.05%p (uniform=0)
433
+ Pred Distribution Std: 6.23%p (uniform=0)
434
+ Overall Accuracy: 60.7%
435
+
436
+ --- FAR (n=594) ---
437
+
438
+ GT Answer Distribution:
439
+ Pos Count Pct Acc when GT
440
+ -----------------------------------
441
+ A 159 26.8% 49.7%
442
+ B 156 26.3% 53.8%
443
+ C 130 21.9% 55.4%
444
+ D 149 25.1% 77.2%
445
+
446
+ Model Prediction Distribution:
447
+ Pos Count Pct Acc when Pred
448
+ -----------------------------------
449
+ A 127 21.4% 62.2%
450
+ B 119 20.0% 70.6%
451
+ C 116 19.5% 62.1%
452
+ D 232 39.1% 49.6%
453
+
454
+ Bias Indicators:
455
+ GT Distribution Std: 1.90%p (uniform=0)
456
+ Pred Distribution Std: 8.14%p (uniform=0)
457
+ Overall Accuracy: 58.9%
458
+
459
+ --- CLOSE (n=612) ---
460
+
461
+ GT Answer Distribution:
462
+ Pos Count Pct Acc when GT
463
+ -----------------------------------
464
+ A 160 26.1% 63.1%
465
+ B 134 21.9% 55.2%
466
+ C 159 26.0% 62.9%
467
+ D 159 26.0% 67.3%
468
+
469
+ Model Prediction Distribution:
470
+ Pos Count Pct Acc when Pred
471
+ -----------------------------------
472
+ A 149 24.3% 67.8%
473
+ B 108 17.6% 68.5%
474
+ C 160 26.1% 62.5%
475
+ D 195 31.9% 54.9%
476
+
477
+ Bias Indicators:
478
+ GT Distribution Std: 1.79%p (uniform=0)
479
+ Pred Distribution Std: 5.07%p (uniform=0)
480
+ Overall Accuracy: 62.4%
481
+
482
+ ================================================================================
483
+ Model: NVILA-Lite-2B
484
+ ================================================================================
485
+
486
+ --- ALL (n=3640) ---
487
+
488
+ GT Answer Distribution:
489
+ Pos Count Pct Acc when GT
490
+ -----------------------------------
491
+ A 958 26.3% 19.7%
492
+ B 912 25.1% 16.9%
493
+ C 852 23.4% 14.6%
494
+ D 918 25.2% 16.6%
495
+
496
+ Model Prediction Distribution:
497
+ Pos Count Pct Acc when Pred
498
+ -----------------------------------
499
+ A 352 32.0% 53.7%
500
+ B 290 26.3% 53.1%
501
+ C 210 19.1% 59.0%
502
+ D 249 22.6% 61.0%
503
+
504
+ Bias Indicators:
505
+ GT Distribution Std: 1.04%p (uniform=0)
506
+ Pred Distribution Std: 4.77%p (uniform=0)
507
+ Overall Accuracy: 17.0%
508
+
509
+ --- FAR+CLOSE (n=1206) ---
510
+
511
+ GT Answer Distribution:
512
+ Pos Count Pct Acc when GT
513
+ -----------------------------------
514
+ A 319 26.5% 17.6%
515
+ B 290 24.0% 13.8%
516
+ C 289 24.0% 11.4%
517
+ D 308 25.5% 14.3%
518
+
519
+ Model Prediction Distribution:
520
+ Pos Count Pct Acc when Pred
521
+ -----------------------------------
522
+ A 120 33.0% 46.7%
523
+ B 91 25.0% 44.0%
524
+ C 71 19.5% 46.5%
525
+ D 82 22.5% 53.7%
526
+
527
+ Bias Indicators:
528
+ GT Distribution Std: 1.05%p (uniform=0)
529
+ Pred Distribution Std: 4.99%p (uniform=0)
530
+ Overall Accuracy: 14.3%
531
+
532
+ --- FAR (n=594) ---
533
+
534
+ GT Answer Distribution:
535
+ Pos Count Pct Acc when GT
536
+ -----------------------------------
537
+ A 159 26.8% 18.2%
538
+ B 156 26.3% 12.8%
539
+ C 130 21.9% 11.5%
540
+ D 149 25.1% 11.4%
541
+
542
+ Model Prediction Distribution:
543
+ Pos Count Pct Acc when Pred
544
+ -----------------------------------
545
+ A 65 36.5% 44.6%
546
+ B 37 20.8% 54.1%
547
+ C 32 18.0% 46.9%
548
+ D 44 24.7% 38.6%
549
+
550
+ Bias Indicators:
551
+ GT Distribution Std: 1.90%p (uniform=0)
552
+ Pred Distribution Std: 7.07%p (uniform=0)
553
+ Overall Accuracy: 13.6%
554
+
555
+ --- CLOSE (n=612) ---
556
+
557
+ GT Answer Distribution:
558
+ Pos Count Pct Acc when GT
559
+ -----------------------------------
560
+ A 160 26.1% 16.9%
561
+ B 134 21.9% 14.9%
562
+ C 159 26.0% 11.3%
563
+ D 159 26.0% 17.0%
564
+
565
+ Model Prediction Distribution:
566
+ Pos Count Pct Acc when Pred
567
+ -----------------------------------
568
+ A 55 29.6% 49.1%
569
+ B 54 29.0% 37.0%
570
+ C 39 21.0% 46.2%
571
+ D 38 20.4% 71.1%
572
+
573
+ Bias Indicators:
574
+ GT Distribution Std: 1.79%p (uniform=0)
575
+ Pred Distribution Std: 4.31%p (uniform=0)
576
+ Overall Accuracy: 15.0%
577
+
578
+ ================================================================================
579
+ Model: NVILA-Lite-2B-data-scale-exp-80k
580
+ ================================================================================
581
+
582
+ --- ALL (n=3640) ---
583
+
584
+ GT Answer Distribution:
585
+ Pos Count Pct Acc when GT
586
+ -----------------------------------
587
+ A 958 26.3% 63.9%
588
+ B 912 25.1% 69.2%
589
+ C 852 23.4% 61.7%
590
+ D 918 25.2% 65.5%
591
+
592
+ Model Prediction Distribution:
593
+ Pos Count Pct Acc when Pred
594
+ -----------------------------------
595
+ A 930 25.5% 65.8%
596
+ B 1010 27.7% 62.5%
597
+ C 786 21.6% 66.9%
598
+ D 914 25.1% 65.8%
599
+
600
+ Bias Indicators:
601
+ GT Distribution Std: 1.04%p (uniform=0)
602
+ Pred Distribution Std: 2.21%p (uniform=0)
603
+ Overall Accuracy: 65.1%
604
+
605
+ --- FAR+CLOSE (n=1206) ---
606
+
607
+ GT Answer Distribution:
608
+ Pos Count Pct Acc when GT
609
+ -----------------------------------
610
+ A 319 26.5% 58.3%
611
+ B 290 24.0% 52.8%
612
+ C 289 24.0% 43.9%
613
+ D 308 25.5% 46.4%
614
+
615
+ Model Prediction Distribution:
616
+ Pos Count Pct Acc when Pred
617
+ -----------------------------------
618
+ A 363 30.1% 51.2%
619
+ B 337 27.9% 45.4%
620
+ C 234 19.4% 54.3%
621
+ D 272 22.6% 52.6%
622
+
623
+ Bias Indicators:
624
+ GT Distribution Std: 1.05%p (uniform=0)
625
+ Pred Distribution Std: 4.24%p (uniform=0)
626
+ Overall Accuracy: 50.5%
627
+
628
+ --- FAR (n=594) ---
629
+
630
+ GT Answer Distribution:
631
+ Pos Count Pct Acc when GT
632
+ -----------------------------------
633
+ A 159 26.8% 64.8%
634
+ B 156 26.3% 58.3%
635
+ C 130 21.9% 46.2%
636
+ D 149 25.1% 51.0%
637
+
638
+ Model Prediction Distribution:
639
+ Pos Count Pct Acc when Pred
640
+ -----------------------------------
641
+ A 194 32.7% 53.1%
642
+ B 171 28.8% 53.2%
643
+ C 108 18.2% 55.6%
644
+ D 121 20.4% 62.8%
645
+
646
+ Bias Indicators:
647
+ GT Distribution Std: 1.90%p (uniform=0)
648
+ Pred Distribution Std: 5.94%p (uniform=0)
649
+ Overall Accuracy: 55.6%
650
+
651
+ --- CLOSE (n=612) ---
652
+
653
+ GT Answer Distribution:
654
+ Pos Count Pct Acc when GT
655
+ -----------------------------------
656
+ A 160 26.1% 51.9%
657
+ B 134 21.9% 46.3%
658
+ C 159 26.0% 42.1%
659
+ D 159 26.0% 42.1%
660
+
661
+ Model Prediction Distribution:
662
+ Pos Count Pct Acc when Pred
663
+ -----------------------------------
664
+ A 169 27.6% 49.1%
665
+ B 166 27.1% 37.3%
666
+ C 126 20.6% 53.2%
667
+ D 151 24.7% 44.4%
668
+
669
+ Bias Indicators:
670
+ GT Distribution Std: 1.79%p (uniform=0)
671
+ Pred Distribution Std: 2.78%p (uniform=0)
672
+ Overall Accuracy: 45.6%
673
+
674
+ ================================================================================
675
+ Model: NVILA-Lite-2B-data-scale-exp-400k
676
+ ================================================================================
677
+
678
+ --- ALL (n=3640) ---
679
+
680
+ GT Answer Distribution:
681
+ Pos Count Pct Acc when GT
682
+ -----------------------------------
683
+ A 958 26.3% 58.9%
684
+ B 912 25.1% 60.0%
685
+ C 852 23.4% 65.3%
686
+ D 918 25.2% 64.5%
687
+
688
+ Model Prediction Distribution:
689
+ Pos Count Pct Acc when Pred
690
+ -----------------------------------
691
+ A 889 24.4% 63.4%
692
+ B 849 23.3% 64.4%
693
+ C 905 24.9% 61.4%
694
+ D 997 27.4% 59.4%
695
+
696
+ Bias Indicators:
697
+ GT Distribution Std: 1.04%p (uniform=0)
698
+ Pred Distribution Std: 1.49%p (uniform=0)
699
+ Overall Accuracy: 62.1%
700
+
701
+ --- FAR+CLOSE (n=1206) ---
702
+
703
+ GT Answer Distribution:
704
+ Pos Count Pct Acc when GT
705
+ -----------------------------------
706
+ A 319 26.5% 58.9%
707
+ B 290 24.0% 53.4%
708
+ C 289 24.0% 59.5%
709
+ D 308 25.5% 52.9%
710
+
711
+ Model Prediction Distribution:
712
+ Pos Count Pct Acc when Pred
713
+ -----------------------------------
714
+ A 341 28.3% 55.1%
715
+ B 267 22.1% 58.1%
716
+ C 303 25.1% 56.8%
717
+ D 295 24.5% 55.3%
718
+
719
+ Bias Indicators:
720
+ GT Distribution Std: 1.05%p (uniform=0)
721
+ Pred Distribution Std: 2.19%p (uniform=0)
722
+ Overall Accuracy: 56.2%
723
+
724
+ --- FAR (n=594) ---
725
+
726
+ GT Answer Distribution:
727
+ Pos Count Pct Acc when GT
728
+ -----------------------------------
729
+ A 159 26.8% 62.3%
730
+ B 156 26.3% 56.4%
731
+ C 130 21.9% 63.8%
732
+ D 149 25.1% 53.0%
733
+
734
+ Model Prediction Distribution:
735
+ Pos Count Pct Acc when Pred
736
+ -----------------------------------
737
+ A 177 29.8% 55.9%
738
+ B 138 23.2% 63.8%
739
+ C 146 24.6% 56.8%
740
+ D 133 22.4% 59.4%
741
+
742
+ Bias Indicators:
743
+ GT Distribution Std: 1.90%p (uniform=0)
744
+ Pred Distribution Std: 2.88%p (uniform=0)
745
+ Overall Accuracy: 58.8%
746
+
747
+ --- CLOSE (n=612) ---
748
+
749
+ GT Answer Distribution:
750
+ Pos Count Pct Acc when GT
751
+ -----------------------------------
752
+ A 160 26.1% 55.6%
753
+ B 134 21.9% 50.0%
754
+ C 159 26.0% 56.0%
755
+ D 159 26.0% 52.8%
756
+
757
+ Model Prediction Distribution:
758
+ Pos Count Pct Acc when Pred
759
+ -----------------------------------
760
+ A 164 26.8% 54.3%
761
+ B 129 21.1% 51.9%
762
+ C 157 25.7% 56.7%
763
+ D 162 26.5% 51.9%
764
+
765
+ Bias Indicators:
766
+ GT Distribution Std: 1.79%p (uniform=0)
767
+ Pred Distribution Std: 2.30%p (uniform=0)
768
+ Overall Accuracy: 53.8%
769
+
770
+ ================================================================================
771
+ Model: NVILA-Lite-2B-data-scale-exp-800k
772
+ ================================================================================
773
+
774
+ --- ALL (n=3640) ---
775
+
776
+ GT Answer Distribution:
777
+ Pos Count Pct Acc when GT
778
+ -----------------------------------
779
+ A 958 26.3% 68.7%
780
+ B 912 25.1% 65.7%
781
+ C 852 23.4% 71.6%
782
+ D 918 25.2% 73.0%
783
+
784
+ Model Prediction Distribution:
785
+ Pos Count Pct Acc when Pred
786
+ -----------------------------------
787
+ A 931 25.6% 70.7%
788
+ B 794 21.8% 75.4%
789
+ C 893 24.5% 68.3%
790
+ D 1022 28.1% 65.6%
791
+
792
+ Bias Indicators:
793
+ GT Distribution Std: 1.04%p (uniform=0)
794
+ Pred Distribution Std: 2.25%p (uniform=0)
795
+ Overall Accuracy: 69.7%
796
+
797
+ --- FAR+CLOSE (n=1206) ---
798
+
799
+ GT Answer Distribution:
800
+ Pos Count Pct Acc when GT
801
+ -----------------------------------
802
+ A 319 26.5% 65.5%
803
+ B 290 24.0% 50.7%
804
+ C 289 24.0% 58.1%
805
+ D 308 25.5% 57.8%
806
+
807
+ Model Prediction Distribution:
808
+ Pos Count Pct Acc when Pred
809
+ -----------------------------------
810
+ A 381 31.6% 54.9%
811
+ B 236 19.6% 62.3%
812
+ C 283 23.5% 59.4%
813
+ D 306 25.4% 58.2%
814
+
815
+ Bias Indicators:
816
+ GT Distribution Std: 1.05%p (uniform=0)
817
+ Pred Distribution Std: 4.34%p (uniform=0)
818
+ Overall Accuracy: 58.2%
819
+
820
+ --- FAR (n=594) ---
821
+
822
+ GT Answer Distribution:
823
+ Pos Count Pct Acc when GT
824
+ -----------------------------------
825
+ A 159 26.8% 71.7%
826
+ B 156 26.3% 55.1%
827
+ C 130 21.9% 62.3%
828
+ D 149 25.1% 61.1%
829
+
830
+ Model Prediction Distribution:
831
+ Pos Count Pct Acc when Pred
832
+ -----------------------------------
833
+ A 199 33.5% 57.3%
834
+ B 121 20.4% 71.1%
835
+ C 127 21.4% 63.8%
836
+ D 147 24.7% 61.9%
837
+
838
+ Bias Indicators:
839
+ GT Distribution Std: 1.90%p (uniform=0)
840
+ Pred Distribution Std: 5.17%p (uniform=0)
841
+ Overall Accuracy: 62.6%
842
+
843
+ --- CLOSE (n=612) ---
844
+
845
+ GT Answer Distribution:
846
+ Pos Count Pct Acc when GT
847
+ -----------------------------------
848
+ A 160 26.1% 59.4%
849
+ B 134 21.9% 45.5%
850
+ C 159 26.0% 54.7%
851
+ D 159 26.0% 54.7%
852
+
853
+ Model Prediction Distribution:
854
+ Pos Count Pct Acc when Pred
855
+ -----------------------------------
856
+ A 182 29.7% 52.2%
857
+ B 115 18.8% 53.0%
858
+ C 156 25.5% 55.8%
859
+ D 159 26.0% 54.7%
860
+
861
+ Bias Indicators:
862
+ GT Distribution Std: 1.79%p (uniform=0)
863
+ Pred Distribution Std: 3.94%p (uniform=0)
864
+ Overall Accuracy: 53.9%
865
+
866
+ ================================================================================
867
+ Model: NVILA-Lite-2B-data-scale-exp-2m
868
+ ================================================================================
869
+
870
+ --- ALL (n=3640) ---
871
+
872
+ GT Answer Distribution:
873
+ Pos Count Pct Acc when GT
874
+ -----------------------------------
875
+ A 958 26.3% 72.4%
876
+ B 912 25.1% 65.1%
877
+ C 852 23.4% 70.2%
878
+ D 918 25.2% 69.6%
879
+
880
+ Model Prediction Distribution:
881
+ Pos Count Pct Acc when Pred
882
+ -----------------------------------
883
+ A 1008 27.7% 68.8%
884
+ B 784 21.5% 75.8%
885
+ C 892 24.5% 67.0%
886
+ D 956 26.3% 66.8%
887
+
888
+ Bias Indicators:
889
+ GT Distribution Std: 1.04%p (uniform=0)
890
+ Pred Distribution Std: 2.30%p (uniform=0)
891
+ Overall Accuracy: 69.4%
892
+
893
+ --- FAR+CLOSE (n=1206) ---
894
+
895
+ GT Answer Distribution:
896
+ Pos Count Pct Acc when GT
897
+ -----------------------------------
898
+ A 319 26.5% 64.9%
899
+ B 290 24.0% 50.7%
900
+ C 289 24.0% 58.5%
901
+ D 308 25.5% 52.9%
902
+
903
+ Model Prediction Distribution:
904
+ Pos Count Pct Acc when Pred
905
+ -----------------------------------
906
+ A 379 31.4% 54.6%
907
+ B 239 19.8% 61.5%
908
+ C 305 25.3% 55.4%
909
+ D 283 23.5% 57.6%
910
+
911
+ Bias Indicators:
912
+ GT Distribution Std: 1.05%p (uniform=0)
913
+ Pred Distribution Std: 4.20%p (uniform=0)
914
+ Overall Accuracy: 56.9%
915
+
916
+ --- FAR (n=594) ---
917
+
918
+ GT Answer Distribution:
919
+ Pos Count Pct Acc when GT
920
+ -----------------------------------
921
+ A 159 26.8% 69.8%
922
+ B 156 26.3% 55.8%
923
+ C 130 21.9% 64.6%
924
+ D 149 25.1% 55.0%
925
+
926
+ Model Prediction Distribution:
927
+ Pos Count Pct Acc when Pred
928
+ -----------------------------------
929
+ A 194 32.7% 57.2%
930
+ B 126 21.2% 69.0%
931
+ C 144 24.2% 58.3%
932
+ D 130 21.9% 63.1%
933
+
934
+ Bias Indicators:
935
+ GT Distribution Std: 1.90%p (uniform=0)
936
+ Pred Distribution Std: 4.56%p (uniform=0)
937
+ Overall Accuracy: 61.3%
938
+
939
+ --- CLOSE (n=612) ---
940
+
941
+ GT Answer Distribution:
942
+ Pos Count Pct Acc when GT
943
+ -----------------------------------
944
+ A 160 26.1% 60.0%
945
+ B 134 21.9% 44.8%
946
+ C 159 26.0% 53.5%
947
+ D 159 26.0% 50.9%
948
+
949
+ Model Prediction Distribution:
950
+ Pos Count Pct Acc when Pred
951
+ -----------------------------------
952
+ A 185 30.2% 51.9%
953
+ B 113 18.5% 53.1%
954
+ C 161 26.3% 52.8%
955
+ D 153 25.0% 52.9%
956
+
957
+ Bias Indicators:
958
+ GT Distribution Std: 1.79%p (uniform=0)
959
+ Pred Distribution Std: 4.24%p (uniform=0)
960
+ Overall Accuracy: 52.6%
961
+
962
+ ================================================================================
963
+ Model: RoboRefer-2B-SFT
964
+ ================================================================================
965
+
966
+ --- ALL (n=3640) ---
967
+
968
+ GT Answer Distribution:
969
+ Pos Count Pct Acc when GT
970
+ -----------------------------------
971
+ A 958 26.3% 94.1%
972
+ B 912 25.1% 93.9%
973
+ C 852 23.4% 92.1%
974
+ D 918 25.2% 88.0%
975
+
976
+ Model Prediction Distribution:
977
+ Pos Count Pct Acc when Pred
978
+ -----------------------------------
979
+ A 991 27.3% 90.8%
980
+ B 937 25.8% 91.2%
981
+ C 851 23.4% 91.9%
982
+ D 851 23.4% 94.8%
983
+
984
+ Bias Indicators:
985
+ GT Distribution Std: 1.04%p (uniform=0)
986
+ Pred Distribution Std: 1.64%p (uniform=0)
987
+ Overall Accuracy: 92.0%
988
+
989
+ --- FAR+CLOSE (n=1206) ---
990
+
991
+ GT Answer Distribution:
992
+ Pos Count Pct Acc when GT
993
+ -----------------------------------
994
+ A 319 26.5% 88.1%
995
+ B 290 24.0% 84.8%
996
+ C 289 24.0% 81.7%
997
+ D 308 25.5% 76.0%
998
+
999
+ Model Prediction Distribution:
1000
+ Pos Count Pct Acc when Pred
1001
+ -----------------------------------
1002
+ A 352 29.4% 79.5%
1003
+ B 295 24.7% 83.1%
1004
+ C 280 23.4% 83.2%
1005
+ D 269 22.5% 86.6%
1006
+
1007
+ Bias Indicators:
1008
+ GT Distribution Std: 1.05%p (uniform=0)
1009
+ Pred Distribution Std: 2.67%p (uniform=0)
1010
+ Overall Accuracy: 82.7%
1011
+
1012
+ --- FAR (n=594) ---
1013
+
1014
+ GT Answer Distribution:
1015
+ Pos Count Pct Acc when GT
1016
+ -----------------------------------
1017
+ A 159 26.8% 90.6%
1018
+ B 156 26.3% 85.3%
1019
+ C 130 21.9% 76.2%
1020
+ D 149 25.1% 72.5%
1021
+
1022
+ Model Prediction Distribution:
1023
+ Pos Count Pct Acc when Pred
1024
+ -----------------------------------
1025
+ A 186 31.4% 76.9%
1026
+ B 161 27.2% 82.0%
1027
+ C 119 20.1% 82.4%
1028
+ D 126 21.3% 86.5%
1029
+
1030
+ Bias Indicators:
1031
+ GT Distribution Std: 1.90%p (uniform=0)
1032
+ Pred Distribution Std: 4.58%p (uniform=0)
1033
+ Overall Accuracy: 81.5%
1034
+
1035
+ --- CLOSE (n=612) ---
1036
+
1037
+ GT Answer Distribution:
1038
+ Pos Count Pct Acc when GT
1039
+ -----------------------------------
1040
+ A 160 26.1% 85.6%
1041
+ B 134 21.9% 84.3%
1042
+ C 159 26.0% 86.2%
1043
+ D 159 26.0% 79.2%
1044
+
1045
+ Model Prediction Distribution:
1046
+ Pos Count Pct Acc when Pred
1047
+ -----------------------------------
1048
+ A 166 27.5% 82.5%
1049
+ B 134 22.2% 84.3%
1050
+ C 161 26.7% 83.9%
1051
+ D 143 23.7% 86.7%
1052
+
1053
+ Bias Indicators:
1054
+ GT Distribution Std: 1.79%p (uniform=0)
1055
+ Pred Distribution Std: 2.16%p (uniform=0)
1056
+ Overall Accuracy: 83.8%
1057
+
1058
+ ================================================================================
1059
+ Model: Qwen2.5-VL-3B-Instruct
1060
+ ================================================================================
1061
+
1062
+ --- ALL (n=3640) ---
1063
+
1064
+ GT Answer Distribution:
1065
+ Pos Count Pct Acc when GT
1066
+ -----------------------------------
1067
+ A 958 26.3% 51.9%
1068
+ B 912 25.1% 61.5%
1069
+ C 852 23.4% 64.0%
1070
+ D 918 25.2% 72.3%
1071
+
1072
+ Model Prediction Distribution:
1073
+ Pos Count Pct Acc when Pred
1074
+ -----------------------------------
1075
+ A 682 18.7% 72.9%
1076
+ B 863 23.7% 65.0%
1077
+ C 938 25.8% 58.1%
1078
+ D 1157 31.8% 57.4%
1079
+
1080
+ Bias Indicators:
1081
+ GT Distribution Std: 1.04%p (uniform=0)
1082
+ Pred Distribution Std: 4.68%p (uniform=0)
1083
+ Overall Accuracy: 62.3%
1084
+
1085
+ --- FAR+CLOSE (n=1206) ---
1086
+
1087
+ GT Answer Distribution:
1088
+ Pos Count Pct Acc when GT
1089
+ -----------------------------------
1090
+ A 319 26.5% 42.3%
1091
+ B 290 24.0% 47.2%
1092
+ C 289 24.0% 49.8%
1093
+ D 308 25.5% 62.7%
1094
+
1095
+ Model Prediction Distribution:
1096
+ Pos Count Pct Acc when Pred
1097
+ -----------------------------------
1098
+ A 226 18.7% 59.7%
1099
+ B 266 22.1% 51.5%
1100
+ C 301 25.0% 47.8%
1101
+ D 413 34.2% 46.7%
1102
+
1103
+ Bias Indicators:
1104
+ GT Distribution Std: 1.05%p (uniform=0)
1105
+ Pred Distribution Std: 5.77%p (uniform=0)
1106
+ Overall Accuracy: 50.5%
1107
+
1108
+ --- FAR (n=594) ---
1109
+
1110
+ GT Answer Distribution:
1111
+ Pos Count Pct Acc when GT
1112
+ -----------------------------------
1113
+ A 159 26.8% 42.8%
1114
+ B 156 26.3% 48.1%
1115
+ C 130 21.9% 48.5%
1116
+ D 149 25.1% 66.4%
1117
+
1118
+ Model Prediction Distribution:
1119
+ Pos Count Pct Acc when Pred
1120
+ -----------------------------------
1121
+ A 114 19.2% 59.6%
1122
+ B 135 22.7% 55.6%
1123
+ C 134 22.6% 47.0%
1124
+ D 211 35.5% 46.9%
1125
+
1126
+ Bias Indicators:
1127
+ GT Distribution Std: 1.90%p (uniform=0)
1128
+ Pred Distribution Std: 6.24%p (uniform=0)
1129
+ Overall Accuracy: 51.3%
1130
+
1131
+ --- CLOSE (n=612) ---
1132
+
1133
+ GT Answer Distribution:
1134
+ Pos Count Pct Acc when GT
1135
+ -----------------------------------
1136
+ A 160 26.1% 41.9%
1137
+ B 134 21.9% 46.3%
1138
+ C 159 26.0% 50.9%
1139
+ D 159 26.0% 59.1%
1140
+
1141
+ Model Prediction Distribution:
1142
+ Pos Count Pct Acc when Pred
1143
+ -----------------------------------
1144
+ A 112 18.3% 59.8%
1145
+ B 131 21.4% 47.3%
1146
+ C 167 27.3% 48.5%
1147
+ D 202 33.0% 46.5%
1148
+
1149
+ Bias Indicators:
1150
+ GT Distribution Std: 1.79%p (uniform=0)
1151
+ Pred Distribution Std: 5.64%p (uniform=0)
1152
+ Overall Accuracy: 49.7%
1153
+
1154
+ ================================================================================
1155
+ Model: Qwen2.5-VL-3B-Instruct-data_scale_exp_80k
1156
+ ================================================================================
1157
+
1158
+ --- ALL (n=3640) ---
1159
+
1160
+ GT Answer Distribution:
1161
+ Pos Count Pct Acc when GT
1162
+ -----------------------------------
1163
+ A 958 26.3% 47.9%
1164
+ B 912 25.1% 57.0%
1165
+ C 852 23.4% 61.6%
1166
+ D 918 25.2% 63.5%
1167
+
1168
+ Model Prediction Distribution:
1169
+ Pos Count Pct Acc when Pred
1170
+ -----------------------------------
1171
+ A 704 19.3% 65.2%
1172
+ B 883 24.3% 58.9%
1173
+ C 974 26.8% 53.9%
1174
+ D 1079 29.6% 54.0%
1175
+
1176
+ Bias Indicators:
1177
+ GT Distribution Std: 1.04%p (uniform=0)
1178
+ Pred Distribution Std: 3.78%p (uniform=0)
1179
+ Overall Accuracy: 57.3%
1180
+
1181
+ --- FAR+CLOSE (n=1206) ---
1182
+
1183
+ GT Answer Distribution:
1184
+ Pos Count Pct Acc when GT
1185
+ -----------------------------------
1186
+ A 319 26.5% 41.7%
1187
+ B 290 24.0% 45.5%
1188
+ C 289 24.0% 45.3%
1189
+ D 308 25.5% 54.5%
1190
+
1191
+ Model Prediction Distribution:
1192
+ Pos Count Pct Acc when Pred
1193
+ -----------------------------------
1194
+ A 238 19.7% 55.9%
1195
+ B 270 22.4% 48.9%
1196
+ C 296 24.5% 44.3%
1197
+ D 402 33.3% 41.8%
1198
+
1199
+ Bias Indicators:
1200
+ GT Distribution Std: 1.05%p (uniform=0)
1201
+ Pred Distribution Std: 5.10%p (uniform=0)
1202
+ Overall Accuracy: 46.8%
1203
+
1204
+ --- FAR (n=594) ---
1205
+
1206
+ GT Answer Distribution:
1207
+ Pos Count Pct Acc when GT
1208
+ -----------------------------------
1209
+ A 159 26.8% 44.0%
1210
+ B 156 26.3% 46.8%
1211
+ C 130 21.9% 46.2%
1212
+ D 149 25.1% 61.7%
1213
+
1214
+ Model Prediction Distribution:
1215
+ Pos Count Pct Acc when Pred
1216
+ -----------------------------------
1217
+ A 123 20.7% 56.9%
1218
+ B 131 22.1% 55.7%
1219
+ C 139 23.4% 43.2%
1220
+ D 201 33.8% 45.8%
1221
+
1222
+ Bias Indicators:
1223
+ GT Distribution Std: 1.90%p (uniform=0)
1224
+ Pred Distribution Std: 5.19%p (uniform=0)
1225
+ Overall Accuracy: 49.7%
1226
+
1227
+ --- CLOSE (n=612) ---
1228
+
1229
+ GT Answer Distribution:
1230
+ Pos Count Pct Acc when GT
1231
+ -----------------------------------
1232
+ A 160 26.1% 39.4%
1233
+ B 134 21.9% 44.0%
1234
+ C 159 26.0% 44.7%
1235
+ D 159 26.0% 47.8%
1236
+
1237
+ Model Prediction Distribution:
1238
+ Pos Count Pct Acc when Pred
1239
+ -----------------------------------
1240
+ A 115 18.8% 54.8%
1241
+ B 139 22.7% 42.4%
1242
+ C 157 25.7% 45.2%
1243
+ D 201 32.8% 37.8%
1244
+
1245
+ Bias Indicators:
1246
+ GT Distribution Std: 1.79%p (uniform=0)
1247
+ Pred Distribution Std: 5.14%p (uniform=0)
1248
+ Overall Accuracy: 44.0%
1249
+
1250
+ ================================================================================
1251
+ Model: Qwen2.5-VL-3B-Instruct-data_scale_exp_400k
1252
+ ================================================================================
1253
+
1254
+ --- ALL (n=3640) ---
1255
+
1256
+ GT Answer Distribution:
1257
+ Pos Count Pct Acc when GT
1258
+ -----------------------------------
1259
+ A 958 26.3% 50.1%
1260
+ B 912 25.1% 56.6%
1261
+ C 852 23.4% 62.3%
1262
+ D 918 25.2% 66.1%
1263
+
1264
+ Model Prediction Distribution:
1265
+ Pos Count Pct Acc when Pred
1266
+ -----------------------------------
1267
+ A 736 20.2% 65.2%
1268
+ B 825 22.7% 62.5%
1269
+ C 966 26.5% 55.0%
1270
+ D 1113 30.6% 54.5%
1271
+
1272
+ Bias Indicators:
1273
+ GT Distribution Std: 1.04%p (uniform=0)
1274
+ Pred Distribution Std: 3.93%p (uniform=0)
1275
+ Overall Accuracy: 58.6%
1276
+
1277
+ --- FAR+CLOSE (n=1206) ---
1278
+
1279
+ GT Answer Distribution:
1280
+ Pos Count Pct Acc when GT
1281
+ -----------------------------------
1282
+ A 319 26.5% 43.6%
1283
+ B 290 24.0% 43.4%
1284
+ C 289 24.0% 45.3%
1285
+ D 308 25.5% 60.4%
1286
+
1287
+ Model Prediction Distribution:
1288
+ Pos Count Pct Acc when Pred
1289
+ -----------------------------------
1290
+ A 243 20.1% 57.2%
1291
+ B 238 19.7% 52.9%
1292
+ C 299 24.8% 43.8%
1293
+ D 426 35.3% 43.7%
1294
+
1295
+ Bias Indicators:
1296
+ GT Distribution Std: 1.05%p (uniform=0)
1297
+ Pred Distribution Std: 6.28%p (uniform=0)
1298
+ Overall Accuracy: 48.3%
1299
+
1300
+ --- FAR (n=594) ---
1301
+
1302
+ GT Answer Distribution:
1303
+ Pos Count Pct Acc when GT
1304
+ -----------------------------------
1305
+ A 159 26.8% 46.5%
1306
+ B 156 26.3% 44.9%
1307
+ C 130 21.9% 48.5%
1308
+ D 149 25.1% 65.8%
1309
+
1310
+ Model Prediction Distribution:
1311
+ Pos Count Pct Acc when Pred
1312
+ -----------------------------------
1313
+ A 126 21.2% 58.7%
1314
+ B 118 19.9% 59.3%
1315
+ C 143 24.1% 44.1%
1316
+ D 207 34.8% 47.3%
1317
+
1318
+ Bias Indicators:
1319
+ GT Distribution Std: 1.90%p (uniform=0)
1320
+ Pred Distribution Std: 5.89%p (uniform=0)
1321
+ Overall Accuracy: 51.3%
1322
+
1323
+ --- CLOSE (n=612) ---
1324
+
1325
+ GT Answer Distribution:
1326
+ Pos Count Pct Acc when GT
1327
+ -----------------------------------
1328
+ A 160 26.1% 40.6%
1329
+ B 134 21.9% 41.8%
1330
+ C 159 26.0% 42.8%
1331
+ D 159 26.0% 55.3%
1332
+
1333
+ Model Prediction Distribution:
1334
+ Pos Count Pct Acc when Pred
1335
+ -----------------------------------
1336
+ A 117 19.1% 55.6%
1337
+ B 120 19.6% 46.7%
1338
+ C 156 25.5% 43.6%
1339
+ D 219 35.8% 40.2%
1340
+
1341
+ Bias Indicators:
1342
+ GT Distribution Std: 1.79%p (uniform=0)
1343
+ Pred Distribution Std: 6.71%p (uniform=0)
1344
+ Overall Accuracy: 45.3%
1345
+
1346
+ ================================================================================
1347
+ Model: Qwen2.5-VL-3B-Instruct-data_scale_exp_800k
1348
+ ================================================================================
1349
+
1350
+ --- ALL (n=3640) ---
1351
+
1352
+ GT Answer Distribution:
1353
+ Pos Count Pct Acc when GT
1354
+ -----------------------------------
1355
+ A 958 26.3% 55.7%
1356
+ B 912 25.1% 57.9%
1357
+ C 852 23.4% 63.5%
1358
+ D 918 25.2% 66.7%
1359
+
1360
+ Model Prediction Distribution:
1361
+ Pos Count Pct Acc when Pred
1362
+ -----------------------------------
1363
+ A 814 22.4% 65.6%
1364
+ B 810 22.3% 65.2%
1365
+ C 937 25.7% 57.7%
1366
+ D 1079 29.6% 56.7%
1367
+
1368
+ Bias Indicators:
1369
+ GT Distribution Std: 1.04%p (uniform=0)
1370
+ Pred Distribution Std: 3.03%p (uniform=0)
1371
+ Overall Accuracy: 60.9%
1372
+
1373
+ --- FAR+CLOSE (n=1206) ---
1374
+
1375
+ GT Answer Distribution:
1376
+ Pos Count Pct Acc when GT
1377
+ -----------------------------------
1378
+ A 319 26.5% 51.4%
1379
+ B 290 24.0% 45.9%
1380
+ C 289 24.0% 45.7%
1381
+ D 308 25.5% 58.8%
1382
+
1383
+ Model Prediction Distribution:
1384
+ Pos Count Pct Acc when Pred
1385
+ -----------------------------------
1386
+ A 294 24.4% 55.8%
1387
+ B 235 19.5% 56.6%
1388
+ C 283 23.5% 46.6%
1389
+ D 394 32.7% 45.9%
1390
+
1391
+ Bias Indicators:
1392
+ GT Distribution Std: 1.05%p (uniform=0)
1393
+ Pred Distribution Std: 4.80%p (uniform=0)
1394
+ Overall Accuracy: 50.6%
1395
+
1396
+ --- FAR (n=594) ---
1397
+
1398
+ GT Answer Distribution:
1399
+ Pos Count Pct Acc when GT
1400
+ -----------------------------------
1401
+ A 159 26.8% 56.6%
1402
+ B 156 26.3% 46.8%
1403
+ C 130 21.9% 50.0%
1404
+ D 149 25.1% 65.1%
1405
+
1406
+ Model Prediction Distribution:
1407
+ Pos Count Pct Acc when Pred
1408
+ -----------------------------------
1409
+ A 156 26.3% 57.7%
1410
+ B 109 18.4% 67.0%
1411
+ C 139 23.4% 46.8%
1412
+ D 190 32.0% 51.1%
1413
+
1414
+ Bias Indicators:
1415
+ GT Distribution Std: 1.90%p (uniform=0)
1416
+ Pred Distribution Std: 4.93%p (uniform=0)
1417
+ Overall Accuracy: 54.7%
1418
+
1419
+ --- CLOSE (n=612) ---
1420
+
1421
+ GT Answer Distribution:
1422
+ Pos Count Pct Acc when GT
1423
+ -----------------------------------
1424
+ A 160 26.1% 46.2%
1425
+ B 134 21.9% 44.8%
1426
+ C 159 26.0% 42.1%
1427
+ D 159 26.0% 52.8%
1428
+
1429
+ Model Prediction Distribution:
1430
+ Pos Count Pct Acc when Pred
1431
+ -----------------------------------
1432
+ A 138 22.5% 53.6%
1433
+ B 126 20.6% 47.6%
1434
+ C 144 23.5% 46.5%
1435
+ D 204 33.3% 41.2%
1436
+
1437
+ Bias Indicators:
1438
+ GT Distribution Std: 1.79%p (uniform=0)
1439
+ Pred Distribution Std: 4.93%p (uniform=0)
1440
+ Overall Accuracy: 46.6%
1441
+
1442
+ ================================================================================
1443
+ Model: Qwen2.5-VL-3B-Instruct-data_scale_exp_2m
1444
+ ================================================================================
1445
+
1446
+ --- ALL (n=3640) ---
1447
+
1448
+ GT Answer Distribution:
1449
+ Pos Count Pct Acc when GT
1450
+ -----------------------------------
1451
+ A 958 26.3% 63.3%
1452
+ B 912 25.1% 63.7%
1453
+ C 852 23.4% 69.8%
1454
+ D 918 25.2% 66.6%
1455
+
1456
+ Model Prediction Distribution:
1457
+ Pos Count Pct Acc when Pred
1458
+ -----------------------------------
1459
+ A 890 24.5% 68.1%
1460
+ B 831 22.8% 69.9%
1461
+ C 961 26.4% 61.9%
1462
+ D 958 26.3% 63.8%
1463
+
1464
+ Bias Indicators:
1465
+ GT Distribution Std: 1.04%p (uniform=0)
1466
+ Pred Distribution Std: 1.48%p (uniform=0)
1467
+ Overall Accuracy: 65.7%
1468
+
1469
+ --- FAR+CLOSE (n=1206) ---
1470
+
1471
+ GT Answer Distribution:
1472
+ Pos Count Pct Acc when GT
1473
+ -----------------------------------
1474
+ A 319 26.5% 57.4%
1475
+ B 290 24.0% 50.0%
1476
+ C 289 24.0% 54.7%
1477
+ D 308 25.5% 55.8%
1478
+
1479
+ Model Prediction Distribution:
1480
+ Pos Count Pct Acc when Pred
1481
+ -----------------------------------
1482
+ A 322 26.7% 56.8%
1483
+ B 245 20.3% 59.2%
1484
+ C 309 25.6% 51.1%
1485
+ D 330 27.4% 52.1%
1486
+
1487
+ Bias Indicators:
1488
+ GT Distribution Std: 1.05%p (uniform=0)
1489
+ Pred Distribution Std: 2.78%p (uniform=0)
1490
+ Overall Accuracy: 54.6%
1491
+
1492
+ --- FAR (n=594) ---
1493
+
1494
+ GT Answer Distribution:
1495
+ Pos Count Pct Acc when GT
1496
+ -----------------------------------
1497
+ A 159 26.8% 61.0%
1498
+ B 156 26.3% 50.0%
1499
+ C 130 21.9% 53.1%
1500
+ D 149 25.1% 59.7%
1501
+
1502
+ Model Prediction Distribution:
1503
+ Pos Count Pct Acc when Pred
1504
+ -----------------------------------
1505
+ A 165 27.8% 58.8%
1506
+ B 127 21.4% 61.4%
1507
+ C 146 24.6% 47.3%
1508
+ D 156 26.3% 57.1%
1509
+
1510
+ Bias Indicators:
1511
+ GT Distribution Std: 1.90%p (uniform=0)
1512
+ Pred Distribution Std: 2.38%p (uniform=0)
1513
+ Overall Accuracy: 56.1%
1514
+
1515
+ --- CLOSE (n=612) ---
1516
+
1517
+ GT Answer Distribution:
1518
+ Pos Count Pct Acc when GT
1519
+ -----------------------------------
1520
+ A 160 26.1% 53.8%
1521
+ B 134 21.9% 50.0%
1522
+ C 159 26.0% 56.0%
1523
+ D 159 26.0% 52.2%
1524
+
1525
+ Model Prediction Distribution:
1526
+ Pos Count Pct Acc when Pred
1527
+ -----------------------------------
1528
+ A 157 25.7% 54.8%
1529
+ B 118 19.3% 56.8%
1530
+ C 163 26.6% 54.6%
1531
+ D 174 28.4% 47.7%
1532
+
1533
+ Bias Indicators:
1534
+ GT Distribution Std: 1.79%p (uniform=0)
1535
+ Pred Distribution Std: 3.45%p (uniform=0)
1536
+ Overall Accuracy: 53.1%
1537
+
1538
+ ====================================================================================================
1539
+ MODEL BIAS COMPARISON SUMMARY
1540
+ ====================================================================================================
1541
+
1542
+ Model Subset GT Std Pred Std Pred Max Acc
1543
+ -------------------------------------------------------------------------------------------------
1544
+ molmo-7B-O-0924 ALL 1.0%p 4.3%p A(30.3%) 60.7%
1545
+ FAR+CLOSE 1.0%p 1.9%p A(27.3%) 59.0%
1546
+ FAR 1.9%p 1.7%p D(27.9%) 61.6%
1547
+ CLOSE 1.8%p 3.5%p A(30.7%) 56.4%
1548
+ molmo-7B-O-0924-data_scale_exp_80k ALL 1.0%p 3.6%p C(28.4%) 52.9%
1549
+ FAR+CLOSE 1.0%p 1.3%p D(26.5%) 55.2%
1550
+ FAR 1.9%p 2.5%p B(27.4%) 52.0%
1551
+ CLOSE 1.8%p 3.0%p A(28.4%) 58.3%
1552
+ molmo-7B-O-0924-data_scale_exp_400k ALL 1.0%p 1.4%p C(26.6%) 64.9%
1553
+ FAR+CLOSE 1.0%p 2.2%p C(28.5%) 56.6%
1554
+ FAR 1.9%p 2.7%p C(28.8%) 58.8%
1555
+ CLOSE 1.8%p 2.0%p C(28.3%) 54.4%
1556
+ molmo-7B-O-0924-data_scale_exp_800k ALL 1.0%p 4.9%p D(32.0%) 69.1%
1557
+ FAR+CLOSE 1.0%p 9.8%p D(40.8%) 59.9%
1558
+ FAR 1.9%p 11.7%p D(44.4%) 60.6%
1559
+ CLOSE 1.8%p 8.1%p D(37.3%) 59.2%
1560
+ molmo-7B-O-0924-data_scale_exp_2m ALL 1.0%p 3.8%p D(31.6%) 74.3%
1561
+ FAR+CLOSE 1.0%p 6.2%p D(35.4%) 60.7%
1562
+ FAR 1.9%p 8.1%p D(39.1%) 58.9%
1563
+ CLOSE 1.8%p 5.1%p D(31.9%) 62.4%
1564
+ NVILA-Lite-2B ALL 1.0%p 4.8%p A(32.0%) 17.0%
1565
+ FAR+CLOSE 1.0%p 5.0%p A(33.0%) 14.3%
1566
+ FAR 1.9%p 7.1%p A(36.5%) 13.6%
1567
+ CLOSE 1.8%p 4.3%p A(29.6%) 15.0%
1568
+ NVILA-Lite-2B-data-scale-exp-80k ALL 1.0%p 2.2%p B(27.7%) 65.1%
1569
+ FAR+CLOSE 1.0%p 4.2%p A(30.1%) 50.5%
1570
+ FAR 1.9%p 5.9%p A(32.7%) 55.6%
1571
+ CLOSE 1.8%p 2.8%p A(27.6%) 45.6%
1572
+ NVILA-Lite-2B-data-scale-exp-400k ALL 1.0%p 1.5%p D(27.4%) 62.1%
1573
+ FAR+CLOSE 1.0%p 2.2%p A(28.3%) 56.2%
1574
+ FAR 1.9%p 2.9%p A(29.8%) 58.8%
1575
+ CLOSE 1.8%p 2.3%p A(26.8%) 53.8%
1576
+ NVILA-Lite-2B-data-scale-exp-800k ALL 1.0%p 2.2%p D(28.1%) 69.7%
1577
+ FAR+CLOSE 1.0%p 4.3%p A(31.6%) 58.2%
1578
+ FAR 1.9%p 5.2%p A(33.5%) 62.6%
1579
+ CLOSE 1.8%p 3.9%p A(29.7%) 53.9%
1580
+ NVILA-Lite-2B-data-scale-exp-2m ALL 1.0%p 2.3%p A(27.7%) 69.4%
1581
+ FAR+CLOSE 1.0%p 4.2%p A(31.4%) 56.9%
1582
+ FAR 1.9%p 4.6%p A(32.7%) 61.3%
1583
+ CLOSE 1.8%p 4.2%p A(30.2%) 52.6%
1584
+ RoboRefer-2B-SFT ALL 1.0%p 1.6%p A(27.3%) 92.0%
1585
+ FAR+CLOSE 1.0%p 2.7%p A(29.4%) 82.7%
1586
+ FAR 1.9%p 4.6%p A(31.4%) 81.5%
1587
+ CLOSE 1.8%p 2.2%p A(27.5%) 83.8%
1588
+ Qwen2.5-VL-3B-Instruct ALL 1.0%p 4.7%p D(31.8%) 62.3%
1589
+ FAR+CLOSE 1.0%p 5.8%p D(34.2%) 50.5%
1590
+ FAR 1.9%p 6.2%p D(35.5%) 51.3%
1591
+ CLOSE 1.8%p 5.6%p D(33.0%) 49.7%
1592
+ Qwen2.5-VL-3B-Instruct-data_scale_exp_80k ALL 1.0%p 3.8%p D(29.6%) 57.3%
1593
+ FAR+CLOSE 1.0%p 5.1%p D(33.3%) 46.8%
1594
+ FAR 1.9%p 5.2%p D(33.8%) 49.7%
1595
+ CLOSE 1.8%p 5.1%p D(32.8%) 44.0%
1596
+ Qwen2.5-VL-3B-Instruct-data_scale_exp_400k ALL 1.0%p 3.9%p D(30.6%) 58.6%
1597
+ FAR+CLOSE 1.0%p 6.3%p D(35.3%) 48.3%
1598
+ FAR 1.9%p 5.9%p D(34.8%) 51.3%
1599
+ CLOSE 1.8%p 6.7%p D(35.8%) 45.3%
1600
+ Qwen2.5-VL-3B-Instruct-data_scale_exp_800k ALL 1.0%p 3.0%p D(29.6%) 60.9%
1601
+ FAR+CLOSE 1.0%p 4.8%p D(32.7%) 50.6%
1602
+ FAR 1.9%p 4.9%p D(32.0%) 54.7%
1603
+ CLOSE 1.8%p 4.9%p D(33.3%) 46.6%
1604
+ Qwen2.5-VL-3B-Instruct-data_scale_exp_2m ALL 1.0%p 1.5%p C(26.4%) 65.7%
1605
+ FAR+CLOSE 1.0%p 2.8%p D(27.4%) 54.6%
1606
+ FAR 1.9%p 2.4%p A(27.8%) 56.1%
1607
+ CLOSE 1.8%p 3.4%p D(28.4%) 53.1%
correct_filter/results/nvila/correct_only/csv/similarity_2m_L0.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999998,0.99979234,0.9990382,0.99845904,0.99873054,0.99917614
3
+ right,0.99979234,1.0000002,0.99917865,0.9987631,0.9989448,0.9993073
4
+ above,0.9990382,0.99917865,0.9999999,0.9996766,0.99956864,0.9995194
5
+ under,0.99845904,0.9987631,0.9996766,0.99999976,0.9995517,0.9991601
6
+ far,0.99873054,0.9989448,0.99956864,0.9995517,1.0000004,0.9997673
7
+ close,0.99917614,0.9993073,0.9995194,0.9991601,0.9997673,1.0000001
correct_filter/results/nvila/correct_only/csv/similarity_2m_L10.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.99946415,0.99536633,0.9942555,0.9812977,0.98185515
3
+ right,0.99946415,1.0000001,0.9956668,0.9950104,0.98220986,0.9825698
4
+ above,0.99536633,0.9956668,1.0000004,0.9991444,0.98727834,0.9875144
5
+ under,0.9942555,0.9950104,0.9991444,1.0000002,0.98638064,0.9860885
6
+ far,0.9812977,0.98220986,0.98727834,0.98638064,1.0,0.9996808
7
+ close,0.98185515,0.9825698,0.9875144,0.9860885,0.9996808,0.9999997
correct_filter/results/nvila/correct_only/csv/similarity_2m_L12.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999994,0.9992977,0.9910708,0.9898012,0.97599757,0.9766798
3
+ right,0.9992977,0.9999998,0.9917161,0.9909911,0.97698414,0.9776015
4
+ above,0.9910708,0.9917161,1.0,0.9988291,0.98402643,0.98436046
5
+ under,0.9898012,0.9909911,0.9988291,0.9999997,0.98302853,0.9827963
6
+ far,0.97599757,0.97698414,0.98402643,0.98302853,0.99999976,0.9993965
7
+ close,0.9766798,0.9776015,0.98436046,0.9827963,0.9993965,0.99999934
correct_filter/results/nvila/correct_only/csv/similarity_2m_L15.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000006,0.9960951,0.9754069,0.9750978,0.9668617,0.9666722
3
+ right,0.9960951,1.0000005,0.9735367,0.9757243,0.9667201,0.966353
4
+ above,0.9754069,0.9735367,1.000001,0.99429655,0.9720154,0.9716815
5
+ under,0.9750978,0.9757243,0.99429655,0.99999964,0.9723926,0.9707299
6
+ far,0.9668617,0.9667201,0.9720154,0.9723926,0.9999996,0.9988003
7
+ close,0.9666722,0.966353,0.9716815,0.9707299,0.9988003,1.0000001
correct_filter/results/nvila/correct_only/csv/similarity_2m_L20.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0,0.8997723,0.8560219,0.84100085,0.812119,0.81015867
3
+ right,0.8997723,1.0,0.8408875,0.8583672,0.8323572,0.82982045
4
+ above,0.8560219,0.8408875,0.99999976,0.83596146,0.8394874,0.81615543
5
+ under,0.84100085,0.8583672,0.83596146,1.0000001,0.8413338,0.8540078
6
+ far,0.812119,0.8323572,0.8394874,0.8413338,1.0000002,0.9614612
7
+ close,0.81015867,0.82982045,0.81615543,0.8540078,0.9614612,1.0000004
correct_filter/results/nvila/correct_only/csv/similarity_2m_L21.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999964,0.9202951,0.8803115,0.8690176,0.844957,0.8430869
3
+ right,0.9202951,0.9999999,0.86974925,0.8861736,0.8690333,0.8677544
4
+ above,0.8803115,0.86974925,0.9999997,0.8800344,0.8820302,0.865402
5
+ under,0.8690176,0.8861736,0.8800344,1.0,0.8819223,0.8901774
6
+ far,0.844957,0.8690333,0.8820302,0.8819223,1.0000001,0.9756321
7
+ close,0.8430869,0.8677544,0.865402,0.8901774,0.9756321,0.99999934
correct_filter/results/nvila/correct_only/csv/similarity_2m_L24.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999998,0.9441167,0.8852379,0.877577,0.8462348,0.84368265
3
+ right,0.9441167,0.9999995,0.86733294,0.88120615,0.8449521,0.8402463
4
+ above,0.8852379,0.86733294,1.0,0.9256213,0.8766693,0.86528826
5
+ under,0.877577,0.88120615,0.9256213,0.99999964,0.8739595,0.8758325
6
+ far,0.8462348,0.8449521,0.8766693,0.8739595,1.0,0.9843055
7
+ close,0.84368265,0.8402463,0.86528826,0.8758325,0.9843055,0.9999998
correct_filter/results/nvila/correct_only/csv/similarity_2m_L4.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0,0.99861413,0.99788797,0.99436826,0.9940919,0.9947342
3
+ right,0.99861413,1.0000002,0.99827963,0.9968713,0.9948081,0.9949278
4
+ above,0.99788797,0.99827963,1.0000002,0.99800307,0.9956355,0.9955324
5
+ under,0.99436826,0.9968713,0.99800307,1.0000001,0.9943704,0.99312276
6
+ far,0.9940919,0.9948081,0.9956355,0.9943704,1.0,0.9994365
7
+ close,0.9947342,0.9949278,0.9955324,0.99312276,0.9994365,1.0000002
correct_filter/results/nvila/correct_only/csv/similarity_400k_L0.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999964,0.99937326,0.99939746,0.9992237,0.99935555,0.99926114
3
+ right,0.99937326,1.0,0.99907005,0.9993704,0.9991206,0.9992242
4
+ above,0.99939746,0.99907005,1.0000002,0.999896,0.99969333,0.9996633
5
+ under,0.9992237,0.9993704,0.999896,1.0000002,0.9996517,0.99970394
6
+ far,0.99935555,0.9991206,0.99969333,0.9996517,0.9999995,0.99996144
7
+ close,0.99926114,0.9992242,0.9996633,0.99970394,0.99996144,0.99999946
correct_filter/results/nvila/correct_only/csv/similarity_400k_L10.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999994,0.9989206,0.9917971,0.99177206,0.9653694,0.9650217
3
+ right,0.9989206,0.99999994,0.9908831,0.991404,0.96409506,0.9640688
4
+ above,0.9917971,0.9908831,1.0,0.9998317,0.98056906,0.98046494
5
+ under,0.99177206,0.991404,0.9998317,1.0,0.9801248,0.9800962
6
+ far,0.9653694,0.96409506,0.98056906,0.9801248,0.99999976,0.99993694
7
+ close,0.9650217,0.9640688,0.98046494,0.9800962,0.99993694,1.0000001
correct_filter/results/nvila/correct_only/csv/similarity_400k_L12.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999998,0.9988699,0.98620236,0.9862741,0.961798,0.9616283
3
+ right,0.9988699,0.99999994,0.9853998,0.98599064,0.9603799,0.9605291
4
+ above,0.98620236,0.9853998,0.9999995,0.99963886,0.9798792,0.9798204
5
+ under,0.9862741,0.98599064,0.99963886,0.9999998,0.97985244,0.9798428
6
+ far,0.961798,0.9603799,0.9798792,0.97985244,0.9999997,0.9998978
7
+ close,0.9616283,0.9605291,0.9798204,0.9798428,0.9998978,1.0000001
correct_filter/results/nvila/correct_only/csv/similarity_400k_L13.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000001,0.9983739,0.98168665,0.98217195,0.95686316,0.956294
3
+ right,0.9983739,0.9999995,0.98163897,0.9828724,0.9566915,0.9564952
4
+ above,0.98168665,0.98163897,0.99999917,0.9987243,0.975779,0.9747824
5
+ under,0.98217195,0.9828724,0.9987243,0.99999976,0.9764465,0.9758391
6
+ far,0.95686316,0.9566915,0.975779,0.9764465,1.0000002,0.99971694
7
+ close,0.956294,0.9564952,0.9747824,0.9758391,0.99971694,1.0
correct_filter/results/nvila/correct_only/csv/similarity_400k_L14.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999998,0.9985205,0.98138416,0.9822928,0.9568843,0.9561093
3
+ right,0.9985205,1.0000004,0.98126113,0.9829378,0.95713615,0.9566823
4
+ above,0.98138416,0.98126113,0.9999999,0.9986111,0.9755904,0.97441
5
+ under,0.9822928,0.9829378,0.9986111,1.0000001,0.9771458,0.9763574
6
+ far,0.9568843,0.95713615,0.9755904,0.9771458,0.9999996,0.9997202
7
+ close,0.9561093,0.9566823,0.97441,0.9763574,0.9997202,0.9999998
correct_filter/results/nvila/correct_only/csv/similarity_400k_L15.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999976,0.9961381,0.96402776,0.9712392,0.94819504,0.94753337
3
+ right,0.9961381,0.99999946,0.96187425,0.9719884,0.94920367,0.9488889
4
+ above,0.96402776,0.96187425,0.99999994,0.99324864,0.95932007,0.9581619
5
+ under,0.9712392,0.9719884,0.99324864,1.0000002,0.9649157,0.9638857
6
+ far,0.94819504,0.94920367,0.95932007,0.9649157,0.99999964,0.9996454
7
+ close,0.94753337,0.9488889,0.9581619,0.9638857,0.9996454,0.9999997
correct_filter/results/nvila/correct_only/csv/similarity_400k_L20.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.94959533,0.9076828,0.8961903,0.8253381,0.8177571
3
+ right,0.94959533,1.0000001,0.89672065,0.9084783,0.8551606,0.85423845
4
+ above,0.9076828,0.89672065,0.9999996,0.9012549,0.8612656,0.83851355
5
+ under,0.8961903,0.9084783,0.9012549,0.99999964,0.8549114,0.87088656
6
+ far,0.8253381,0.8551606,0.8612656,0.8549114,0.9999998,0.97840255
7
+ close,0.8177571,0.85423845,0.83851355,0.87088656,0.97840255,1.0
correct_filter/results/nvila/correct_only/csv/similarity_400k_L21.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999976,0.9635052,0.92306554,0.9076893,0.8688898,0.86037034
3
+ right,0.9635052,1.0000005,0.9213424,0.9254695,0.90012884,0.8971458
4
+ above,0.92306554,0.9213424,1.0000001,0.9205528,0.89912474,0.88151306
5
+ under,0.9076893,0.9254695,0.9205528,1.0000001,0.88736314,0.8975957
6
+ far,0.8688898,0.90012884,0.89912474,0.88736314,1.0000001,0.98656934
7
+ close,0.86037034,0.8971458,0.88151306,0.8975957,0.98656934,1.0
correct_filter/results/nvila/correct_only/csv/similarity_400k_L22.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.96869767,0.920773,0.9017892,0.86359686,0.85843223
3
+ right,0.96869767,0.9999998,0.91396976,0.9125042,0.8843721,0.88239276
4
+ above,0.920773,0.91396976,0.9999998,0.93451786,0.87860537,0.8666265
5
+ under,0.9017892,0.9125042,0.93451786,1.0000002,0.8649305,0.8732993
6
+ far,0.86359686,0.8843721,0.87860537,0.8649305,0.9999998,0.98876476
7
+ close,0.85843223,0.88239276,0.8666265,0.8732993,0.98876476,1.0000001
correct_filter/results/nvila/correct_only/csv/similarity_400k_L27.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.99905974,0.9938745,0.9942479,0.9916425,0.99195606
3
+ right,0.99905974,1.0000002,0.99394035,0.9950758,0.9930149,0.9933292
4
+ above,0.9938745,0.99394035,1.0000004,0.997444,0.9924405,0.9923437
5
+ under,0.9942479,0.9950758,0.997444,0.99999964,0.99323577,0.9936469
6
+ far,0.9916425,0.9930149,0.9924405,0.99323577,0.9999994,0.9997259
7
+ close,0.99195606,0.9933292,0.9923437,0.9936469,0.9997259,1.0000001
correct_filter/results/nvila/correct_only/csv/similarity_400k_L5.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999999,0.9972796,0.9979343,0.9974654,0.9934282,0.9930154
3
+ right,0.9972796,1.0,0.996982,0.99778414,0.9929778,0.9934241
4
+ above,0.9979343,0.996982,0.9999998,0.99976426,0.994526,0.9944363
5
+ under,0.9974654,0.99778414,0.99976426,1.0000001,0.9946017,0.9946864
6
+ far,0.9934282,0.9929778,0.994526,0.9946017,1.0000001,0.99988496
7
+ close,0.9930154,0.9934241,0.9944363,0.9946864,0.99988496,0.9999999
correct_filter/results/nvila/correct_only/csv/similarity_400k_L6.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999994,0.99787366,0.99845725,0.99814403,0.9947071,0.9944121
3
+ right,0.99787366,1.0000002,0.9980315,0.99859196,0.99486274,0.9951682
4
+ above,0.99845725,0.9980315,0.99999964,0.99983656,0.9958496,0.99578476
5
+ under,0.99814403,0.99859196,0.99983656,0.99999946,0.99596035,0.99600405
6
+ far,0.9947071,0.99486274,0.9958496,0.99596035,0.9999999,0.9999253
7
+ close,0.9944121,0.9951682,0.99578476,0.99600405,0.9999253,1.0000005
correct_filter/results/nvila/correct_only/csv/similarity_400k_L7.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999998,0.9982521,0.99867207,0.998369,0.9958096,0.9954878
3
+ right,0.9982521,0.99999917,0.99839646,0.9988315,0.99623215,0.9964118
4
+ above,0.99867207,0.99839646,1.0,0.99986494,0.9967103,0.9965906
5
+ under,0.998369,0.9988315,0.99986494,0.9999998,0.99682885,0.99680567
6
+ far,0.9958096,0.99623215,0.9967103,0.99682885,0.9999999,0.9999416
7
+ close,0.9954878,0.9964118,0.9965906,0.99680567,0.9999416,1.0000002
correct_filter/results/nvila/correct_only/csv/similarity_800k_L10.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999998,0.9997335,0.9946324,0.9929318,0.98346335,0.9839893
3
+ right,0.9997335,0.99999976,0.9943074,0.99233896,0.98312676,0.9834057
4
+ above,0.9946324,0.9943074,1.0000002,0.9992869,0.99030274,0.9904754
5
+ under,0.9929318,0.99233896,0.9992869,1.0,0.98967177,0.9901939
6
+ far,0.98346335,0.98312676,0.99030274,0.98967177,1.0000002,0.9988357
7
+ close,0.9839893,0.9834057,0.9904754,0.9901939,0.9988357,1.0
correct_filter/results/nvila/correct_only/csv/similarity_800k_L15.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999994,0.9955255,0.9616093,0.9634563,0.9570518,0.9555799
3
+ right,0.9955255,1.0,0.9596715,0.9647523,0.956402,0.9524902
4
+ above,0.9616093,0.9596715,1.0000004,0.98979074,0.9572837,0.95708257
5
+ under,0.9634563,0.9647523,0.98979074,1.0,0.957764,0.95645
6
+ far,0.9570518,0.956402,0.9572837,0.957764,1.0000007,0.99549437
7
+ close,0.9555799,0.9524902,0.95708257,0.95645,0.99549437,0.99999964
correct_filter/results/nvila/correct_only/csv/similarity_800k_L16.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999993,0.937419,0.91674423,0.91465557,0.9169775,0.923213
3
+ right,0.937419,0.9999999,0.9187542,0.9189402,0.92452234,0.9273007
4
+ above,0.91674423,0.9187542,1.0,0.89840555,0.92467207,0.92687386
5
+ under,0.91465557,0.9189402,0.89840555,1.0000001,0.90919626,0.9162538
6
+ far,0.9169775,0.92452234,0.92467207,0.90919626,1.0000004,0.9890829
7
+ close,0.923213,0.9273007,0.92687386,0.9162538,0.9890829,0.99999994
correct_filter/results/nvila/correct_only/csv/similarity_800k_L18.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000001,0.88733137,0.8091318,0.83194005,0.8167282,0.81322694
3
+ right,0.88733137,1.0000002,0.83587855,0.8211735,0.8257367,0.8230107
4
+ above,0.8091318,0.83587855,0.9999997,0.7676464,0.8233376,0.7811038
5
+ under,0.83194005,0.8211735,0.7676464,0.99999976,0.81050754,0.8206099
6
+ far,0.8167282,0.8257367,0.8233376,0.81050754,1.0000004,0.94082886
7
+ close,0.81322694,0.8230107,0.7811038,0.8206099,0.94082886,1.0000004
correct_filter/results/nvila/correct_only/csv/similarity_800k_L19.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.904862,0.80710673,0.8477925,0.83130246,0.8128334
3
+ right,0.904862,1.0000001,0.83244157,0.8415096,0.837732,0.8282201
4
+ above,0.80710673,0.83244157,1.0,0.7711587,0.799425,0.7575373
5
+ under,0.8477925,0.8415096,0.7711587,0.9999999,0.81178087,0.8155661
6
+ far,0.83130246,0.837732,0.799425,0.81178087,0.99999976,0.940137
7
+ close,0.8128334,0.8282201,0.7575373,0.8155661,0.940137,1.0
correct_filter/results/nvila/correct_only/csv/similarity_800k_L20.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999976,0.9131943,0.83245647,0.8717643,0.8463599,0.8216339
3
+ right,0.9131943,1.0000001,0.84933776,0.87377536,0.85917556,0.84626216
4
+ above,0.83245647,0.84933776,1.0,0.8021531,0.81296337,0.77681535
5
+ under,0.8717643,0.87377536,0.8021531,0.9999999,0.8474405,0.84407175
6
+ far,0.8463599,0.85917556,0.81296337,0.8474405,0.9999999,0.958832
7
+ close,0.8216339,0.84626216,0.77681535,0.84407175,0.958832,0.9999998
correct_filter/results/nvila/correct_only/csv/similarity_800k_L27.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.99999976,0.9989176,0.99486405,0.9962475,0.9953634,0.99469465
3
+ right,0.9989176,1.0000006,0.99491745,0.9964434,0.9955484,0.99498737
4
+ above,0.99486405,0.99491745,1.0000001,0.9970196,0.9946532,0.99397403
5
+ under,0.9962475,0.9964434,0.9970196,1.0000004,0.9958383,0.99531734
6
+ far,0.9953634,0.9955484,0.9946532,0.9958383,1.0000002,0.9996456
7
+ close,0.99469465,0.99498737,0.99397403,0.99531734,0.9996456,1.0000005
correct_filter/results/nvila/correct_only/csv/similarity_800k_L4.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999999,0.99931574,0.9958405,0.9904083,0.99077135,0.98957026
3
+ right,0.99931574,1.0000001,0.99584186,0.99013525,0.9912202,0.98926467
4
+ above,0.9958405,0.99584186,1.0000002,0.997966,0.99544257,0.994243
5
+ under,0.9904083,0.99013525,0.997966,1.0,0.99456835,0.99469686
6
+ far,0.99077135,0.9912202,0.99544257,0.99456835,1.0000001,0.9977426
7
+ close,0.98957026,0.98926467,0.994243,0.99469686,0.9977426,1.0000002
correct_filter/results/nvila/correct_only/csv/similarity_800k_L7.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999998,0.99970233,0.9979286,0.9962451,0.99490523,0.9946172
3
+ right,0.99970233,0.9999998,0.9979725,0.9960514,0.99498373,0.9943552
4
+ above,0.9979286,0.9979725,0.99999994,0.99914914,0.9973988,0.9966422
5
+ under,0.9962451,0.9960514,0.99914914,1.0000001,0.9971471,0.9969803
6
+ far,0.99490523,0.99498373,0.9973988,0.9971471,1.0000006,0.99878615
7
+ close,0.9946172,0.9943552,0.9966422,0.9969803,0.99878615,0.9999996
correct_filter/results/nvila/correct_only/csv/similarity_800k_L8.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.9997558,0.99791306,0.99646586,0.99523884,0.9950807
3
+ right,0.9997558,1.0000001,0.9978681,0.99624777,0.99530035,0.99486065
4
+ above,0.99791306,0.9978681,0.9999996,0.9992501,0.99727607,0.9967006
5
+ under,0.99646586,0.99624777,0.9992501,0.9999994,0.99712163,0.99706066
6
+ far,0.99523884,0.99530035,0.99727607,0.99712163,0.9999997,0.99886113
7
+ close,0.9950807,0.99486065,0.9967006,0.99706066,0.99886113,1.0
correct_filter/results/nvila/correct_only/csv/similarity_80k_L0.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.9999928,0.9996275,0.9996228,0.9995172,0.9995024
3
+ right,0.9999928,0.9999999,0.9996372,0.9996381,0.9995251,0.9995048
4
+ above,0.9996275,0.9996372,1.0000001,0.9999921,0.9996672,0.9996296
5
+ under,0.9996228,0.9996381,0.9999921,0.9999999,0.99967223,0.9996285
6
+ far,0.9995172,0.9995251,0.9996672,0.99967223,1.0,0.9999763
7
+ close,0.9995024,0.9995048,0.9996296,0.9996285,0.9999763,0.99999976
correct_filter/results/nvila/correct_only/csv/similarity_80k_L1.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0,0.9999891,0.9995492,0.9995388,0.99923646,0.99923515
3
+ right,0.9999891,1.0,0.999559,0.9995541,0.9992583,0.999249
4
+ above,0.9995492,0.999559,1.0,0.99998856,0.9993894,0.9993697
5
+ under,0.9995388,0.9995541,0.99998856,1.0,0.9994066,0.9993794
6
+ far,0.99923646,0.9992583,0.9993894,0.9994066,0.99999994,0.9999724
7
+ close,0.99923515,0.999249,0.9993697,0.9993794,0.9999724,0.9999996
correct_filter/results/nvila/correct_only/csv/similarity_80k_L12.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000001,0.99992007,0.98682374,0.98694927,0.96131676,0.96126896
3
+ right,0.99992007,1.0000005,0.98669213,0.9868369,0.9610707,0.96103275
4
+ above,0.98682374,0.98669213,1.0000001,0.9996522,0.97436637,0.97421056
5
+ under,0.98694927,0.9868369,0.9996522,0.99999976,0.9743051,0.9741693
6
+ far,0.96131676,0.9610707,0.97436637,0.9743051,1.0000004,0.99982387
7
+ close,0.96126896,0.96103275,0.97421056,0.9741693,0.99982387,1.0000001
correct_filter/results/nvila/correct_only/csv/similarity_80k_L13.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.99964416,0.9821861,0.9828103,0.9578027,0.95762783
3
+ right,0.99964416,1.0000005,0.98217076,0.98289746,0.95772654,0.95757335
4
+ above,0.9821861,0.98217076,0.99999976,0.9989925,0.97151095,0.9707316
5
+ under,0.9828103,0.98289746,0.9989925,1.0000004,0.97208655,0.9714904
6
+ far,0.9578027,0.95772654,0.97151095,0.97208655,0.9999999,0.9997549
7
+ close,0.95762783,0.95757335,0.9707316,0.9714904,0.9997549,0.99999946
correct_filter/results/nvila/correct_only/csv/similarity_80k_L18.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999999,0.9506794,0.88382566,0.88530993,0.8269373,0.8271559
3
+ right,0.9506794,1.0000001,0.87476265,0.90188134,0.838693,0.84223974
4
+ above,0.88382566,0.87476265,1.0000002,0.9400799,0.9003339,0.8796129
5
+ under,0.88530993,0.90188134,0.9400799,1.0000001,0.8822267,0.894028
6
+ far,0.8269373,0.838693,0.9003339,0.8822267,0.9999995,0.99074644
7
+ close,0.8271559,0.84223974,0.8796129,0.894028,0.99074644,1.0000006
correct_filter/results/nvila/correct_only/csv/similarity_80k_L2.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999998,0.99997544,0.99910533,0.99907017,0.99487907,0.99464613
3
+ right,0.99997544,0.9999998,0.99910426,0.9991061,0.9950284,0.9947916
4
+ above,0.99910533,0.99910426,0.99999946,0.9999348,0.994704,0.994506
5
+ under,0.99907017,0.9991061,0.9999348,0.99999976,0.9948836,0.99467635
6
+ far,0.99487907,0.9950284,0.994704,0.9948836,1.0000004,0.9999575
7
+ close,0.99464613,0.9947916,0.994506,0.99467635,0.9999575,1.0000004
correct_filter/results/nvila/correct_only/csv/similarity_80k_L20.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000002,0.94373393,0.890359,0.8972434,0.8310231,0.83172584
3
+ right,0.94373393,0.9999999,0.8833537,0.9212339,0.85914224,0.86019874
4
+ above,0.890359,0.8833537,0.99999994,0.9401983,0.88969404,0.8716103
5
+ under,0.8972434,0.9212339,0.9401983,1.0,0.89284706,0.9008564
6
+ far,0.8310231,0.85914224,0.88969404,0.89284706,1.0000004,0.99279314
7
+ close,0.83172584,0.86019874,0.8716103,0.9008564,0.99279314,1.0
correct_filter/results/nvila/correct_only/csv/similarity_80k_L24.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999996,0.97742325,0.88750535,0.8872726,0.8419494,0.8421845
3
+ right,0.97742325,1.0000001,0.8867433,0.9016926,0.8560913,0.85670274
4
+ above,0.88750535,0.8867433,1.0000001,0.971692,0.8757001,0.87120354
5
+ under,0.8872726,0.9016926,0.971692,1.0000004,0.8741759,0.8779786
6
+ far,0.8419494,0.8560913,0.8757001,0.8741759,0.9999998,0.9972954
7
+ close,0.8421845,0.85670274,0.87120354,0.8779786,0.9972954,0.99999976
correct_filter/results/nvila/correct_only/csv/similarity_80k_L26.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999999,0.98567736,0.89949954,0.9015679,0.83705455,0.83549917
3
+ right,0.98567736,0.9999999,0.89631855,0.90650165,0.84298885,0.841433
4
+ above,0.89949954,0.89631855,0.99999976,0.97662956,0.8449816,0.840574
5
+ under,0.9015679,0.90650165,0.97662956,1.0000002,0.84031403,0.84105325
6
+ far,0.83705455,0.84298885,0.8449816,0.84031403,0.9999999,0.9974231
7
+ close,0.83549917,0.841433,0.840574,0.84105325,0.9974231,1.0000002
correct_filter/results/nvila/correct_only/csv/similarity_80k_L5.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0,0.99994904,0.99607235,0.99601716,0.9891803,0.98881525
3
+ right,0.99994904,0.9999997,0.99618846,0.9961636,0.9892961,0.98891604
4
+ above,0.99607235,0.99618846,0.99999994,0.9999412,0.99260557,0.99223334
5
+ under,0.99601716,0.9961636,0.9999412,1.0,0.992865,0.9924812
6
+ far,0.9891803,0.9892961,0.99260557,0.992865,0.9999999,0.9999319
7
+ close,0.98881525,0.98891604,0.99223334,0.9924812,0.9999319,1.0
correct_filter/results/nvila/correct_only/csv/similarity_80k_L7.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000007,0.99997765,0.9980402,0.99805593,0.9940974,0.99391335
3
+ right,0.99997765,0.9999998,0.9980756,0.9981079,0.99421495,0.9940232
4
+ above,0.9980402,0.9980756,1.0000001,0.9999677,0.9959363,0.99573493
5
+ under,0.99805593,0.9981079,0.9999677,1.0000001,0.9960644,0.9958569
6
+ far,0.9940974,0.99421495,0.9959363,0.9960644,0.99999994,0.99996877
7
+ close,0.99391335,0.9940232,0.99573493,0.9958569,0.99996877,1.0000002
correct_filter/results/nvila/correct_only/csv/similarity_80k_L9.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,0.9999999,0.9999779,0.9979099,0.9978664,0.9914892,0.99141896
3
+ right,0.9999779,0.99999994,0.99795455,0.9979277,0.9915622,0.9914869
4
+ above,0.9979099,0.99795455,0.99999994,0.9999599,0.9929552,0.99283284
5
+ under,0.9978664,0.9979277,0.9999599,0.9999999,0.99296045,0.99283123
6
+ far,0.9914892,0.9915622,0.9929552,0.99296045,1.0,0.9999687
7
+ close,0.99141896,0.9914869,0.99283284,0.99283123,0.9999687,1.0000004
correct_filter/results/nvila/correct_only/csv/similarity_roborefer_L12.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000004,0.9998851,0.9772084,0.9767925,0.92733586,0.92611945
3
+ right,0.9998851,1.0,0.9774005,0.97699416,0.9275956,0.9263429
4
+ above,0.9772084,0.9774005,0.9999999,0.9994277,0.9484417,0.9475716
5
+ under,0.9767925,0.97699416,0.9994277,0.9999995,0.94778425,0.9468689
6
+ far,0.92733586,0.9275956,0.9484417,0.94778425,0.99999994,0.9996827
7
+ close,0.92611945,0.9263429,0.9475716,0.9468689,0.9996827,1.0000001
correct_filter/results/nvila/correct_only/csv/similarity_roborefer_L13.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0000001,0.999737,0.9714929,0.9713556,0.9228223,0.92111427
3
+ right,0.999737,0.99999946,0.9718181,0.9717809,0.9234044,0.9216598
4
+ above,0.9714929,0.9718181,1.0000006,0.99914813,0.9459721,0.9443468
5
+ under,0.9713556,0.9717809,0.99914813,0.9999998,0.9453688,0.9437411
6
+ far,0.9228223,0.9234044,0.9459721,0.9453688,0.99999994,0.99955255
7
+ close,0.92111427,0.9216598,0.9443468,0.9437411,0.99955255,0.9999999
correct_filter/results/nvila/correct_only/csv/similarity_roborefer_L16.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ,left,right,above,under,far,close
2
+ left,1.0,0.9601641,0.95128024,0.9552473,0.9148139,0.9127438
3
+ right,0.9601641,1.0,0.947104,0.9543113,0.9126753,0.91043645
4
+ above,0.95128024,0.947104,1.0000002,0.98705524,0.9433115,0.9390137
5
+ under,0.9552473,0.9543113,0.98705524,1.0000002,0.9456221,0.9441068
6
+ far,0.9148139,0.9126753,0.9433115,0.9456221,1.0000004,0.99866474
7
+ close,0.9127438,0.91043645,0.9390137,0.9441068,0.99866474,1.0