---
dataset_info:
- config_name: 16_frame
  features:
  - name: id
    dtype: int64
  - name: dataset
    dtype: string
  - name: scene_id
    dtype: string
  - name: question_type
    dtype: string
  - name: question
    dtype: string
  - name: ground_truth
    dtype: string
  - name: options
    sequence: string
  - name: num_frames
    dtype: string
  - name: queried_object_ids
    sequence: int64
  splits:
  - name: test
    num_bytes: 1211030
    num_examples: 4568
  download_size: 147029
  dataset_size: 1211030
- config_name: 32_frame
  features:
  - name: id
    dtype: int64
  - name: dataset
    dtype: string
  - name: scene_id
    dtype: string
  - name: question_type
    dtype: string
  - name: question
    dtype: string
  - name: ground_truth
    dtype: string
  - name: options
    sequence: string
  - name: num_frames
    dtype: string
  - name: queried_object_ids
    sequence: int64
  splits:
  - name: test
    num_bytes: 1769552
    num_examples: 6158
  download_size: 209977
  dataset_size: 1769552
- config_name: 64_frame
  features:
  - name: id
    dtype: int64
  - name: dataset
    dtype: string
  - name: scene_id
    dtype: string
  - name: question_type
    dtype: string
  - name: question
    dtype: string
  - name: ground_truth
    dtype: string
  - name: options
    sequence: string
  - name: num_frames
    dtype: string
  - name: queried_object_ids
    sequence: int64
  splits:
  - name: test
    num_bytes: 1931345
    num_examples: 6616
  download_size: 231397
  dataset_size: 1931345
- config_name: all_frame
  features:
  - name: id
    dtype: int64
  - name: dataset
    dtype: string
  - name: scene_id
    dtype: string
  - name: question_type
    dtype: string
  - name: question
    dtype: string
  - name: ground_truth
    dtype: string
  - name: options
    sequence: string
  - name: num_frames
    dtype: string
  - name: queried_object_ids
    sequence: int64
  splits:
  - name: test
    num_bytes: 2010779
    num_examples: 6808
  download_size: 239453
  dataset_size: 2010779
configs:
- config_name: 16_frame
  data_files:
  - split: test
    path: 16_frame/test-*
- config_name: 32_frame
  data_files:
  - split: test
    path: 32_frame/test-*
- config_name: 64_frame
  data_files:
  - split: test
    path: 64_frame/test-*
- config_name: all_frame
  data_files:
  - split: test
    path: all_frame/test-*
  default: true
task_categories:
- visual-question-answering
language:
- en
size_categories:
- 1K<n<10K
license: apache-2.0
tags:
- Spatial Intelligence
- Vision Language Models
---

<div align="center">
  <img src="metadata/revsi.png" width="350">
  <b>ICML 2026</b>
  <br>
  <a href="https://github.com/eamonn-zh">Yiming Zhang</a><sup>1*</sup>,
  <a href="https://jcchen.me/">Jiacheng Chen</a><sup>1*</sup>,
  <a href="https://christinatan0704.github.io/mysite/">Jiaqi Tan</a><sup>1</sup>,
  <a href="https://sammaoys.github.io/">Yongsen Mao</a><sup>2</sup>,
  <a href="https://wenhuchen.github.io/">Wenhu Chen</a><sup>3</sup>,
  <a href="https://angelxuanchang.github.io/">Angel X. Chang</a><sup>1,4</sup>
  <br>
  <sup>1</sup> Simon Fraser University &nbsp;&nbsp;
  <sup>2</sup> Hong Kong University of Science and Technology
  <br>
  <sup>3</sup> University of Waterloo &nbsp;&nbsp;
  <sup>4</sup> Alberta Machine Intelligence Institute (Amii)
  <br><br>
  <a href="https://3dlg-hcvc.github.io/revsi/">
    <img src="https://img.shields.io/badge/Project%20Page-84C0B8?style=for-the-badge">
  </a>
  <a href="https://github.com/3dlg-hcvc/revsi">
    <img src="https://img.shields.io/badge/github-%23121011.svg?style=for-the-badge&logo=github&logoColor=white">
  </a>
  <a href="https://arxiv.org/abs/2604.24300">
    <img src="https://img.shields.io/badge/arXiv-2604.24300-b31b1b.svg?style=for-the-badge">
  </a>
  <a href="https://revsi.site/">
    <img src="https://img.shields.io/badge/Visualizer-84C0B8?style=for-the-badge&logo=eye&logoColor=white">
  </a>
</div>

This repository contains the <span style="color:#84C0B8;"><b>ReVSI</b></span> benchmark and dataset, introduced in [ReVSI: Rebuilding Visual Spatial Intelligence Evaluation for Accurate Assessment of VLM 3D Reasoning](https://3dlg-hcvc.github.io/revsi/).


## Data Subsets
<span style="color:#84C0B8;"><b>ReVSI</b></span> provides multiple data subsets corresponding to different video frame budgets:
- all-frame
- 64-frame
- 32-frame
- 16-frame

Use the following snippet to load a specific subset:
```python
from datasets import load_dataset
revsi_dataset = load_dataset("3dlg-hcvc/ReVSI", "64_frame", split="test")  # load the 64-frame subset
```

> [!NOTE]
> **How video subsets are constructed:**
> 
> The **all-frame** subset contains the full processed video sequence for each scene, with standardized resolution and frame rate:
> 1. **ScanNet v2 / ScanNetPP v2 / MultiScan**  
>    *640 × 480 · 10 FPS*
>
> 2. **ARKitScenes**  
>    *640 × 480 / 480 × 640 · 10 FPS (all videos have been rotated to sky-up orientation)*
>
> 3. **3RScan**  
>    *360 × 640 · 4 FPS*
> 
> The fixed-budget subsets are constructed via hierarchical uniform sampling:
> 1. Uniformly sample **64 frames** from **all-frame**
> 2. Uniformly subsample **32 frames** from the **64-frame** set
> 3. Uniformly subsample **16 frames** from the **32-frame** set  
>
> This produces a nested structure: **16-frame** ⊂ **32-frame** ⊂ **64-frame** ⊂ **all-frame**.
> For each video, all subsets cover the same time span, and each sampled frame keeps the same timestamp across subsets. This guarantees consistent timestamps for models with frame timestamp encoding.
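
The nesting can be reproduced with plain uniform index sampling. Below is a minimal sketch of the hierarchical scheme (assuming `numpy`-style even spacing; the exact indices used by the benchmark are released in `metadata/sampled_video_frame_idx.json`, described below):

```python
import numpy as np

def uniform_sample(indices, k):
    # Pick k evenly spaced positions over the given index list.
    positions = np.linspace(0, len(indices) - 1, num=k).round().astype(int)
    return [indices[p] for p in positions]

all_frames = list(range(1200))        # all-frame indices for one video
f64 = uniform_sample(all_frames, 64)  # step 1: 64 frames from all-frame
f32 = uniform_sample(f64, 32)         # step 2: 32 frames from the 64-frame set
f16 = uniform_sample(f32, 16)         # step 3: 16 frames from the 32-frame set

# Nested by construction, so each sampled frame keeps its timestamp across subsets.
assert set(f16) <= set(f32) <= set(f64) <= set(all_frames)
```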

## Data Fields
Each entry in the <span style="color:#84C0B8;"><b>ReVSI</b></span> dataset contains the following fields:
| Field Name | Type | Description |
| :--------- | :--- | :---------- |
| `id` | int64 | Unique identifier for each sample |
| `dataset` | string | Source dataset of the video |
| `scene_id` | string | Identifier of the scene (video) associated with the sample |
| `question_type` | string | Category of the question |
| `question` | string | Natural language question grounded in the video |
| `options` | list[string] | List of answer choices (only for multiple-choice questions) |
| `ground_truth` | string | Ground-truth answer to the question |
| `num_frames` | string | Frame budget used for evaluation (e.g., 16, 32, 64, all) |
| `queried_object_ids` | list[int64] | List of object instance IDs referenced in the question |
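
For concreteness, here is a minimal snippet that loads one entry and reads these fields:

```python
from datasets import load_dataset

ds = load_dataset("3dlg-hcvc/ReVSI", "16_frame", split="test")
sample = ds[0]

print(sample["dataset"], sample["scene_id"], sample["question_type"])
print("Q:", sample["question"])
if sample["options"]:  # populated only for multiple-choice questions
    for option in sample["options"]:
        print("  ", option)
print("A:", sample["ground_truth"])
```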

## Evaluation
> [!WARNING]
> Please avoid using PyTorch 2.9, as a known cuDNN issue can lead to significant performance degradation for QwenVL models (see [details](https://github.com/pytorch/pytorch/issues/166122)).

<span style="color:#84C0B8;"><b>ReVSI</b></span> supports inference / evaluation with the following frameworks:
- [LMMs-Eval](https://github.com/eamonn-zh/lmms-eval) (inference + evaluation)
  ```bash
  # example 1: evaluate Qwen3-VL-8B-Instruct on ReVSI 64-frame subset (with huggingface transformers backend on 4 GPUs)
  accelerate launch \
  --num_processes=4 \
  -m lmms_eval \
  --model qwen3_vl \
  --model_args=pretrained=Qwen/Qwen3-VL-8B-Instruct,attn_implementation=flash_attention_2,max_num_frames=64 \
  --tasks revsi_64_frame \
  --batch_size 8
  
  # example 2: evaluate Qwen3-VL-8B-Instruct on ReVSI all-frame subset using 2 fps sampling rate (with vllm backend)
  python -m lmms_eval \
  --model vllm \
  --model_args "model=Qwen/Qwen3-VL-8B-Instruct,fps=2" \
  --tasks revsi_all_frame
  ```

- [VLMEvalKit](https://github.com/eamonn-zh/VLMEvalKit) (inference + evaluation)
  ```bash
  # example 1: evaluate Qwen3-VL-8B-Instruct on ReVSI 32-frame subset (with vllm backend)
  python run.py --data revsi_32_frame --model Qwen3-VL-8B-Instruct
  ```

- [ModelScope SWIFT](https://github.com/modelscope/ms-swift) (inference-only, check [ReVSI GitHub repo](https://github.com/3dlg-hcvc/revsi) for data registration)
  ```bash
  # example 1: infer Qwen3-VL-8B-Instruct on ReVSI 64-frame subset (with huggingface transformers backend on 4 GPUs)
  NPROC_PER_NODE=4 swift infer \
  --model Qwen/Qwen3-VL-8B-Instruct  \
  --model_kwargs '{"fps_min_frames": 64, "fps_max_frames": 64}' \
  --val_dataset 3dlg-hcvc/ReVSI:64_frame \
  --infer_backend transformers \
  --custom_register_path ./ms_swift_register/revsi_register.py \
  --use_hf true \
  --torch_dtype bfloat16 \
  --attn_impl flash_attention_2 \
  --strict true \
  --max_batch_size 8 \
  --temperature 0
  ```

- [TorchMetrics Extension](https://github.com/eamonn-zh/torchmetrics_ext) (evaluation-only)
  ```python
  # example 1: evaluate existing predictions on ReVSI all-frame subset using TorchMetrics Extension evaluator
  from torchmetrics_ext.metrics.vqa import ReVSIMetric

  metric = ReVSIMetric(subset="all_frame")
  predictions = {0: "2", 1: "4", 1000: "A"}  # a dict following the format {question_id: response}, one entry per question
  results = metric(predictions)
  ```

## Metadata Files
We provide several metadata files used in constructing <span style="color:#84C0B8;"><b>ReVSI</b></span>:

- [metadata/3d_annotation.json](https://huggingface.co/datasets/3dlg-hcvc/ReVSI/blob/main/metadata/3d_annotation.json): 3D annotations for each scene, including object names, oriented bounding boxes and scene area polygons. The schema is as follows:
  ```json
  [
    {
      "scene_id": # scene ID from the source dataset
      "dataset":  # source dataset name
      "scene_area_2d_polygon": # list of 2D boundary points (x, y) defining the scene area polygon, shape (N, 2)
      "scene_area_type":  # scene area annotation type (single_room or multiple_room)
      "objects": [
        {
          "id":  # object id within the scene
          "name":  # open-vocabulary object name
          "obb": {
            "center":  # center of the object oriented bounding boxes, shape (3, )
            "extent":  # extent of the object oriented bounding boxes, shape (3, )
            "rotation":  # rotation matrix of the object oriented bounding boxes, shape (3, 3)
          }
        },
        ...
      ]
    },
    ...
  ]
  ```
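
  As a short usage sketch (assuming `numpy`, and assuming `rotation` maps local box axes to world coordinates), the eight corners of an object's oriented bounding box can be recovered from `center`, `extent`, and `rotation`:
  ```python
  import json
  import numpy as np

  with open("metadata/3d_annotation.json") as f:
      scenes = json.load(f)

  obj = scenes[0]["objects"][0]
  center = np.array(obj["obb"]["center"])      # (3,)
  extent = np.array(obj["obb"]["extent"])      # (3,)
  rotation = np.array(obj["obb"]["rotation"])  # (3, 3)

  # Corner = center + R @ (sign * half_extent), for all 8 sign combinations.
  signs = np.array([[sx, sy, sz] for sx in (-1, 1) for sy in (-1, 1) for sz in (-1, 1)])
  corners = center + (signs * extent / 2) @ rotation.T  # (8, 3)
  print(obj["name"], corners)
  ```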

- [metadata/sampled_video_frame_idx.json](https://huggingface.co/datasets/3dlg-hcvc/ReVSI/blob/main/metadata/sampled_video_frame_idx.json): Indices of the sampled frames for the 16/32/64-frame subsets. The schema is as follows:
  ```json
  {
    "<scene_id>": {
      "64-frame":  # list of sampled frame indices from the all-frame video, shape (64, )
      "32-frame":  # list of sampled frame indices from the all-frame video, shape (32, )
      "16-frame":  # list of sampled frame indices from the all-frame video, shape (16, )
    }
    ...
  }
  ```
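
  For example, the indices can be used to decode only a fixed-budget subset from an all-frame video (a sketch using OpenCV; the `.mp4` path below is a placeholder):
  ```python
  import json
  import cv2

  with open("metadata/sampled_video_frame_idx.json") as f:
      frame_idx = json.load(f)

  scene_id = next(iter(frame_idx))           # any scene ID in the file
  cap = cv2.VideoCapture(f"{scene_id}.mp4")  # placeholder path to the all-frame video
  frames = []
  for i in frame_idx[scene_id]["16-frame"]:
      cap.set(cv2.CAP_PROP_POS_FRAMES, i)    # seek to the sampled frame index
      ok, frame = cap.read()
      if ok:
          frames.append(frame)
  cap.release()
  ```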

- [metadata/obj_visibility.json](https://huggingface.co/datasets/3dlg-hcvc/ReVSI/blob/main/metadata/obj_visibility.json): Object visibility under different video frame budgets. The schema is as follows:
  ```json
  {
    "<scene_id>": [
      {
        "object_id":  # object id within the scene (consistent with metadata/3d_annotation.json)
        "object_name":  # open-vocabulary object name (consistent with metadata/3d_annotation.json)
        "visibility_16":  # visibility under the 16-frame budget
        "visibility_32":  # visibility under the 32-frame budget
        "visibility_64":  # visibility under the 64-frame budget
      },
      ...
    ],
    ...
  }
  ```
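
  A small sketch of how this file might be used, e.g., to find poorly covered objects (this assumes the `visibility_*` fields are numeric scores; the 0.5 threshold is purely illustrative):
  ```python
  import json

  with open("metadata/obj_visibility.json") as f:
      visibility = json.load(f)

  def low_visibility_objects(scene_id, threshold=0.5):
      # Objects whose visibility under the 16-frame budget falls below the threshold.
      return [o["object_name"] for o in visibility[scene_id]
              if o["visibility_16"] < threshold]
  ```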

- [metadata/tiny_set_question_ids.txt](https://huggingface.co/datasets/3dlg-hcvc/ReVSI/blob/main/metadata/tiny_set_question_ids.txt): The sampled question IDs of the `tiny` set used for proprietary model evaluations.
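
  To evaluate on the tiny set only, these IDs can be used to filter any subset (a sketch assuming one question ID per line in the file):
  ```python
  from datasets import load_dataset

  with open("metadata/tiny_set_question_ids.txt") as f:
      tiny_ids = {int(line) for line in f if line.strip()}

  ds = load_dataset("3dlg-hcvc/ReVSI", "all_frame", split="test")
  tiny = ds.filter(lambda x: x["id"] in tiny_ids)
  ```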

## Citation
If you find <span style="color:#84C0B8;"><b>ReVSI</b></span> useful for your research, please consider citing:
```bibtex
@article{zhang2026revsi,
  title={ReVSI: Rebuilding Visual Spatial Intelligence Evaluation for Accurate Assessment of VLM 3D Reasoning},
  author={Zhang, Yiming and Chen, Jiacheng and Tan, Jiaqi and Mao, Yongsen and Chen, Wenhu and Chang, Angel X.},
  journal={arXiv preprint arXiv:2604.24300},
  year={2026}
}
```

<span style="color:#84C0B8;"><b>ReVSI</b></span> builds upon the following 3D scene datasets and the VSI-Bench benchmark; please also consider citing them:
```bibtex
@inproceedings{dai2017scannet,
  title={Scannet: Richly-annotated 3d reconstructions of indoor scenes},
  author={Dai, Angela and Chang, Angel X and Savva, Manolis and Halber, Maciej and Funkhouser, Thomas and Nie{\ss}ner, Matthias},
  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
  pages={5828--5839},
  year={2017}
}

@inproceedings{yeshwanth2023scannet++,
  title={Scannet++: A high-fidelity dataset of 3d indoor scenes},
  author={Yeshwanth, Chandan and Liu, Yueh-Cheng and Nie{\ss}ner, Matthias and Dai, Angela},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={12--22},
  year={2023}
}

@inproceedings{baruch1arkitscenes,
  title={ARKitScenes: A Diverse Real-World Dataset For 3D Indoor Scene Understanding Using Mobile RGB-D Data},
  author={Baruch, Gilad and Chen, Zhuoyuan and Dehghan, Afshin and Feigin, Yuri and Fu, Peter and Gebauer, Thomas and Kurz, Daniel and Dimry, Tal and Joffe, Brandon and Schwartz, Arik and others},
  booktitle={Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1)}
}

@inproceedings{wald2019rio,
  title={Rio: 3d object instance re-localization in changing indoor environments},
  author={Wald, Johanna and Avetisyan, Armen and Navab, Nassir and Tombari, Federico and Nie{\ss}ner, Matthias},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={7658--7667},
  year={2019}
}

@article{mao2022multiscan,
  title={Multiscan: Scalable rgbd scanning for 3d environments with articulated objects},
  author={Mao, Yongsen and Zhang, Yiming and Jiang, Hanxiao and Chang, Angel and Savva, Manolis},
  journal={Advances in neural information processing systems},
  volume={35},
  pages={9058--9071},
  year={2022}
}

@inproceedings{yang2025thinking,
  title={Thinking in space: How multimodal large language models see, remember, and recall spaces},
  author={Yang, Jihan and Yang, Shusheng and Gupta, Anjali W and Han, Rilyn and Fei-Fei, Li and Xie, Saining},
  booktitle={Proceedings of the Computer Vision and Pattern Recognition Conference},
  pages={10632--10643},
  year={2025}
}
```