Datasets: nvidia/PhysicalAI-VANTAGE-Bench

Modalities: Image, Video
Size: < 1K
Libraries: Datasets
Add Croissant 1.1 metadata (croissant.json)

#4
Files changed (1)
  1. croissant.json +371 -0
croissant.json ADDED
@@ -0,0 +1,371 @@
{
  "@context": {
    "@language": "en",
    "@vocab": "https://schema.org/",
    "sc": "https://schema.org/",
    "cr": "http://mlcommons.org/croissant/",
    "rai": "http://mlcommons.org/croissant/RAI/",
    "dct": "http://purl.org/dc/terms/",
    "prov": "http://www.w3.org/ns/prov#",
    "citeAs": "cr:citeAs",
    "column": "cr:column",
    "conformsTo": "dct:conformsTo",
    "containedIn": "cr:containedIn",
    "data": {
      "@id": "cr:data",
      "@type": "@json"
    },
    "dataType": {
      "@id": "cr:dataType",
      "@type": "@vocab"
    },
    "extract": "cr:extract",
    "field": "cr:field",
    "fileProperty": "cr:fileProperty",
    "fileObject": "cr:fileObject",
    "fileSet": "cr:fileSet",
    "format": "cr:format",
    "includes": "cr:includes",
    "isLiveDataset": "cr:isLiveDataset",
    "jsonPath": "cr:jsonPath",
    "key": "cr:key",
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
    "repeated": "cr:repeated",
    "replace": "cr:replace",
    "separator": "cr:separator",
    "source": "cr:source",
    "subField": "cr:subField",
    "transform": "cr:transform",
    "samplingRate": "cr:samplingRate",
    "equivalentProperty": "cr:equivalentProperty"
  },
  "@type": "sc:Dataset",
  "conformsTo": [
    "http://mlcommons.org/croissant/1.1",
    "http://mlcommons.org/croissant/RAI/1.0"
  ],
  "name": "PhysicalAI-VANTAGE-Bench",
  "alternateName": [
    "VANTAGE-Bench",
    "Video ANalysis Tasks Across Generalized Environments"
  ],
  "description": "VANTAGE-Bench is the first public benchmark purpose-built for evaluating vision-language models on video captured by fixed infrastructure cameras (the 'Observer AI' setting). It spans three real-world domains — Warehouse, Transportation / ITS, and Smart Spaces — across eight spatio-temporal video understanding tasks: VQA, Event Verification, Temporal Localization, Dense Video Captioning, 2D Object Localization, 2D Referring Expressions, 2D Spatial Pointing, and stateless single-object Tracking. The benchmark contains approximately 312 unique videos and 1,864 images with 27,003 expert-curated annotations, and is intended for evaluation only; ground-truth annotations are held server-side.",
  "url": "https://huggingface.co/datasets/nvidia/PhysicalAI-VANTAGE-Bench",
  "license": "https://huggingface.co/datasets/nvidia/PhysicalAI-VANTAGE-Bench/blob/main/LICENSE",
  "version": "1.0.0",
  "datePublished": "2026-04-24",
  "creator": [
    {
      "@type": "Organization",
      "name": "NVIDIA Corporation",
      "url": "https://huggingface.co/nvidia"
    },
    {
      "@type": "Organization",
      "name": "Clemson University",
      "url": "https://www.clemson.edu"
    }
  ],
  "publisher": {
    "@type": "Organization",
    "name": "NVIDIA Corporation",
    "url": "https://huggingface.co/nvidia"
  },
  "keywords": [
    "video understanding",
    "fixed-camera",
    "observer ai",
    "warehouse safety",
    "transportation",
    "smart spaces",
    "visual question answering",
    "event verification",
    "temporal grounding",
    "dense video captioning",
    "object detection",
    "referring expressions",
    "spatial pointing",
    "single-object tracking",
    "anonymized",
    "evaluation-only",
    "English"
  ],
  "inLanguage": "en",
  "citeAs": "Chan, L. F., Nayyar, N., Suchanek, J., Russell, C., Jain, A., Praveen, V., Murali, V. N., Bhat, Z., Wang, Y., & Kornuta, T. (2026). VANTAGE-Bench: Video Analysis Tasks Across Generalized Environments for Observer AI. In Advances in Neural Information Processing Systems (NeurIPS) Evaluations & Datasets Track.",
  "rai:dataCollection": "VANTAGE-Bench combines three streams of fixed-infrastructure footage. (1) Warehouse and Smart-Spaces footage was captured with GoPro cameras at multiple US-based facilities by NVIDIA's contracted vendors, under collection agreements that include informed consent and authorize redistribution. (2) Transportation footage was provided by the City of Dubuque (Iowa) under a data-use agreement that explicitly permits redistribution as part of VANTAGE-Bench. (3) A portion of VQA, Temporal, and DVC clips, plus the entire single-object tracking subset, are synthetic: VQA/Temporal/DVC synthetic clips were generated on NVIDIA DriveSim Omniverse, and the tracking subset adapts AI City Challenge Track 1 multi-camera warehouse data. The 2D Referring Expressions subset reuses the publicly released RefDrone dataset. Collection has been ongoing since 2016. All real footage was de-identified by an automated face-and-license-plate anonymization pipeline.",
  "rai:dataCollectionType": [
    "Sensor Recording",
    "Existing Datasets",
    "Simulations",
    "Manual Human Curation"
  ],
  "rai:dataCollectionTimeframe": [
    "2016-01-01T00:00:00",
    "ongoing"
  ],
  "rai:personalSensitiveInformation": "Source footage was captured in semi-public spaces (warehouses, traffic intersections, smart-space environments) and incidentally contains people and vehicles. Faces and vehicle license plates have been obfuscated by an automated de-identification pipeline. The released benchmark contains no direct personally identifiable information (PII) — no names, no audio transcripts, no demographic metadata, no health or financial data. Geographic identifiers (intersection or facility names) appear in some filenames. Any clips originally sourced from third-party YouTube re-uploads are not redistributed; users obtain those from the original source themselves. An ethics review was conducted by NVIDIA in addition to the technical anonymization step.",
  "rai:dataLimitations": "VANTAGE-Bench is an evaluation-only benchmark; ground-truth annotations are held server-side and the dataset is not intended for model training or fine-tuning. Coverage is restricted to fixed-camera infrastructure footage in three operational domains and predominantly daylight, fair-weather conditions; nighttime, infrared, heavy snow, and other adverse-weather scenes are out of scope. Audio is not evaluated. The benchmark is single-view: no multi-camera correlation tasks are included. Annotations and supporting prompts are English-only. Anonymization, while audited, is best-effort: small, occluded, or motion-blurred faces and license plates may not have been detected. Strong performance on this benchmark is not evidence of clinical, legal, evidentiary, or safety-critical deployment readiness, and the benchmark should not be used for identity recognition, real-time closed-loop control, or any task requiring multi-camera tracking.",
  "rai:dataBiases": "Real-world footage is concentrated in the United States: warehouse and smart-spaces clips come from US-based contracted vendor facilities, and transportation clips come from a single municipal partner (City of Dubuque, Iowa). The benchmark therefore reflects US infrastructure, signage, and traffic conventions. A portion of VQA / Temporal / DVC clips are synthetic (NVIDIA DriveSim Omniverse), and the single-object-tracking subset is fully synthetic (AI City Challenge Track 1), introducing a sim-to-real distribution shift the benchmark explicitly measures. The 2D Referring Expressions subset is fully aerial drone imagery (RefDrone) and is therefore not representative of ground-mounted infrastructure cameras. The Event Verification subset intentionally includes both positive event examples and plausible negatives; the precise per-class distribution is held out as part of the evaluation. Daylight and clear-weather scenes dominate every track. The 2D Spatial Pointing task is procedurally generated from human-verified bounding boxes, so its distribution inherits the bounding-box source's spatial bias.",
  "rai:dataUseCases": "VANTAGE-Bench is designed for (1) zero-shot evaluation of vision-language models on fixed-camera ('Observer AI') video understanding across spatial, temporal, and semantic reasoning; (2) measuring the 'Observer AI Gap' between model performance on consumer / cinematic video and on real fixed-infrastructure footage; (3) measuring the sim-to-real gap by comparing accuracy on DriveSim-generated and real-world matched scenes; and (4) studying stateless, single-pass VLM tracking via VANTAGE-SOT — the first quantitative evaluation protocol for VLMs as direct trackers. Validated tasks and metrics: VQA (Top-1 accuracy), Event Verification (macro F1), Temporal Localization (mIoU + Precision@0.5), Dense Video Captioning (SODA_c), 2D Object Localization (COCO mAP / F1), 2D Referring Expressions (Acc@IoU), 2D Spatial Pointing (pointing accuracy), Single-Object Tracking (mean spatial IoU + success AUC). The benchmark is not validated for and should not be used for model training, multi-camera reasoning, audio-dependent tasks, real-time control, identity recognition, or any clinical, legal, or safety-critical decision support.",
  "rai:dataSocialImpact": "VANTAGE-Bench enables, for the first time, language-grounded evaluation of fixed-infrastructure video understanding across warehouse safety, transportation ITS, and smart spaces in one unified benchmark. By revealing the 'Observer AI Gap' (the measured drop in performance when frontier VLMs move from consumer video to fixed-infrastructure footage), it discourages premature claims of operational readiness for VLM-based safety systems. Foreseeable misuse risks include (a) using the released clips and visible annotations to train or fine-tune model weights, which would contaminate the held-out evaluation and invalidate published benchmark scores; (b) repurposing models tuned against this benchmark for surveillance against unredacted live feeds; (c) treating benchmark scores as evidence of deployment readiness for automated insurance, policing, or HR / disciplinary decisions; and (d) over-trusting strong sim-only performance on the synthetic tracking subset. Mitigations: the benchmark is evaluation-only with server-side held-out ground truth (preventing use as a training set); the dataset repository is gated, so users must accept the terms of use before downloading; the license explicitly prohibits training, fine-tuning, or any use other than evaluation; automated face and license-plate anonymization; and explicit limitations and use-case boundaries published with the benchmark.",
  "rai:hasSyntheticData": true,
  "prov:wasDerivedFrom": [
    {
      "@type": "sc:Dataset",
      "@id": "https://www.aicitychallenge.org/2026-data-and-evaluation/",
      "name": "AI City Challenge Track 1 - multi-camera synthetic warehouse",
      "datePublished": "2021-01-12",
      "version": "2021",
      "description": "Synthetic multi-camera warehouse data from the AI City Challenge Track 1, used as the source for VANTAGE-SOT (single-object tracking) after re-stratification by motion profile.",
      "license": "https://creativecommons.org/licenses/by/4.0/"
    },
    {
      "@type": "sc:Dataset",
      "@id": "https://arxiv.org/abs/2502.00392",
      "name": "RefDrone",
      "description": "RefDrone is an aerial referring-expression dataset (Sun et al., 2025). The full RefDrone images and expressions are reused in VANTAGE-Bench's VANTAGE-RefExpr sub-track.",
      "license": "Reusable per the RefDrone authors' release terms; see paper for details.",
      "datePublished": "2025-02-01",
      "version": "1.0",
      "citation": "Sun, Z., Zou, Y., Sun, X., Feng, Y., Diao, W., Yan, M., & Fu, K. (2025). RefDrone: A Challenging Benchmark for Referring Expression Comprehension in Drone Scenes. arXiv:2502.00392."
    }
  ],
  "prov:wasGeneratedBy": [
    {
      "@type": "prov:Activity",
      "@id": "data-acquisition-dubuque",
      "prov:label": "Transportation data acquisition (City of Dubuque)",
      "description": "Fixed-camera highway / ITS footage acquired under a data-use agreement with the City of Dubuque, Iowa. The agreement permits redistribution as part of VANTAGE-Bench. Coverage spans the city's deployed traffic-camera network.",
      "prov:wasAttributedTo": {
        "@type": "prov:Agent",
        "name": "City of Dubuque, Iowa"
      }
    },
    {
      "@type": "prov:Activity",
      "@id": "vendor-collection",
      "prov:label": "Warehouse and smart-spaces footage collection",
      "description": "GoPro fixed-camera recordings collected at multiple US-based warehouse and smart-spaces facilities by contracted vendors. Subjects were informed and consented to recording; vendor agreements authorize redistribution under VANTAGE-Bench.",
      "prov:wasAttributedTo": {
        "@type": "prov:Agent",
        "name": "Contracted data-collection vendors (US-based facilities)"
      }
    },
    {
      "@type": "prov:Activity",
      "@id": "synthetic-generation",
      "prov:label": "Synthetic clip generation",
      "description": "A portion of VQA / Temporal / DVC clips were rendered on the NVIDIA DriveSim Omniverse simulator (collision and multi-camera scenarios). Synthetic data is included intentionally to allow direct sim-to-real comparison.",
      "prov:wasAttributedTo": {
        "@type": "prov:SoftwareAgent",
        "name": "NVIDIA DriveSim Omniverse"
      }
    },
    {
      "@type": "prov:Activity",
      "@id": "anonymization",
      "prov:label": "Face and license-plate anonymization",
      "description": "All real-world footage and frames were processed by an automated face- and license-plate-detection pipeline that applied mosaic blur to detected regions. Residual risk: small or occluded faces / plates may remain visible.",
      "prov:wasAttributedTo": {
        "@type": "prov:SoftwareAgent",
        "name": "NVIDIA face-and-plate anonymization pipeline"
      }
    },
    {
      "@type": "prov:Activity",
      "@id": "curation",
      "prov:label": "Clip curation and trimming",
      "description": "Curation — including selection of clips around safety-critical incidents (collisions, tailgating, near-misses, zone breaches) and trimming of long recordings into evaluable clips — was performed by the research authors using domain-specific rulebooks.",
      "prov:wasAttributedTo": {
        "@type": "prov:Agent",
        "name": "VANTAGE-Bench research authors (NVIDIA + Clemson University)"
      }
    },
    {
      "@type": "prov:Activity",
      "@id": "human-annotation",
      "prov:label": "Human annotation",
      "description": "Annotations for VQA, Event Verification, Temporal Localization, Dense Video Captioning, and 2D Object Localization were authored by trained NVIDIA Data Factory professionals (not crowdsourced). Annotators worked from domain-specific rulebooks defining safety-critical incident boundaries and spatial coordinate conventions. A secondary QA expert verified every bounding box, temporal segment, and caption before inclusion. Ten percent of human-annotated samples were independently cross-validated by a separate QA expert to establish construct validity, label consistency, and label reliability.",
      "prov:wasAttributedTo": {
        "@type": "prov:Agent",
        "name": "NVIDIA Data Factory (trained professional annotators)"
      }
    },
    {
      "@type": "prov:Activity",
      "@id": "pseudo-labeling",
      "prov:label": "Pseudo-labeling for spatial pointing",
      "description": "The 2D Spatial Pointing (VANTAGE-2DPoint) task is generated by a pseudo-labeling pipeline that converts human-verified bounding boxes into spatial-reasoning question-answer pairs via templated heuristics over relative position, distance, and overlap.",
      "prov:wasAttributedTo": {
        "@type": "prov:SoftwareAgent",
        "name": "VANTAGE-2DPoint pseudo-labeling pipeline"
      }
    }
  ],
  "distribution": [
    {
      "@type": "cr:FileObject",
      "@id": "repo",
      "name": "repo",
      "description": "The Hugging Face git repository hosting VANTAGE-Bench.",
      "contentUrl": "https://huggingface.co/datasets/nvidia/PhysicalAI-VANTAGE-Bench",
      "encodingFormat": "git+https",
      "sha256": "https://github.com/mlcommons/croissant/issues/80"
    },
    {
      "@type": "cr:FileSet",
      "@id": "videos-vqa",
      "name": "videos-vqa",
      "description": "Anonymized .mp4 video clips for the Visual Question Answering sub-track.",
      "containedIn": {
        "@id": "repo"
      },
      "encodingFormat": "video/mp4",
      "includes": "data/VQA/**/*.mp4"
    },
    {
      "@type": "cr:FileSet",
      "@id": "videos-event-verification",
      "name": "videos-event-verification",
      "description": "Anonymized .mp4 video clips for the Event Verification sub-track (its_collision, tailgating, warehouse_near_miss, metropolis_event_verification, plus filtered subsets).",
      "containedIn": {
        "@id": "repo"
      },
      "encodingFormat": "video/mp4",
      "includes": "data/event_verification_subset/**/*.mp4"
    },
    {
      "@type": "cr:FileSet",
      "@id": "videos-temporal",
      "name": "videos-temporal",
      "description": "Anonymized .mp4 video clips for the Temporal Localization sub-track.",
      "containedIn": {
        "@id": "repo"
      },
      "encodingFormat": "video/mp4",
      "includes": "data/Temporal/**/*.mp4"
    },
    {
      "@type": "cr:FileSet",
      "@id": "videos-dense-video-caption",
      "name": "videos-dense-video-caption",
      "description": "Anonymized .mp4 video clips for the Dense Video Captioning sub-track.",
      "containedIn": {
        "@id": "repo"
      },
      "encodingFormat": "video/mp4",
      "includes": "data/Dense Video Caption/**/*.mp4"
    },
    {
      "@type": "cr:FileSet",
      "@id": "images-2d-bbox",
      "name": "images-2d-bbox",
      "description": "Anonymized .jpg frames for the 2D Object Localization (and related spatial) sub-tracks.",
      "containedIn": {
        "@id": "repo"
      },
      "encodingFormat": "image/jpeg",
      "includes": "data/2dbbox/**/*.jpg"
    },
    {
      "@type": "cr:FileSet",
      "@id": "annotations",
      "name": "annotations",
      "description": "Public annotation files (CSV / TSV / JSON) describing the benchmark structure. Held-out evaluation labels are kept server-side and are not part of this distribution.",
      "containedIn": {
        "@id": "repo"
      },
      "encodingFormat": "application/json",
      "includes": "annotations/**/*"
    }
  ],
  "recordSet": [
    {
      "@type": "cr:RecordSet",
      "@id": "videos",
      "name": "videos",
      "description": "One record per anonymized video clip across all four video sub-tracks.",
      "key": {
        "@id": "videos/file_path"
      },
      "field": [
        {
          "@type": "cr:Field",
          "@id": "videos/file_path",
          "name": "file_path",
          "description": "Path to the video file relative to the dataset root.",
          "dataType": "sc:Text",
          "source": {
            "fileSet": {
              "@id": "videos-vqa"
            },
            "extract": {
              "fileProperty": "fullpath"
            }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "videos/content",
          "name": "content",
          "description": "Anonymized video content.",
          "dataType": "sc:VideoObject",
          "source": {
            "fileSet": {
              "@id": "videos-vqa"
            },
            "extract": {
              "fileProperty": "content"
            }
          }
        }
      ]
    },
    {
      "@type": "cr:RecordSet",
      "@id": "images",
      "name": "images",
      "description": "One record per anonymized image frame for the spatial sub-tracks.",
      "key": {
        "@id": "images/file_path"
      },
      "field": [
        {
          "@type": "cr:Field",
          "@id": "images/file_path",
          "name": "file_path",
          "description": "Path to the image file relative to the dataset root.",
          "dataType": "sc:Text",
          "source": {
            "fileSet": {
              "@id": "images-2d-bbox"
            },
            "extract": {
              "fileProperty": "fullpath"
            }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "images/content",
          "name": "content",
          "description": "Anonymized JPEG frame.",
          "dataType": "sc:ImageObject",
          "source": {
            "fileSet": {
              "@id": "images-2d-bbox"
            },
            "extract": {
              "fileProperty": "content"
            }
          }
        }
      ]
    }
  ]
}
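
The metadata added in this PR can be consumed with the MLCommons Croissant tooling. The sketch below is illustrative only: it assumes the mlcroissant reference loader (pip install mlcroissant) and that the gated repository files are accessible to the user; the record-set ID "videos" and its fields come from the JSON above, while the loading code itself is a generic usage sketch rather than an official loader for this benchmark.

# Minimal sketch: read croissant.json and peek at the declared record sets.
# Assumption: the `mlcroissant` package is installed and the (gated) media
# files referenced by the FileSets above are accessible to the current user.
import mlcroissant as mlc

# Point at the croissant.json added in this PR, or at the URL Hugging Face
# serves for the dataset once the file is merged.
ds = mlc.Dataset(jsonld="croissant.json")

print(ds.metadata.name)  # expected: "PhysicalAI-VANTAGE-Bench"

# Iterate the "videos" record set declared above; each record carries the
# fields "videos/file_path" and "videos/content".
for i, record in enumerate(ds.records(record_set="videos")):
    print(record)
    if i >= 2:  # only peek at the first few records
        break

Because the benchmark is evaluation-only with ground truth held server-side, a loader like this only exposes the public media files and structural fields, not the held-out labels.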