croissant.json: cumulative update (RAI dual keys, citation, new title/authors)
Browse files — croissant.json: +3 additions, −2 deletions
croissant.json
CHANGED
|
@@ -110,7 +110,7 @@
|
|
| 110 |
"English"
|
| 111 |
],
|
| 112 |
"inLanguage": "en",
|
| 113 |
-
"citeAs": "…" [previous citation value truncated in this capture]
|
| 114 |
"prov:wasDerivedFrom": [
|
| 115 |
{
|
| 116 |
"@type": "sc:Dataset",
|
|
@@ -650,5 +650,6 @@
|
|
| 650 |
"rai:dataUseCases": "VANTAGE-Bench is designed for (1) zero-shot evaluation of vision-language models on fixed-camera ('Observer AI') video understanding across spatial, temporal, and semantic reasoning; (2) measuring the 'Observer AI Gap' between model performance on consumer / cinematic video and on real fixed-infrastructure footage; (3) measuring the sim-to-real gap by comparing accuracy on DriveSim-generated and real-world matched scenes; and (4) studying stateless, single-pass VLM tracking via VANTAGE-SOT \u2014 the first quantitative evaluation protocol for VLMs as direct trackers. Validated tasks and metrics: VQA (Top-1 accuracy), Event Verification (macro F1), Temporal Localization (mIoU + Precision@0.5), Dense Video Captioning (SODA_c), 2D Object Localization (COCO mAP / F1), 2D Referring Expressions (Acc@IoU), 2D Spatial Pointing (pointing accuracy), Single-Object Tracking (mean spatial IoU + success AUC). The benchmark is not validated for and should not be used for model training, multi-camera reasoning, audio-dependent tasks, real-time control, identity recognition, or any clinical, legal, or safety-critical decision support.",
|
| 651 |
"rai:dataSocialImpact": "VANTAGE-Bench enables, for the first time, language-grounded evaluation of fixed-infrastructure video understanding across warehouse safety, transportation ITS, and smart spaces in one unified benchmark. By revealing the 'Observer AI Gap' (the measured drop in performance when frontier VLMs move from consumer video to fixed-infrastructure footage), it discourages premature claims of operational readiness for VLM-based safety systems. Foreseeable misuse risks include (a) using the released clips and visible annotations to train or fine-tune model weights, which would contaminate the held-out evaluation and invalidate published benchmark scores; (b) repurposing models tuned against this benchmark for surveillance against unredacted live feeds; (c) treating benchmark scores as evidence of deployment readiness for automated insurance, policing, or HR / disciplinary decisions; and (d) over-trusting strong sim-only performance on the synthetic tracking subset. Mitigations: the benchmark is evaluation-only with server-side held-out ground truth (preventing use as a training set); the dataset repository is gated, so users must accept the terms of use before downloading; the license explicitly prohibits training, fine-tuning, or any use other than evaluation; automated face and license-plate anonymization; and explicit limitations and use-case boundaries published with the benchmark.",
|
| 652 |
"rai:personalSensitiveInformation": "Source footage was captured in semi-public spaces (warehouses, traffic intersections, smart-space environments) and incidentally contains people and vehicles. Faces and vehicle license plates have been obfuscated by an automated de-identification pipeline. The released benchmark contains no direct personally identifiable information (PII) \u2014 no names, no audio transcripts, no demographic metadata, no health or financial data. Geographic identifiers (intersection or facility names) appear in some filenames. Any clips originally sourced from third-party YouTube re-uploads are not redistributed; users obtain those from the original source themselves. An ethics review was conducted by NVIDIA in addition to the technical anonymization step.",
|
| 653 |
-
"rai:hasSyntheticData": true
|
|
|
|
| 654 |
}
|
|
|
|
| 110 |
"English"
|
| 111 |
],
|
| 112 |
"inLanguage": "en",
|
| 113 |
+
"citeAs": "Bhat, Z. P., Nayyar, N., Jain, A., Chan, L. F., Suchanek, J., Wang, Y., Praveen, V., Kornuta, T., & Murali, V. N. (2026). VANTAGE-Bench: Evaluating the Infrastructure AI Gap in Vision-Language Models. In Advances in Neural Information Processing Systems (NeurIPS) Evaluations & Datasets Track.",
|
| 114 |
"prov:wasDerivedFrom": [
|
| 115 |
{
|
| 116 |
"@type": "sc:Dataset",
|
|
|
|
| 650 |
"rai:dataUseCases": "VANTAGE-Bench is designed for (1) zero-shot evaluation of vision-language models on fixed-camera ('Observer AI') video understanding across spatial, temporal, and semantic reasoning; (2) measuring the 'Observer AI Gap' between model performance on consumer / cinematic video and on real fixed-infrastructure footage; (3) measuring the sim-to-real gap by comparing accuracy on DriveSim-generated and real-world matched scenes; and (4) studying stateless, single-pass VLM tracking via VANTAGE-SOT \u2014 the first quantitative evaluation protocol for VLMs as direct trackers. Validated tasks and metrics: VQA (Top-1 accuracy), Event Verification (macro F1), Temporal Localization (mIoU + Precision@0.5), Dense Video Captioning (SODA_c), 2D Object Localization (COCO mAP / F1), 2D Referring Expressions (Acc@IoU), 2D Spatial Pointing (pointing accuracy), Single-Object Tracking (mean spatial IoU + success AUC). The benchmark is not validated for and should not be used for model training, multi-camera reasoning, audio-dependent tasks, real-time control, identity recognition, or any clinical, legal, or safety-critical decision support.",
|
| 651 |
"rai:dataSocialImpact": "VANTAGE-Bench enables, for the first time, language-grounded evaluation of fixed-infrastructure video understanding across warehouse safety, transportation ITS, and smart spaces in one unified benchmark. By revealing the 'Observer AI Gap' (the measured drop in performance when frontier VLMs move from consumer video to fixed-infrastructure footage), it discourages premature claims of operational readiness for VLM-based safety systems. Foreseeable misuse risks include (a) using the released clips and visible annotations to train or fine-tune model weights, which would contaminate the held-out evaluation and invalidate published benchmark scores; (b) repurposing models tuned against this benchmark for surveillance against unredacted live feeds; (c) treating benchmark scores as evidence of deployment readiness for automated insurance, policing, or HR / disciplinary decisions; and (d) over-trusting strong sim-only performance on the synthetic tracking subset. Mitigations: the benchmark is evaluation-only with server-side held-out ground truth (preventing use as a training set); the dataset repository is gated, so users must accept the terms of use before downloading; the license explicitly prohibits training, fine-tuning, or any use other than evaluation; automated face and license-plate anonymization; and explicit limitations and use-case boundaries published with the benchmark.",
|
| 652 |
"rai:personalSensitiveInformation": "Source footage was captured in semi-public spaces (warehouses, traffic intersections, smart-space environments) and incidentally contains people and vehicles. Faces and vehicle license plates have been obfuscated by an automated de-identification pipeline. The released benchmark contains no direct personally identifiable information (PII) \u2014 no names, no audio transcripts, no demographic metadata, no health or financial data. Geographic identifiers (intersection or facility names) appear in some filenames. Any clips originally sourced from third-party YouTube re-uploads are not redistributed; users obtain those from the original source themselves. An ethics review was conducted by NVIDIA in addition to the technical anonymization step.",
|
| 653 |
+
"rai:hasSyntheticData": true,
|
| 654 |
+
"citation": "Bhat, Z. P., Nayyar, N., Jain, A., Chan, L. F., Suchanek, J., Wang, Y., Praveen, V., Kornuta, T., & Murali, V. N. (2026). VANTAGE-Bench: Evaluating the Infrastructure AI Gap in Vision-Language Models. In Advances in Neural Information Processing Systems (NeurIPS) Evaluations & Datasets Track."
|
| 655 |
}
|