{
    "1706.03762": {
        "arxivId": "1706.03762",
        "title": "Attention is All you Need"
    },
    "1405.0312": {
        "arxivId": "1405.0312",
        "title": "Microsoft COCO: Common Objects in Context"
    },
    "2005.14165": {
        "arxivId": "2005.14165",
        "title": "Language Models are Few-Shot Learners"
    },
    "2103.00020": {
        "arxivId": "2103.00020",
        "title": "Learning Transferable Visual Models From Natural Language Supervision"
    },
    "2006.11239": {
        "arxivId": "2006.11239",
        "title": "Denoising Diffusion Probabilistic Models"
    },
    "2112.10752": {
        "arxivId": "2112.10752",
        "title": "High-Resolution Image Synthesis with Latent Diffusion Models"
    },
    "2005.12872": {
        "arxivId": "2005.12872",
        "title": "End-to-End Object Detection with Transformers"
    },
    "2203.02155": {
        "arxivId": "2203.02155",
        "title": "Training language models to follow instructions with human feedback"
    },
    "2302.13971": {
        "arxivId": "2302.13971",
        "title": "LLaMA: Open and Efficient Foundation Language Models"
    },
    "2303.08774": {
        "arxivId": "2303.08774",
        "title": "GPT-4 Technical Report"
    },
    "2307.09288": {
        "arxivId": "2307.09288",
        "title": "Llama 2: Open Foundation and Fine-Tuned Chat Models"
    },
    "2201.11903": {
        "arxivId": "2201.11903",
        "title": "Chain of Thought Prompting Elicits Reasoning in Large Language Models"
    },
    "1412.2306": {
        "arxivId": "1412.2306",
        "title": "Deep visual-semantic alignments for generating image descriptions"
    },
    "1602.07332": {
        "arxivId": "1602.07332",
        "title": "Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations"
    },
    "1505.00468": {
        "arxivId": "1505.00468",
        "title": "VQA: Visual Question Answering"
    },
    "2304.02643": {
        "arxivId": "2304.02643",
        "title": "Segment Anything"
    },
    "1411.5726": {
        "arxivId": "1411.5726",
        "title": "CIDEr: Consensus-based image description evaluation"
    },
    "1707.07998": {
        "arxivId": "1707.07998",
        "title": "Bottom-Up and Top-Down Attention for Image Captioning and Visual Question Answering"
    },
    "2205.11916": {
        "arxivId": "2205.11916",
        "title": "Large Language Models are Zero-Shot Reasoners"
    },
    "2109.01652": {
        "arxivId": "2109.01652",
        "title": "Finetuned Language Models Are Zero-Shot Learners"
    },
    "2201.12086": {
        "arxivId": "2201.12086",
        "title": "BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation"
    },
    "2301.12597": {
        "arxivId": "2301.12597",
        "title": "BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models"
    },
    "2210.11416": {
        "arxivId": "2210.11416",
        "title": "Scaling Instruction-Finetuned Language Models"
    },
    "2204.14198": {
        "arxivId": "2204.14198",
        "title": "Flamingo: a Visual Language Model for Few-Shot Learning"
    },
    "2304.08485": {
        "arxivId": "2304.08485",
        "title": "Visual Instruction Tuning"
    },
    "2210.08402": {
        "arxivId": "2210.08402",
        "title": "LAION-5B: An open large-scale dataset for training next generation image-text models"
    },
    "2305.18290": {
        "arxivId": "2305.18290",
        "title": "Direct Preference Optimization: Your Language Model is Secretly a Reward Model"
    },
    "1505.04870": {
        "arxivId": "1505.04870",
        "title": "Flickr30k Entities: Collecting Region-to-Phrase Correspondences for Richer Image-to-Sentence Models"
    },
    "2303.18223": {
        "arxivId": "2303.18223",
        "title": "A Survey of Large Language Models"
    },
    "2304.07193": {
        "arxivId": "2304.07193",
        "title": "DINOv2: Learning Robust Visual Features without Supervision"
    },
    "2101.03961": {
        "arxivId": "2101.03961",
        "title": "Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity"
    },
    "2212.10560": {
        "arxivId": "2212.10560",
        "title": "Self-Instruct: Aligning Language Models with Self-Generated Instructions"
    },
    "2107.07651": {
        "arxivId": "2107.07651",
        "title": "Align before Fuse: Vision and Language Representation Learning with Momentum Distillation"
    },
    "2009.01325": {
        "arxivId": "2009.01325",
        "title": "Learning to summarize from human feedback"
    },
    "2110.08207": {
        "arxivId": "2110.08207",
        "title": "Multitask Prompted Training Enables Zero-Shot Task Generalization"
    },
    "2310.03744": {
        "arxivId": "2310.03744",
        "title": "Improved Baselines with Visual Instruction Tuning"
    },
    "2305.06500": {
        "arxivId": "2305.06500",
        "title": "InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning"
    },
    "2304.10592": {
        "arxivId": "2304.10592",
        "title": "MiniGPT-4: Enhancing Vision-Language Understanding with Advanced Large Language Models"
    },
    "1312.6211": {
        "arxivId": "1312.6211",
        "title": "An Empirical Investigation of Catastrophic Forgeting in Gradient-Based Neural Networks"
    },
    "1909.08593": {
        "arxivId": "1909.08593",
        "title": "Fine-Tuning Language Models from Human Preferences"
    },
    "2302.04761": {
        "arxivId": "2302.04761",
        "title": "Toolformer: Language Models Can Teach Themselves to Use Tools"
    },
    "2303.03378": {
        "arxivId": "2303.03378",
        "title": "PaLM-E: An Embodied Multimodal Language Model"
    },
    "2111.02114": {
        "arxivId": "2111.02114",
        "title": "LAION-400M: Open Dataset of CLIP-Filtered 400 Million Image-Text Pairs"
    },
    "2112.09332": {
        "arxivId": "2112.09332",
        "title": "WebGPT: Browser-assisted question-answering with human feedback"
    },
    "2203.03605": {
        "arxivId": "2203.03605",
        "title": "DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection"
    },
    "2104.08786": {
        "arxivId": "2104.08786",
        "title": "Fantastically Ordered Prompts and Where to Find Them: Overcoming Few-Shot Prompt Order Sensitivity"
    },
    "2102.08981": {
        "arxivId": "2102.08981",
        "title": "Conceptual 12M: Pushing Web-Scale Image-Text Pre-Training To Recognize Long-Tail Visual Concepts"
    },
    "2309.16609": {
        "arxivId": "2309.16609",
        "title": "Qwen Technical Report"
    },
    "2205.10625": {
        "arxivId": "2205.10625",
        "title": "Least-to-Most Prompting Enables Complex Reasoning in Large Language Models"
    },
    "1811.10830": {
        "arxivId": "1811.10830",
        "title": "From Recognition to Cognition: Visual Commonsense Reasoning"
    },
    "2202.03052": {
        "arxivId": "2202.03052",
        "title": "OFA: Unifying Architectures, Tasks, and Modalities Through a Simple Sequence-to-Sequence Learning Framework"
    },
    "1906.10770": {
        "arxivId": "1906.10770",
        "title": "Deep Modular Co-Attention Networks for Visual Question Answering"
    },
    "1709.05522": {
        "arxivId": "1709.05522",
        "title": "AISHELL-1: An open-source Mandarin speech corpus and a speech recognition baseline"
    },
    "2209.09513": {
        "arxivId": "2209.09513",
        "title": "Learn to Explain: Multimodal Reasoning via Thought Chains for Science Question Answering"
    },
    "2304.14178": {
        "arxivId": "2304.14178",
        "title": "mPLUG-Owl: Modularization Empowers Large Language Models with Multimodality"
    },
    "2108.10904": {
        "arxivId": "2108.10904",
        "title": "SimVLM: Simple Visual Language Model Pretraining with Weak Supervision"
    },
    "2308.12966": {
        "arxivId": "2308.12966",
        "title": "Qwen-VL: A Frontier Large Vision-Language Model with Versatile Abilities"
    },
    "2303.17580": {
        "arxivId": "2303.17580",
        "title": "HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face"
    },
    "2106.13884": {
        "arxivId": "2106.13884",
        "title": "Multimodal Few-Shot Learning with Frozen Language Models"
    },
    "2303.16199": {
        "arxivId": "2303.16199",
        "title": "LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention"
    },
    "2305.05665": {
        "arxivId": "2305.05665",
        "title": "ImageBind One Embedding Space to Bind Them All"
    },
    "2306.02858": {
        "arxivId": "2306.02858",
        "title": "Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding"
    },
    "2303.04671": {
        "arxivId": "2303.04671",
        "title": "Visual ChatGPT: Talking, Drawing and Editing with Visual Foundation Models"
    },
    "2307.06281": {
        "arxivId": "2307.06281",
        "title": "MMBench: Is Your Multi-modal Model an All-around Player?"
    },
    "2211.07636": {
        "arxivId": "2211.07636",
        "title": "EVA: Exploring the Limits of Masked Visual Representation Learning at Scale"
    },
    "2204.00598": {
        "arxivId": "2204.00598",
        "title": "Socratic Models: Composing Zero-Shot Multimodal Reasoning with Language"
    },
    "2212.07143": {
        "arxivId": "2212.07143",
        "title": "Reproducible Scaling Laws for Contrastive Language-Image Learning"
    },
    "2306.14824": {
        "arxivId": "2306.14824",
        "title": "Kosmos-2: Grounding Multimodal Large Language Models to the World"
    },
    "2102.02779": {
        "arxivId": "2102.02779",
        "title": "Unifying Vision-and-Language Tasks via Text Generation"
    },
    "2304.03277": {
        "arxivId": "2304.03277",
        "title": "Instruction Tuning with GPT-4"
    },
    "2306.13394": {
        "arxivId": "2306.13394",
        "title": "MME: A Comprehensive Evaluation Benchmark for Multimodal Large Language Models"
    },
    "2304.15010": {
        "arxivId": "2304.15010",
        "title": "LLaMA-Adapter V2: Parameter-Efficient Visual Instruction Model"
    },
    "2305.03726": {
        "arxivId": "2305.03726",
        "title": "Otter: A Multi-Modal Model with In-Context Instruction Tuning"
    },
    "2309.17421": {
        "arxivId": "2309.17421",
        "title": "The Dawn of LMMs: Preliminary Explorations with GPT-4V(ision)"
    },
    "2306.15195": {
        "arxivId": "2306.15195",
        "title": "Shikra: Unleashing Multimodal LLM's Referential Dialogue Magic"
    },
    "2210.03493": {
        "arxivId": "2210.03493",
        "title": "Automatic Chain of Thought Prompting in Large Language Models"
    },
    "2305.10355": {
        "arxivId": "2305.10355",
        "title": "Evaluating Object Hallucination in Large Vision-Language Models"
    },
    "1812.08658": {
        "arxivId": "1812.08658",
        "title": "nocaps: novel object captioning at scale"
    },
    "2306.00890": {
        "arxivId": "2306.00890",
        "title": "LLaVA-Med: Training a Large Language-and-Vision Assistant for Biomedicine in One Day"
    },
    "2311.16502": {
        "arxivId": "2311.16502",
        "title": "MMMU: A Massive Multi-Discipline Multimodal Understanding and Reasoning Benchmark for Expert AGI"
    },
    "2308.02490": {
        "arxivId": "2308.02490",
        "title": "MM-Vet: Evaluating Large Multimodal Models for Integrated Capabilities"
    },
    "2211.10435": {
        "arxivId": "2211.10435",
        "title": "PAL: Program-aided Language Models"
    },
    "1812.05252": {
        "arxivId": "1812.05252",
        "title": "Dynamic Fusion With Intra- and Inter-Modality Attention Flow for Visual Question Answering"
    },
    "2109.05014": {
        "arxivId": "2109.05014",
        "title": "An Empirical Study of GPT-3 for Few-Shot Knowledge-Based VQA"
    },
    "2305.11175": {
        "arxivId": "2305.11175",
        "title": "VisionLLM: Large Language Model is also an Open-Ended Decoder for Vision-Centric Tasks"
    },
    "2305.06355": {
        "arxivId": "2305.06355",
        "title": "VideoChat: Chat-Centric Video Understanding"
    },
    "2311.12793": {
        "arxivId": "2311.12793",
        "title": "ShareGPT4V: Improving Large Multi-Modal Models with Better Captions"
    },
    "2307.16125": {
        "arxivId": "2307.16125",
        "title": "SEED-Bench: Benchmarking Multimodal LLMs with Generative Comprehension"
    },
    "1809.02156": {
        "arxivId": "1809.02156",
        "title": "Object Hallucination in Image Captioning"
    },
    "2306.05424": {
        "arxivId": "2306.05424",
        "title": "Video-ChatGPT: Towards Detailed Video Understanding via Large Vision and Language Models"
    },
    "2303.11381": {
        "arxivId": "2303.11381",
        "title": "MM-REACT: Prompting ChatGPT for Multimodal Reasoning and Action"
    },
    "2303.15389": {
        "arxivId": "2303.15389",
        "title": "EVA-CLIP: Improved Training Techniques for CLIP at Scale"
    },
    "2211.11559": {
        "arxivId": "2211.11559",
        "title": "Visual Programming: Compositional visual reasoning without training"
    },
    "1901.06706": {
        "arxivId": "1901.06706",
        "title": "Visual Entailment: A Novel Task for Fine-Grained Image Understanding"
    },
    "2311.03079": {
        "arxivId": "2311.03079",
        "title": "CogVLM: Visual Expert for Pretrained Language Models"
    },
    "2308.01390": {
        "arxivId": "2308.01390",
        "title": "OpenFlamingo: An Open-Source Framework for Training Large Autoregressive Vision-Language Models"
    },
    "2302.00923": {
        "arxivId": "2302.00923",
        "title": "Multimodal Chain-of-Thought Reasoning in Language Models"
    },
    "2309.05519": {
        "arxivId": "2309.05519",
        "title": "NExT-GPT: Any-to-Any Multimodal LLM"
    },
    "1808.10583": {
        "arxivId": "1808.10583",
        "title": "AISHELL-2: Transforming Mandarin ASR Research Into Industrial Scale"
    },
    "2310.02255": {
        "arxivId": "2310.02255",
        "title": "MathVista: Evaluating Mathematical Reasoning of Foundation Models in Visual Contexts"
    },
    "2212.12017": {
        "arxivId": "2212.12017",
        "title": "OPT-IML: Scaling Language Model Instruction Meta Learning through the Lens of Generalization"
    },
    "2304.09842": {
        "arxivId": "2304.09842",
        "title": "Chameleon: Plug-and-Play Compositional Reasoning with Large Language Models"
    },
    "2308.00692": {
        "arxivId": "2308.00692",
        "title": "LISA: Reasoning Segmentation via Large Language Model"
    },
    "2305.16355": {
        "arxivId": "2305.16355",
        "title": "PandaGPT: One Model To Instruction-Follow Them All"
    },
    "1510.01431": {
        "arxivId": "1510.01431",
        "title": "SentiCap: Generating Image Descriptions with Sentiments"
    },
    "2305.04790": {
        "arxivId": "2305.04790",
        "title": "MultiModal-GPT: A Vision and Language Model for Dialogue with Humans"
    },
    "2305.11000": {
        "arxivId": "2305.11000",
        "title": "SpeechGPT: Empowering Large Language Models with Intrinsic Cross-Modal Conversational Abilities"
    },
    "2209.14610": {
        "arxivId": "2209.14610",
        "title": "Dynamic Prompt Learning via Policy Gradient for Semi-structured Mathematical Reasoning"
    },
    "2310.07704": {
        "arxivId": "2310.07704",
        "title": "Ferret: Refer and Ground Anything Anywhere at Any Granularity"
    },
    "2306.12925": {
        "arxivId": "2306.12925",
        "title": "AudioPaLM: A Large Language Model That Can Speak and Listen"
    },
    "2309.14525": {
        "arxivId": "2309.14525",
        "title": "Aligning Large Multimodal Models with Factually Augmented RLHF"
    },
    "2306.05425": {
        "arxivId": "2306.05425",
        "title": "MIMIC-IT: Multi-Modal In-Context Instruction Tuning"
    },
    "2307.03601": {
        "arxivId": "2307.03601",
        "title": "GPT4RoI: Instruction Tuning Large Language Model on Region-of-Interest"
    },
    "2102.09542": {
        "arxivId": "2102.09542",
        "title": "Slake: A Semantically-Labeled Knowledge-Enhanced Dataset For Medical Visual Question Answering"
    },
    "2307.12981": {
        "arxivId": "2307.12981",
        "title": "3D-LLM: Injecting the 3D World into Large Language Models"
    },
    "2305.18752": {
        "arxivId": "2305.18752",
        "title": "GPT4Tools: Teaching Large Language Model to Use Tools via Self-instruction"
    },
    "2311.06607": {
        "arxivId": "2311.06607",
        "title": "Monkey: Image Resolution and Text Label are Important Things for Large Multi-Modal Models"
    },
    "2306.14565": {
        "arxivId": "2306.14565",
        "title": "Mitigating Hallucination in Large Multi-Modal Models via Robust Instruction Tuning"
    },
    "2305.15021": {
        "arxivId": "2305.15021",
        "title": "EmbodiedGPT: Vision-Language Pre-Training via Embodied Chain of Thought"
    },
    "2311.07575": {
        "arxivId": "2311.07575",
        "title": "SPHINX: The Joint Mixing of Weights, Tasks, and Visual Embeddings for Multi-modal Large Language Models"
    },
    "2307.15189": {
        "arxivId": "2307.15189",
        "title": "Med-Flamingo: a Multimodal Medical Few-shot Learner"
    },
    "2303.17395": {
        "arxivId": "2303.17395",
        "title": "WavCaps: A ChatGPT-Assisted Weakly-Labelled Audio Captioning Dataset for Audio-Language Multimodal Research"
    },
    "1711.06475": {
        "arxivId": "1711.06475",
        "title": "AI Challenger : A Large-scale Dataset for Going Deeper in Image Understanding"
    },
    "2003.10286": {
        "arxivId": "2003.10286",
        "title": "PathVQA: 30000+ Questions for Medical Visual Question Answering"
    },
    "2303.02151": {
        "arxivId": "2303.02151",
        "title": "Prompt, Generate, Then Cache: Cascade of Foundation Models Makes Strong Few-Shot Learners"
    },
    "2403.09611": {
        "arxivId": "2403.09611",
        "title": "MM1: Methods, Analysis & Insights from Multimodal LLM Pre-training"
    },
    "2205.12255": {
        "arxivId": "2205.12255",
        "title": "TALM: Tool Augmented Language Models"
    },
    "2306.06687": {
        "arxivId": "2306.06687",
        "title": "LAMM: Language-Assisted Multi-Modal Instruction-Tuning Dataset, Framework, and Benchmark"
    },
    "2306.09093": {
        "arxivId": "2306.09093",
        "title": "Macaw-LLM: Multi-Modal Language Modeling with Image, Audio, Video, and Text Integration"
    },
    "2306.09265": {
        "arxivId": "2306.09265",
        "title": "LVLM-eHub: A Comprehensive Evaluation Benchmark for Large Vision-Language Models"
    },
    "2305.11834": {
        "arxivId": "2305.11834",
        "title": "Pengi: An Audio Language Model for Audio Tasks"
    },
    "2309.07915": {
        "arxivId": "2309.07915",
        "title": "MMICL: Empowering Vision-language Model with Multi-Modal In-Context Learning"
    },
    "2311.16922": {
        "arxivId": "2311.16922",
        "title": "Mitigating Object Hallucinations in Large Vision-Language Models through Visual Contrastive Decoding"
    },
    "2307.05222": {
        "arxivId": "2307.05222",
        "title": "Generative Pretraining in Multimodality"
    },
    "2310.00754": {
        "arxivId": "2310.00754",
        "title": "Analyzing and Mitigating Object Hallucination in Large Vision-Language Models"
    },
    "2312.13771": {
        "arxivId": "2312.13771",
        "title": "AppAgent: Multimodal Agents as Smartphone Users"
    },
    "2306.04387": {
        "arxivId": "2306.04387",
        "title": "M3IT: A Large-Scale Dataset towards Multi-Modal Multilingual Instruction Tuning"
    },
    "2212.10773": {
        "arxivId": "2212.10773",
        "title": "MultiInstruct: Improving Multi-Modal Zero-Shot Learning via Instruction Tuning"
    },
    "2305.16934": {
        "arxivId": "2305.16934",
        "title": "On Evaluating Adversarial Robustness of Large Vision-Language Models"
    },
    "2401.15947": {
        "arxivId": "2401.15947",
        "title": "MoE-LLaVA: Mixture of Experts for Large Vision-Language Models"
    },
    "2312.00849": {
        "arxivId": "2312.00849",
        "title": "RLHF-V: Towards Trustworthy MLLMs via Behavior Alignment from Fine-Grained Correctional Human Feedback"
    },
    "2311.03356": {
        "arxivId": "2311.03356",
        "title": "GLaMM: Pixel Grounding Large Multimodal Model"
    },
    "2309.03905": {
        "arxivId": "2309.03905",
        "title": "ImageBind-LLM: Multi-modality Instruction Tuning"
    },
    "2308.16911": {
        "arxivId": "2308.16911",
        "title": "PointLLM: Empowering Large Language Models to Understand Point Clouds"
    },
    "2305.10415": {
        "arxivId": "2305.10415",
        "title": "PMC-VQA: Visual Instruction Tuning for Medical Visual Question Answering"
    },
    "2310.14566": {
        "arxivId": "2310.14566",
        "title": "HallusionBench: You See What You Think? Or You Think What You See? An Image-Context Reasoning Benchmark Challenging for GPT-4V(ision), LLaVA-1.5, and Other Multi-modality Models"
    },
    "2212.10846": {
        "arxivId": "2212.10846",
        "title": "From Images to Textual Prompts: Zero-shot Visual Question Answering with Frozen Large Language Models"
    },
    "2305.04160": {
        "arxivId": "2305.04160",
        "title": "X-LLM: Bootstrapping Advanced Large Language Models by Treating Multi-Modalities as Foreign Languages"
    },
    "2303.06594": {
        "arxivId": "2303.06594",
        "title": "ChatGPT Asks, BLIP-2 Answers: Automatic Questioning Towards Enriched Visual Descriptions"
    },
    "2307.02499": {
        "arxivId": "2307.02499",
        "title": "mPLUG-DocOwl: Modularized Multimodal Large Language Model for Document Understanding"
    },
    "2305.14167": {
        "arxivId": "2305.14167",
        "title": "DetGPT: Detect What You Need via Reasoning"
    },
    "2211.11682": {
        "arxivId": "2211.11682",
        "title": "PointCLIP V2: Adapting CLIP for Powerful 3D Open-world Learning"
    },
    "2309.16058": {
        "arxivId": "2309.16058",
        "title": "AnyMAL: An Efficient and Scalable Any-Modality Augmented Language Model"
    },
    "2402.11684": {
        "arxivId": "2402.11684",
        "title": "ALLaVA: Harnessing GPT4V-Synthesized Data for Lite Vision-Language Models"
    },
    "2211.16198": {
        "arxivId": "2211.16198",
        "title": "SuS-X: Training-Free Name-Only Transfer of Vision-Language Models"
    },
    "2310.16045": {
        "arxivId": "2310.16045",
        "title": "Woodpecker: Hallucination Correction for Multimodal Large Language Models"
    },
    "2311.07574": {
        "arxivId": "2311.07574",
        "title": "To See is to Believe: Prompting GPT-4V for Better Visual Instruction Tuning"
    },
    "2307.14539": {
        "arxivId": "2307.14539",
        "title": "Jailbreak in pieces: Compositional Adversarial Attacks on Multi-Modal Language Models"
    },
    "2305.15023": {
        "arxivId": "2305.15023",
        "title": "Cheap and Quick: Efficient Vision-Language Instruction Tuning for Large Language Models"
    },
    "2305.02677": {
        "arxivId": "2305.02677",
        "title": "Caption Anything: Interactive Image Description with Diverse Multimodal Controls"
    },
    "2311.07397": {
        "arxivId": "2311.07397",
        "title": "An LLM-free Multi-dimensional Benchmark for MLLMs Hallucination Evaluation"
    },
    "2311.05332": {
        "arxivId": "2311.05332",
        "title": "On the Road with GPT-4V(ision): Early Explorations of Visual-Language Model on Autonomous Driving"
    },
    "2307.02469": {
        "arxivId": "2307.02469",
        "title": "What Matters in Training a GPT4-Style Language Model with Multimodal Inputs?"
    },
    "2402.03766": {
        "arxivId": "2402.03766",
        "title": "MobileVLM V2: Faster and Stronger Baseline for Vision Language Model"
    },
    "2312.14135": {
        "arxivId": "2312.14135",
        "title": "V*: Guided Visual Search as a Core Mechanism in Multimodal LLMs"
    },
    "2202.06767": {
        "arxivId": "2202.06767",
        "title": "Wukong: A 100 Million Large-scale Chinese Cross-modal Pre-training Benchmark"
    },
    "2403.12895": {
        "arxivId": "2403.12895",
        "title": "mPLUG-DocOwl 1.5: Unified Structure Learning for OCR-free Document Understanding"
    },
    "2311.12871": {
        "arxivId": "2311.12871",
        "title": "An Embodied Generalist Agent in 3D World"
    },
    "2310.16436": {
        "arxivId": "2310.16436",
        "title": "DDCoT: Duty-Distinct Chain-of-Thought Prompting for Multimodal Reasoning in Language Models"
    },
    "2402.12226": {
        "arxivId": "2402.12226",
        "title": "AnyGPT: Unified Multimodal LLM with Discrete Sequence Modeling"
    },
    "2310.05126": {
        "arxivId": "2310.05126",
        "title": "UReader: Universal OCR-free Visually-situated Language Understanding with Multimodal Large Language Model"
    },
    "2308.15126": {
        "arxivId": "2308.15126",
        "title": "Evaluation and Analysis of Hallucination in Large Vision-Language Models"
    },
    "2401.16158": {
        "arxivId": "2401.16158",
        "title": "Mobile-Agent: Autonomous Multi-Modal Mobile Device Agent with Visual Perception"
    },
    "2403.04473": {
        "arxivId": "2403.04473",
        "title": "TextMonkey: An OCR-Free Large Multimodal Model for Understanding Document"
    },
    "2309.09971": {
        "arxivId": "2309.09971",
        "title": "MindAgent: Emergent Gaming Interaction"
    },
    "2308.12067": {
        "arxivId": "2308.12067",
        "title": "InstructionGPT-4: A 200-Instruction Paradigm for Fine-Tuning MiniGPT-4"
    },
    "2312.12436": {
        "arxivId": "2312.12436",
        "title": "A Challenger to GPT-4V? Early Explorations of Gemini in Visual Expertise"
    },
    "2312.10665": {
        "arxivId": "2312.10665",
        "title": "Silkie: Preference Distillation for Large Visual Language Models"
    },
    "2312.10032": {
        "arxivId": "2312.10032",
        "title": "Osprey: Pixel Understanding with Visual Instruction Tuning"
    },
    "2305.16103": {
        "arxivId": "2305.16103",
        "title": "ChatBridge: Bridging Modalities with Large Language Model as a Language Catalyst"
    },
    "2305.14705": {
        "arxivId": "2305.14705",
        "title": "Mixture-of-Experts Meets Instruction Tuning: A Winning Combination for Large Language Models"
    },
    "2310.01779": {
        "arxivId": "2310.01779",
        "title": "HallE-Switch: Rethinking and Controlling Object Existence Hallucinations in Large Vision Language Models for Detailed Caption"
    },
    "2305.14985": {
        "arxivId": "2305.14985",
        "title": "IdealGPT: Iteratively Decomposing Vision and Language Reasoning via Large Language Models"
    },
    "2311.18651": {
        "arxivId": "2311.18651",
        "title": "LL3DA: Visual Interactive Instruction Tuning for Omni-3D Understanding, Reasoning, and Planning"
    },
    "2308.12038": {
        "arxivId": "2308.12038",
        "title": "Large Multilingual Models Pivot Zero-Shot Multimodal Learning across Languages"
    },
    "2311.16103": {
        "arxivId": "2311.16103",
        "title": "Video-Bench: A Comprehensive Benchmark and Toolkit for Evaluating Video-based Large Language Models"
    },
    "2310.00582": {
        "arxivId": "2310.00582",
        "title": "Pink: Unveiling the Power of Referential Comprehension for Multi-modal LLMs"
    },
    "2312.06968": {
        "arxivId": "2312.06968",
        "title": "Hallucination Augmented Contrastive Learning for Multimodal Large Language Model"
    },
    "2309.09958": {
        "arxivId": "2309.09958",
        "title": "An Empirical Study of Scaling Instruct-Tuned Large Multimodal Models"
    },
    "2305.02317": {
        "arxivId": "2305.02317",
        "title": "Visual Chain of Thought: Bridging Logical Gaps with Multimodal Infillings"
    },
    "2311.01477": {
        "arxivId": "2311.01477",
        "title": "FAITHSCORE: Evaluating Hallucinations in Large Vision-Language Models"
    },
    "2309.15564": {
        "arxivId": "2309.15564",
        "title": "Jointly Training Large Autoregressive Multimodal Models"
    },
    "2304.07919": {
        "arxivId": "2304.07919",
        "title": "Chain of Thought Prompt Tuning in Vision Language Models"
    },
    "2401.12915": {
        "arxivId": "2401.12915",
        "title": "Red Teaming Visual Language Models"
    },
    "2311.18248": {
        "arxivId": "2311.18248",
        "title": "mPLUG-PaperOwl: Scientific Diagram Analysis with the Multimodal Large Language Model"
    },
    "2312.02153": {
        "arxivId": "2312.02153",
        "title": "Aligning and Prompting Everything All at Once for Universal Visual Perception"
    },
    "2311.01487": {
        "arxivId": "2311.01487",
        "title": "What Makes for Good Visual Instructions? Synthesizing Complex Visual Reasoning Instructions for Visual Instruction Tuning"
    },
    "2308.07891": {
        "arxivId": "2308.07891",
        "title": "Link-Context Learning for Multimodal LLMs"
    },
    "2401.06395": {
        "arxivId": "2401.06395",
        "title": "ModaVerse: Efficiently Transforming Modalities with LLMs"
    },
    "2312.07553": {
        "arxivId": "2312.07553",
        "title": "Hijacking Context in Large Multi-modal Models"
    },
    "2312.02520": {
        "arxivId": "2312.02520",
        "title": "Towards More Unified In-Context Visual Understanding"
    },
    "2305.13903": {
        "arxivId": "2305.13903",
        "title": "Let's Think Frame by Frame: Evaluating Video Chain of Thought with Video Infilling and Prediction"
    }
}