<!DOCTYPE html>
<html lang="en" data-theme="dark">
<head>
<meta charset="UTF-8"/>
<meta name="viewport" content="width=device-width,initial-scale=1"/>
<title>Vision-Language Models — Survey Overview</title>
<link rel="preconnect" href="https://fonts.googleapis.com"/>
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&display=swap" rel="stylesheet"/>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.1/css/all.min.css"/>
<style>
*,*::before,*::after{box-sizing:border-box;margin:0;padding:0}
:root{
--bg:#0b1120;--bg2:#111827;--surface:#1a2332;--surface2:#243044;
--border:rgba(255,255,255,.06);--text:#e8ecf4;--text2:#8896ab;--text3:#5a6a80;
--radius:10px;--radius-lg:16px;
--accent:#818cf8;--accent2:#6366f1;
--transition:all .25s cubic-bezier(.4,0,.2,1);
}
html{scroll-behavior:smooth;scroll-padding-top:80px}
body{font-family:'Inter',-apple-system,BlinkMacSystemFont,sans-serif;background:var(--bg);color:var(--text);line-height:1.6;-webkit-font-smoothing:antialiased}
a{color:var(--accent);text-decoration:none;transition:color .2s}
a:hover{color:#a5b4fc}
/* ─── HERO ─── */
.hero{position:relative;min-height:55vh;display:flex;align-items:center;justify-content:center;text-align:center;overflow:hidden}
.hero-bg{position:absolute;inset:0}
.hero-bg::before{content:'';position:absolute;inset:0;background:radial-gradient(ellipse at 30% 20%,rgba(99,102,241,.15),transparent 60%),radial-gradient(ellipse at 70% 80%,rgba(6,182,212,.1),transparent 50%),var(--bg)}
.hero-bg::after{content:'';position:absolute;inset:0;background:url('https://images.unsplash.com/photo-1676299081847-824916de030a?w=1400&q=75') center/cover;opacity:.08}
.hero-content{position:relative;z-index:2;max-width:820px;padding:3rem 1.5rem}
.hero h1{font-size:clamp(1.8rem,5vw,3.2rem);font-weight:900;letter-spacing:-.02em;background:linear-gradient(135deg,#818cf8 0%,#06b6d4 50%,#a78bfa 100%);-webkit-background-clip:text;-webkit-text-fill-color:transparent;background-clip:text;margin-bottom:.6rem;line-height:1.15}
.hero .subtitle{font-size:1.05rem;color:var(--text2);max-width:560px;margin:0 auto 2rem;line-height:1.7}
.pills{display:flex;gap:.6rem;justify-content:center;flex-wrap:wrap;margin-bottom:2rem}
.pill{padding:.4rem 1rem;border-radius:99px;font-size:.75rem;font-weight:600;background:rgba(255,255,255,.04);border:1px solid var(--border);color:var(--text2);display:inline-flex;align-items:center;gap:.4rem}
.pill i{font-size:.65rem}
.stats{display:flex;gap:1rem;justify-content:center;flex-wrap:wrap}
.stat-card{background:rgba(255,255,255,.03);backdrop-filter:blur(12px);border:1px solid var(--border);border-radius:var(--radius);padding:.8rem 1.4rem;min-width:110px;text-align:center;transition:var(--transition)}
.stat-card:hover{border-color:rgba(129,140,248,.3);transform:translateY(-2px)}
.stat-num{font-size:1.6rem;font-weight:800;color:var(--accent);display:block}
.stat-label{font-size:.68rem;color:var(--text3);text-transform:uppercase;letter-spacing:.06em;margin-top:.1rem}
.hero-links{display:flex;gap:.8rem;margin-top:1.8rem;justify-content:center;flex-wrap:wrap}
.cite-link{display:inline-flex;align-items:center;gap:.5rem;padding:.6rem 1.4rem;background:rgba(99,102,241,.12);border:1px solid rgba(99,102,241,.25);border-radius:99px;color:var(--accent);font-size:.85rem;font-weight:600;transition:var(--transition)}
.cite-link:hover{background:rgba(99,102,241,.2);text-decoration:none;transform:translateY(-1px)}
.cite-link.github-link{background:rgba(255,255,255,.06);border-color:rgba(255,255,255,.15);color:var(--text)}
.cite-link.github-link:hover{background:rgba(255,255,255,.12)}
/* ─── SEARCH BAR ─── */
.search-wrap{position:sticky;top:0;z-index:50;background:rgba(11,17,32,.85);backdrop-filter:blur(16px);border-bottom:1px solid var(--border);padding:.6rem 1.5rem}
.search-inner{max-width:1400px;margin:0 auto;display:flex;align-items:center;gap:.8rem}
.search-box{flex:1;display:flex;align-items:center;background:var(--surface);border:1px solid var(--border);border-radius:99px;padding:0 1rem;transition:var(--transition)}
.search-box:focus-within{border-color:rgba(129,140,248,.4);box-shadow:0 0 0 3px rgba(129,140,248,.1)}
.search-box i{color:var(--text3);font-size:.85rem}
.search-box input{flex:1;background:none;border:none;color:var(--text);padding:.55rem .7rem;font-size:.85rem;font-family:inherit;outline:none}
.search-box input::placeholder{color:var(--text3)}
.search-results-info{font-size:.75rem;color:var(--text3);white-space:nowrap}
.expand-all-btn{padding:.45rem 1rem;border-radius:99px;border:1px solid var(--border);background:var(--surface);color:var(--text2);font-size:.78rem;font-family:inherit;cursor:pointer;transition:var(--transition);white-space:nowrap}
.expand-all-btn:hover{border-color:rgba(129,140,248,.3);color:var(--text)}
/* ─── LAYOUT ─── */
.layout{display:flex;max-width:1400px;margin:0 auto}
.sidebar{position:sticky;top:50px;align-self:flex-start;height:calc(100vh - 50px);width:270px;min-width:270px;background:var(--bg2);border-right:1px solid var(--border);overflow-y:auto;padding:1.2rem 0;scrollbar-width:thin;scrollbar-color:var(--surface2) transparent}
.sidebar::-webkit-scrollbar{width:4px}
.sidebar::-webkit-scrollbar-thumb{background:var(--surface2);border-radius:4px}
.sidebar-title{padding:.2rem 1.2rem .8rem;font-size:.7rem;font-weight:700;text-transform:uppercase;letter-spacing:.08em;color:var(--text3)}
.nav-link{display:flex;align-items:center;gap:.6rem;padding:.5rem 1.2rem;font-size:.8rem;color:var(--text2);border-left:3px solid transparent;transition:var(--transition);overflow:hidden}
.nav-link:hover,.nav-link.active{color:var(--text);background:rgba(255,255,255,.03);text-decoration:none}
.nav-link.active{border-left-color:var(--accent)}
.nav-icon{width:16px;text-align:center;font-size:.75rem;flex-shrink:0}
.nav-sub{display:block;padding:.3rem 1.2rem .3rem 2.8rem;font-size:.73rem;color:var(--text3);transition:var(--transition)}
.nav-sub:hover{color:var(--text2);text-decoration:none}
.badge{font-size:.65rem;padding:.1rem .45rem;border-radius:99px;margin-left:auto;font-weight:600;flex-shrink:0}
.badge-sm{font-size:.65rem;color:var(--text3);opacity:.7}
/* ─── MAIN ─── */
.main{flex:1;min-width:0}
.section{margin-bottom:1px}
.section-banner{position:relative;height:170px;overflow:hidden;display:flex;align-items:flex-end}
.banner-img{position:absolute;inset:0;width:100%;height:100%;object-fit:cover;filter:brightness(.35) saturate(1.3);transition:filter .5s}
.section:hover .banner-img{filter:brightness(.42) saturate(1.4)}
.banner-overlay{position:absolute;inset:0;background:linear-gradient(0deg,var(--bg) 0%,transparent 70%)}
.banner-text{position:relative;z-index:2;padding:1.2rem 2rem;width:100%}
.banner-icon{font-size:1.3rem;margin-bottom:.25rem;display:block;filter:drop-shadow(0 0 8px currentColor)}
.banner-text h2{font-size:1.4rem;font-weight:700;letter-spacing:-.01em}
.entry-count{font-size:.75rem;color:var(--text3);margin-top:.15rem}
.section-content{padding:1.2rem 2rem 2rem}
/* ─── TABLE ─── */
.table-wrap{overflow-x:auto;margin-bottom:1.2rem;border-radius:var(--radius);border:1px solid var(--border);background:var(--surface)}
table{width:100%;border-collapse:collapse;font-size:.8rem}
thead{position:sticky;top:0;z-index:1}
th{padding:.65rem 1rem;text-align:left;font-weight:600;font-size:.72rem;text-transform:uppercase;letter-spacing:.04em;color:var(--text2);background:var(--surface2);border-bottom:1px solid var(--border);white-space:nowrap}
td{padding:.55rem 1rem;border-bottom:1px solid rgba(255,255,255,.03);color:var(--text2);max-width:320px;vertical-align:top}
td a{font-weight:500}
tbody tr{transition:background .15s}
tbody tr:hover td{background:rgba(255,255,255,.02);color:var(--text)}
tbody tr.hidden{display:none}
/* ─── SUBSECTIONS ─── */
.subsection{margin-bottom:.6rem;border-radius:var(--radius);border:1px solid var(--border);overflow:hidden;background:var(--surface);transition:border-color .2s}
.subsection:hover{border-color:rgba(255,255,255,.1)}
.sub-toggle{width:100%;display:flex;justify-content:space-between;align-items:center;padding:.75rem 1.1rem;background:none;border:none;color:var(--text);font-size:.88rem;font-weight:600;cursor:pointer;font-family:inherit;text-align:left;transition:background .15s}
.sub-toggle:hover{background:rgba(255,255,255,.02)}
.chevron{transition:transform .3s;color:var(--text3);flex-shrink:0}
.sub-toggle[aria-expanded="true"] .chevron{transform:rotate(180deg)}
.sub-content{max-height:0;overflow:hidden;transition:max-height .45s cubic-bezier(.4,0,.2,1),padding .3s}
.sub-content.open{max-height:12000px;padding:.2rem 1rem 1rem}
/* ─── BACK TO TOP ─── */
.back-top{position:fixed;bottom:1.5rem;right:1.5rem;width:42px;height:42px;border-radius:50%;background:rgba(99,102,241,.15);backdrop-filter:blur(10px);border:1px solid rgba(99,102,241,.3);color:var(--accent);display:flex;align-items:center;justify-content:center;cursor:pointer;opacity:0;pointer-events:none;transition:var(--transition);z-index:100;font-size:1rem}
.back-top.show{opacity:1;pointer-events:all}
.back-top:hover{background:rgba(99,102,241,.25);transform:translateY(-2px)}
/* ─── FOOTER ─── */
.footer{text-align:center;padding:2.5rem 2rem;color:var(--text3);font-size:.8rem;border-top:1px solid var(--border)}
.footer a{color:var(--text2)}
/* ─── RESPONSIVE ─── */
@media(max-width:960px){
.sidebar{display:none}
.section-banner{height:130px}
.section-content{padding:1rem}
.banner-text{padding:1rem}
td,th{padding:.4rem .6rem;font-size:.72rem}
.hero{min-height:45vh}
.hero h1{font-size:1.6rem}
}
@media(max-width:600px){
.search-inner{flex-wrap:wrap}
.expand-all-btn{display:none}
.stats{gap:.5rem}
.stat-card{padding:.6rem .8rem;min-width:80px}
.stat-num{font-size:1.2rem}
}
</style>
</head>
<body>
<!-- HERO -->
<div class="hero">
<div class="hero-bg"></div>
<div class="hero-content">
<div class="pills">
<span class="pill"><i class="fas fa-circle"></i>CVPR 2025 Workshop</span>
<span class="pill"><i class="fas fa-circle"></i>Updated Apr 2026</span>
<span class="pill"><i class="fas fa-circle"></i>Open Source</span>
</div>
<h1>Vision-Language Models<br/>Survey & Overview</h1>
<p class="subtitle">A curated collection of state-of-the-art VLMs, benchmarks, RL alignment methods, applications, and open challenges in the multimodal AI landscape.</p>
<div class="stats">
<div class="stat-card"><span class="stat-num" id="stat-models">60</span><span class="stat-label">Models</span></div>
<div class="stat-card"><span class="stat-num" id="stat-benchmarks">85</span><span class="stat-label">Benchmarks</span></div>
<div class="stat-card"><span class="stat-num" id="stat-training">53</span><span class="stat-label">RL & SFT</span></div>
<div class="stat-card"><span class="stat-num" id="stat-apps">116</span><span class="stat-label">Applications</span></div>
</div>
<div class="hero-links">
<a class="cite-link" href="https://arxiv.org/abs/2501.02189" target="_blank"><i class="fas fa-file-alt"></i>Read the Paper</a>
<a class="cite-link github-link" href="https://github.com/zli12321/Vision-Language-Models-Overview" target="_blank"><i class="fab fa-github"></i>GitHub</a>
</div>
</div>
</div>
<!-- SEARCH -->
<div class="search-wrap">
<div class="search-inner">
<div class="search-box">
<i class="fas fa-search"></i>
<input type="text" id="searchInput" placeholder="Search models, papers, benchmarks..." autocomplete="off"/>
</div>
<span class="search-results-info" id="searchInfo"></span>
<button class="expand-all-btn" id="expandBtn" onclick="toggleAll()">Expand All</button>
</div>
</div>
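<!-- The search bar above and the per-subsection toggle buttons below call toggleAll() and toggleSection(); a minimal sketch of those handlers is given here, assuming the conventions this page's CSS already defines (the .open class on .sub-content and aria-expanded on .sub-toggle). Any later definition of these functions in this file takes precedence. -->
<script>
// Minimal sketch (assumption): open or close one collapsible subsection panel.
function toggleSection(btn){
  var panel = btn.nextElementSibling;                         // the .sub-content div follows the button
  var open = panel.classList.toggle('open');                  // CSS animates max-height via .open
  btn.setAttribute('aria-expanded', open ? 'true' : 'false'); // rotates the chevron
}
// Minimal sketch (assumption): expand or collapse every subsection from the "Expand All" button.
function toggleAll(){
  var panels = document.querySelectorAll('.sub-content');
  var anyClosed = Array.prototype.some.call(panels, function(p){ return !p.classList.contains('open'); });
  panels.forEach(function(p){ p.classList.toggle('open', anyClosed); });
  document.querySelectorAll('.sub-toggle').forEach(function(b){
    b.setAttribute('aria-expanded', anyClosed ? 'true' : 'false');
  });
  document.getElementById('expandBtn').textContent = anyClosed ? 'Collapse All' : 'Expand All';
}
</script>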
<!-- LAYOUT -->
<div class="layout">
<nav class="sidebar">
<div class="sidebar-title">Contents</div>
<a href="#s0" class="nav-link" data-color="#6366f1"><i class="fas fa-robot nav-icon" style="color:#6366f1"></i>📚 SoTA VLMs<span class="badge" style="background:#6366f122;color:#6366f1">60</span></a>
<a href="#s1" class="nav-link" data-color="#0ea5e9"><i class="fas fa-chart-bar nav-icon" style="color:#0ea5e9"></i>🗂️ Benchmarks and Evaluation<span class="badge" style="background:#0ea5e922;color:#0ea5e9">85</span></a>
<a href="#s1-0" class="nav-sub">Datasets for Training VLMs <span class="badge-sm">9</span></a>
<a href="#s1-1" class="nav-sub">Datasets and Evaluation for VLM </a>
<a href="#s1-2" class="nav-sub">🧮 Visual Math (+ Visual Math Reasoning) <span class="badge-sm">4</span></a>
<a href="#s1-3" class="nav-sub">💬 Benchmark for Unified Models <span class="badge-sm">3</span></a>
<a href="#s1-4" class="nav-sub">🎞️ Video Understanding <span class="badge-sm">10</span></a>
<a href="#s1-5" class="nav-sub">💬 Multimodal Conversation <span class="badge-sm">1</span></a>
<a href="#s1-6" class="nav-sub">🧠 Multimodal General Intelligence <span class="badge-sm">7</span></a>
<a href="#s1-7" class="nav-sub">🔎 Visual Reasoning / VQA (+ Multilingual & OCR) <span class="badge-sm">12</span></a>
<a href="#s1-8" class="nav-sub">📝 Visual Text / Document Understanding (+ Charts) <span class="badge-sm">4</span></a>
<a href="#s1-9" class="nav-sub">🌄 Text‑to‑Image Generation <span class="badge-sm">2</span></a>
<a href="#s1-10" class="nav-sub">🚨 Hallucination Detection / Control <span class="badge-sm">11</span></a>
<a href="#s1-11" class="nav-sub">Benchmark Datasets, Simulators, and Generative Models for Embodied VLM <span class="badge-sm">22</span></a>
<a href="#s2" class="nav-link" data-color="#f59e0b"><i class="fas fa-fire nav-icon" style="color:#f59e0b"></i>⚒️ Post-Training<span class="badge" style="background:#f59e0b22;color:#f59e0b">53</span></a>
<a href="#s2-0" class="nav-sub">RL Alignment for VLM <span class="badge-sm">27</span></a>
<a href="#s2-1" class="nav-sub">Finetuning for VLM <span class="badge-sm">14</span></a>
<a href="#s2-2" class="nav-sub">VLM Alignment github <span class="badge-sm">7</span></a>
<a href="#s2-3" class="nav-sub">Prompt Optimization <span class="badge-sm">5</span></a>
<a href="#s3" class="nav-link" data-color="#10b981"><i class="fas fa-cogs nav-icon" style="color:#10b981"></i>⚒️ Applications<span class="badge" style="background:#10b98122;color:#10b981">116</span></a>
<a href="#s3-0" class="nav-sub">Embodied VLM Agents <span class="badge-sm">9</span></a>
<a href="#s3-1" class="nav-sub">Generative Visual Media Applications <span class="badge-sm">4</span></a>
<a href="#s3-2" class="nav-sub">Robotics and Embodied AI <span class="badge-sm">31</span></a>
<a href="#s3-3" class="nav-sub">Manipulation <span class="badge-sm">11</span></a>
<a href="#s3-4" class="nav-sub">Navigation <span class="badge-sm">8</span></a>
<a href="#s3-5" class="nav-sub">Human-robot Interaction <span class="badge-sm">3</span></a>
<a href="#s3-6" class="nav-sub">Autonomous Driving <span class="badge-sm">16</span></a>
<a href="#s3-7" class="nav-sub">Human-Centered AI <span class="badge-sm">5</span></a>
<a href="#s3-8" class="nav-sub">Web Agent <span class="badge-sm">7</span></a>
<a href="#s3-9" class="nav-sub">Accessibility <span class="badge-sm">3</span></a>
<a href="#s3-10" class="nav-sub">Healthcare <span class="badge-sm">9</span></a>
<a href="#s3-11" class="nav-sub">Social Goodness <span class="badge-sm">10</span></a>
<a href="#s4" class="nav-link" data-color="#ef4444"><i class="fas fa-shield-alt nav-icon" style="color:#ef4444"></i>Challenges<span class="badge" style="background:#ef444422;color:#ef4444">76</span></a>
<a href="#s4-0" class="nav-sub">Hallucination <span class="badge-sm">15</span></a>
<a href="#s4-1" class="nav-sub">Safety <span class="badge-sm">11</span></a>
<a href="#s4-2" class="nav-sub">Fairness <span class="badge-sm">8</span></a>
<a href="#s4-3" class="nav-sub">Alignment </a>
<a href="#s4-4" class="nav-sub">Multi-modality Alignment <span class="badge-sm">7</span></a>
<a href="#s4-5" class="nav-sub">Commonsense and Physics Alignment <span class="badge-sm">16</span></a>
<a href="#s4-6" class="nav-sub">Efficient Training and Fine-Tuning <span class="badge-sm">12</span></a>
<a href="#s4-7" class="nav-sub">Scarce of High-quality Dataset <span class="badge-sm">7</span></a>
</nav>
<main class="main">
<section class="section" id="s0">
<div class="section-banner" style="--accent:#6366f1">
<img src="https://images.unsplash.com/photo-1677442136019-21780ecad995?w=800&q=80" alt="" class="banner-img" loading="lazy"/>
<div class="banner-overlay"></div>
<div class="banner-text">
<i class="fas fa-robot banner-icon" style="color:#6366f1"></i>
<h2>📚 SoTA VLMs</h2>
<p class="entry-count">60 entries</p>
</div>
</div>
<div class="section-content">
<div class="table-wrap"><table><thead><tr><th>Model</th><th>Year</th><th>Architecture</th><th>Training Data</th><th>Parameters</th><th>Vision Encoder/Tokenizer</th><th>Pretrained Backbone Model</th></tr></thead><tbody><tr><td><a href="https://www.marktechpost.com/2026/04/11/liquid-ai-releases-lfm2-5-vl-450m-a-450m-parameter-vision-language-model-with-bounding-box-prediction-multilingual-support-and-sub-250ms-edge-inference/" target="_blank" rel="noopener">LFM2.5-VL-450M (Liquid AI)</a></td><td>04/11/2026</td><td>Liquid Foundation Model</td><td>Undisclosed</td><td>450M</td><td>Non-overlapping tile ViT</td><td>LFM2.5</td></tr><tr><td><a href="https://en.sedaily.com/finance/2026/04/09/lg-unveils-exaone-45-multimodal-ai-claims-victory-over" target="_blank" rel="noopener">EXAONE 4.5 (LG AI Research)</a></td><td>04/09/2026</td><td>Unified VL</td><td>Undisclosed</td><td>33B</td><td>Proprietary vision encoder</td><td>EXAONE 4.5</td></tr><tr><td><a href="https://blog.google/innovation-and-ai/technology/developers-tools/gemma-4/" target="_blank" rel="noopener">Gemma 4 (Google)</a></td><td>04/02/2026</td><td>Decoder-only / MoE</td><td>Undisclosed (140+ languages)</td><td>E2B / E4B / 26B MoE / 31B Dense</td><td>Native multimodal</td><td><a href="https://deepmind.google/models/gemini/" target="_blank" rel="noopener">Gemini 3</a></td></tr><tr><td><a href="https://www.marktechpost.com/2026/04/01/ibm-releases-granite-4-0-3b-vision-a-new-vision-language-model-for-enterprise-grade-document-data-extraction/" target="_blank" rel="noopener">Granite 4.0 3B Vision (IBM)</a></td><td>04/01/2026</td><td>Decoder-only</td><td>Enterprise document corpora</td><td>3B</td><td>Undisclosed</td><td>Granite 4.0</td></tr><tr><td><a href="https://arxiv.org/abs/2603.09877" target="_blank" rel="noopener">InternVL-U (Shanghai AI Lab)</a></td><td>03/10/2026</td><td>Unified (MLLM + MMDiT)</td><td>Multimodal understanding + generation</td><td>4B</td><td>InternViT</td><td>InternVL</td></tr><tr><td><a href="https://openai.com/index/introducing-gpt-5-4/" target="_blank" rel="noopener">GPT-5.4 / GPT-5.4 Thinking (OpenAI)</a></td><td>03/06/2026</td><td>Decoder-only</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td></tr><tr><td><a href="https://arxiv.org/abs/2603.03975" target="_blank" rel="noopener">Phi-4-Reasoning-Vision-15B (Microsoft)</a></td><td>03/04/2026</td><td>Decoder-only</td><td>Curated synthetic + filtered data</td><td>15B</td><td>High-res dynamic-resolution ViT</td><td>Phi-4</td></tr><tr><td><a href="https://deepmind.google/models/gemini/" target="_blank" rel="noopener">Gemini 3.0 (Google)</a></td><td>03/2026</td><td>Unified Model</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td></tr><tr><td><a href="https://github.com/QwenLM/Qwen3.5" target="_blank" rel="noopener">Qwen3.5 (Alibaba)</a></td><td>02/16/2026</td><td>Unified VL (early fusion)</td><td>Trillions of multimodal tokens</td><td>0.8B–397B (MoE, 17B active)</td><td>ViT (native)</td><td>Qwen3.5</td></tr><tr><td><a href="https://docs.anthropic.com/en/docs/about-claude/models" target="_blank" rel="noopener">Claude Opus 4.6 (Anthropic)</a></td><td>02/2026</td><td>Decoder-only</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td></tr><tr><td><a href="https://arxiv.org/pdf/2602.04705" target="_blank" rel="noopener">Erin 5.0 (Baidu)</a></td><td>02/05/2026</td><td>Unified Model (Visual, Text, Audio)</td><td>Unified Modality Dataset</td><td>-</td><td>CNN–ViT 
(Understanding)/Next-Frame-and-Scale Prediction (Generation)</td><td>Unified Autoregressive Transformer</td></tr><tr><td><a href="https://arxiv.org/abs/2601.10611" target="_blank" rel="noopener">Molmo2 (Allen AI)</a></td><td>01/15/2026</td><td>Decoder-only</td><td>7 new video + 2 multi-image datasets (9.19M videos)</td><td>4B / 7B / 8B</td><td>Bi-directional attention ViT</td><td><a href="https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f" target="_blank" rel="noopener">Qwen 3</a> / <a href="https://huggingface.co/allenai" target="_blank" rel="noopener">OLMo</a></td></tr><tr><td><a href="https://aistudio.google.com/models/gemini-3" target="_blank" rel="noopener">Gemini 3</a></td><td>11/18/2025</td><td>Unified Model</td><td>Undisclosed</td><td>-</td><td>-</td><td>-</td></tr><tr><td><a href="https://arxiv.org/pdf/2510.26583" target="_blank" rel="noopener">Emu3.5</a></td><td>10/30/2025</td><td>Decoder-only</td><td>Unified Modality Dataset</td><td>-</td><td>SigLIP</td><td><a href="https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f" target="_blank" rel="noopener">Qwen3</a></td></tr><tr><td><a href="https://github.com/deepseek-ai/DeepSeek-OCR/blob/main/DeepSeek_OCR_paper.pdf" target="_blank" rel="noopener">DeepSeek-OCR</a></td><td>10/20/2025</td><td>Encoder-Decoder</td><td>70% OCR, 20% general vision, 10% text-only</td><td><a href="https://huggingface.co/deepseek-ai/DeepSeek-OCR" target="_blank" rel="noopener">3B</a></td><td>DeepEncoder</td><td>DeepSeek-3B</td></tr><tr><td><a href="https://huggingface.co/Qwen/Qwen3-VL-8B-Instruct" target="_blank" rel="noopener">Qwen3-VL</a></td><td>10/11/2025</td><td>Decoder-only</td><td>-</td><td><a href="https://huggingface.co/collections/Qwen/qwen3-vl-68d2a7c1b8a8afce4ebd2dbe" target="_blank" rel="noopener">8B/4B</a></td><td>ViT</td><td><a href="https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f" target="_blank" rel="noopener">Qwen3</a></td></tr><tr><td><a href="https://github.com/QwenLM/Qwen3-VL" target="_blank" rel="noopener">Qwen3-VL-MoE</a></td><td>09/25/2025</td><td>Decoder-only</td><td>-</td><td><a href="https://huggingface.co/collections/Qwen/qwen3-vl-68d2a7c1b8a8afce4ebd2dbe" target="_blank" rel="noopener">235B-A22B</a></td><td>ViT</td><td><a href="https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f" target="_blank" rel="noopener">Qwen3</a></td></tr><tr><td><a href="https://github.com/QwenLM/Qwen3-Omni/blob/main/assets/Qwen3_Omni.pdf" target="_blank" rel="noopener">Qwen3-Omni</a> (Visual/Audio/Text)</td><td>09/21/2025</td><td>-</td><td>Video/Audio/Image</td><td>30B</td><td>ViT</td><td>Qwen3-Omni-MoE-Thinker</td></tr><tr><td><a href="https://github.com/EvolvingLMMs-Lab/LLaVA-OneVision-1.5" target="_blank" rel="noopener">LLaVA-Onevision-1.5</a></td><td>09/15/2025</td><td>-</td><td><a href="https://huggingface.co/datasets/lmms-lab/LLaVA-One-Vision-1.5-Mid-Training-85M" target="_blank" rel="noopener">Mid-Training-85M</a> & <a href="https://huggingface.co/datasets/lmms-lab/LLaVA-OneVision-1.5-Insturct-Data" target="_blank" rel="noopener">SFT</a></td><td>8B</td><td>Qwen2VLImageProcessor</td><td><a href="https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f" target="_blank" rel="noopener">Qwen3</a></td></tr><tr><td><a href="https://arxiv.org/abs/2508.18265" target="_blank" rel="noopener">InternVL3.5</a></td><td>08/25/2025</td><td>Decoder-only</td><td>multimodal & text-only</td><td>30B/38B/241B</td><td>InternViT-300M/6B</td><td><a 
href="https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f" target="_blank" rel="noopener">Qwen3</a> / <a href="https://huggingface.co/collections/openai/gpt-oss-68911959590a1634ba11c7a4" target="_blank" rel="noopener">GPT-OSS</a></td></tr><tr><td><a href="https://huggingface.co/Skywork/Skywork-UniPic-1.5B" target="_blank" rel="noopener">Skywork-UniPic-1.5B</a></td><td>07/29/2025</td><td>-</td><td>image/video..</td><td>-</td><td>-</td><td>-</td></tr><tr><td><a href="https://x.ai/news/grok-4" target="_blank" rel="noopener">Grok 4</a></td><td>07/09/2025</td><td>-</td><td>image/video..</td><td>1-2 Trillion</td><td>-</td><td>-</td></tr><tr><td><a href="https://arxiv.org/abs/2507.01949" target="_blank" rel="noopener">Kwai Keye-VL (Kuaishou)</a></td><td>07/02/2025</td><td>Decoder-only</td><td>image/video..</td><td>8B</td><td>ViT</td><td><a href="https://huggingface.co/Qwen/Qwen3-8B" target="_blank" rel="noopener">Qwen-3-8B</a></td></tr><tr><td><a href="https://arxiv.org/abs/2506.18871" target="_blank" rel="noopener">OmniGen2</a></td><td>06/23/2025</td><td>Decoder-only & VAE</td><td>LLaVA-OneVision/ SAM-LLaVA..</td><td>-</td><td>ViT</td><td><a href="https://huggingface.co/collections/Qwen/qwen25-vl-6795ffac22b334a837c0f9a5" target="_blank" rel="noopener">Qwen-2.5-VL</a></td></tr><tr><td><a href="https://deepmind.google/models/gemini/pro/" target="_blank" rel="noopener">Gemini-2.5-Pro</a></td><td>06/17/2025</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td><a href="https://openai.com/index/introducing-o3-and-o4-mini/" target="_blank" rel="noopener">GPT-o3/o4-mini</a></td><td>06/10/2025</td><td>Decoder-only</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td></tr><tr><td><a href="https://arxiv.org/abs/2506.03569" target="_blank" rel="noopener">Mimo-VL (Xiaomi)</a></td><td>06/04/2025</td><td>Decoder-only</td><td>24 Trillion MLLM tokens</td><td>7B</td><td>Qwen2.5-ViT</td><td><a href="https://huggingface.co/XiaomiMiMo/MiMo-7B-Base" target="_blank" rel="noopener">MiMo-7B-Base</a></td></tr><tr><td><a href="https://arxiv.org/abs/2505.14683" target="_blank" rel="noopener">BAGEL (Bytedance)</a></td><td>05/20/2025</td><td>Unified Model</td><td>Video/Image/Text</td><td>7B</td><td><a href="https://arxiv.org/abs/2502.14786" target="_blank" rel="noopener">SigLIP2-so400m/14</a></td><td><a href="https://arxiv.org/abs/2412.15115" target="_blank" rel="noopener">Qwen2.5</a></td></tr><tr><td><a href="https://www.arxiv.org/abs/2505.09568" target="_blank" rel="noopener">BLIP3-o</a></td><td>05/14/2025</td><td>Decoder-only</td><td>(BLIP3-o 60K) GPT-4o Generated Image Generation Data</td><td>4/8B</td><td>ViT</td><td><a href="https://huggingface.co/collections/Qwen/qwen25-vl-6795ffac22b334a837c0f9a5" target="_blank" rel="noopener">Qwen-2.5-VL</a></td></tr><tr><td><a href="https://arxiv.org/abs/2504.10479" target="_blank" rel="noopener">InternVL-3</a></td><td>04/14/2025</td><td>Decoder-only</td><td>200 Billion Tokens</td><td>1/2/8/9/14/38/78B</td><td>ViT-300M/6B</td><td><a href="https://huggingface.co/OpenGVLab/InternVL3-78B" target="_blank" rel="noopener">InternLM2.5/Qwen2.5</a></td></tr><tr><td><a href="https://ai.meta.com/blog/llama-4-multimodal-intelligence/" target="_blank" rel="noopener">LLaMA4-Scout/Maverick</a></td><td>04/04/2025</td><td>Decoder-only</td><td>40/20 Trillion Tokens</td><td>17B</td><td><a href="https://github.com/facebookresearch/MetaCLIP" target="_blank" rel="noopener">MetaCLIP</a></td><td><a 
href="https://huggingface.co/collections/meta-llama/llama-4-67f0c30d9fe03840bc9d0164" target="_blank" rel="noopener">LLaMA4</a></td></tr><tr><td><a href="https://arxiv.org/abs/2503.20215" target="_blank" rel="noopener">Qwen2.5-Omni</a></td><td>03/26/2025</td><td>Decoder-only</td><td>Video/Audio/Image/Text</td><td>7B</td><td>Qwen2-Audio/Qwen2.5-VL ViT</td><td><a href="https://arxiv.org/abs/2408.16725" target="_blank" rel="noopener">End-to-End Mini-Omni</a></td></tr><tr><td><a href="https://arxiv.org/abs/2502.13923" target="_blank" rel="noopener">Qwen2.5-VL</a></td><td>01/28/2025</td><td>Decoder-only</td><td>Image caption, VQA, grounding agent, long video</td><td>3B/7B/72B</td><td>Redesigned ViT</td><td><a href="https://huggingface.co/Qwen" target="_blank" rel="noopener">Qwen2.5</a></td></tr><tr><td><a href="https://z.ai/blog/glm-4.6v" target="_blank" rel="noopener">GLM-4.6V (Zhipu / Z.AI)</a></td><td>12/2025</td><td>Decoder-only</td><td>Undisclosed</td><td>106B / 9B (Flash)</td><td>Undisclosed</td><td>GLM-4.6</td></tr><tr><td><a href="https://arxiv.org/pdf/2502.04328" target="_blank" rel="noopener">Ola</a></td><td>2025</td><td>Decoder-only</td><td>Image/Video/Audio/Text</td><td>7B</td><td><a href="https://huggingface.co/THUdyh/Oryx-ViT" target="_blank" rel="noopener">OryxViT</a></td><td><a href="https://qwenlm.github.io/blog/qwen2.5/" target="_blank" rel="noopener">Qwen-2.5-7B</a>, <a href="https://arxiv.org/pdf/2303.15343" target="_blank" rel="noopener">SigLIP-400M</a>, <a href="https://arxiv.org/pdf/2212.04356" target="_blank" rel="noopener">Whisper-V3-Large</a>, <a href="https://arxiv.org/pdf/2212.09058" target="_blank" rel="noopener">BEATs-AS2M(cpt2)</a></td></tr><tr><td><a href="https://arxiv.org/abs/2501.15558" target="_blank" rel="noopener">Ocean-OCR</a></td><td>2025</td><td>Decoder-only</td><td>Pure Text, Caption, <a href="https://github.com/OpenGVLab/MM-Interleaved" target="_blank" rel="noopener">Interleaved</a>, <a href="https://github.com/X-PLUG/mPLUG-DocOwl/tree/main/DocOwl1.5" target="_blank" rel="noopener">OCR</a></td><td>3B</td><td><a href="https://arxiv.org/pdf/2307.06304" target="_blank" rel="noopener">NaViT</a></td><td>Pretrained from scratch</td></tr><tr><td><a href="https://huggingface.co/blog/smolervlm" target="_blank" rel="noopener">SmolVLM</a></td><td>2025</td><td>Decoder-only</td><td><a href="https://huggingface.co/HuggingFaceTB/SmolVLM-Instruct/blob/main/smolvlm-data.pdf" target="_blank" rel="noopener">SmolVLM-Instruct</a></td><td>250M & 500M</td><td>SigLIP</td><td><a href="https://huggingface.co/blog/smollm" target="_blank" rel="noopener">SmolLM</a></td></tr><tr><td><a href="https://janusai.pro/wp-content/uploads/2025/01/janus_pro_tech_report.pdf" target="_blank" rel="noopener">DeepSeek-Janus-Pro</a></td><td>2025</td><td>Decoder-only</td><td>Undisclosed</td><td>7B</td><td>SigLIP</td><td><a href="https://huggingface.co/deepseek-ai/Janus-Pro-7B" target="_blank" rel="noopener">DeepSeek-Janus-Pro</a></td></tr><tr><td><a href="https://arxiv.org/abs/2412.03565" target="_blank" rel="noopener">Inst-IT</a></td><td>2024</td><td>Decoder-only</td><td><a href="https://huggingface.co/datasets/Inst-IT/Inst-It-Dataset" target="_blank" rel="noopener">Inst-IT Dataset</a>, <a href="https://huggingface.co/datasets/lmms-lab/LLaVA-NeXT-Data" target="_blank" rel="noopener">LLaVA-NeXT-Data</a></td><td>7B</td><td>CLIP/Vicuna, SigLIP/Qwen2</td><td><a href="https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b" target="_blank" rel="noopener">LLaVA-NeXT</a></td></tr><tr><td><a 
href="https://arxiv.org/pdf/2412.10302" target="_blank" rel="noopener">DeepSeek-VL2</a></td><td>2024</td><td>Decoder-only</td><td><a href="https://huggingface.co/datasets/google/wit" target="_blank" rel="noopener">WiT</a>, <a href="https://huggingface.co/datasets/ajibawa-2023/WikiHow" target="_blank" rel="noopener">WikiHow</a></td><td>4.5B x 74</td><td>SigLIP/SAMB</td><td><a href="https://arxiv.org/pdf/2412.10302" target="_blank" rel="noopener">DeepSeekMoE</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2408.08872" target="_blank" rel="noopener">xGen-MM (BLIP-3)</a></td><td>2024</td><td>Decoder-only</td><td><a href="https://arxiv.org/pdf/2406.11271" target="_blank" rel="noopener">MINT-1T</a>, <a href="https://arxiv.org/pdf/2306.16527" target="_blank" rel="noopener">OBELICS</a>, <a href="https://github.com/salesforce/LAVIS/tree/xgen-mm?tab=readme-ov-file#data-preparation" target="_blank" rel="noopener">Caption</a></td><td>4B</td><td>ViT + <a href="https://arxiv.org/pdf/2204.14198" target="_blank" rel="noopener">Perceiver Resampler</a></td><td><a href="https://arxiv.org/pdf/2404.14219" target="_blank" rel="noopener">Phi-3-mini</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2408.11039" target="_blank" rel="noopener">TransFusion</a></td><td>2024</td><td>Encoder-decoder</td><td>Undisclosed</td><td>7B</td><td>VAE Encoder</td><td>Pretrained from scratch on transformer architecture</td></tr><tr><td><a href="https://arxiv.org/pdf/2410.08565" target="_blank" rel="noopener">Baichuan Ocean Mini</a></td><td>2024</td><td>Decoder-only</td><td>Image/Video/Audio/Text</td><td>7B</td><td>CLIP ViT-L/14</td><td><a href="https://arxiv.org/pdf/2309.10305" target="_blank" rel="noopener">Baichuan</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2407.21783" target="_blank" rel="noopener">LLaMA 3.2-vision</a></td><td>2024</td><td>Decoder-only</td><td>Undisclosed</td><td>11B-90B</td><td>CLIP</td><td><a href="https://arxiv.org/pdf/2407.21783" target="_blank" rel="noopener">LLaMA-3.1</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2410.07073" target="_blank" rel="noopener">Pixtral</a></td><td>2024</td><td>Decoder-only</td><td>Undisclosed</td><td>12B</td><td>CLIP ViT-L/14</td><td><a href="https://mistral.ai/" target="_blank" rel="noopener">Mistral Large 2</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2409.12191" target="_blank" rel="noopener">Qwen2-VL</a></td><td>2024</td><td>Decoder-only</td><td>Undisclosed</td><td>7B-14B</td><td>EVA-CLIP ViT-L</td><td><a href="https://arxiv.org/pdf/2407.10671" target="_blank" rel="noopener">Qwen-2</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2409.11402" target="_blank" rel="noopener">NVLM</a></td><td>2024</td><td>Encoder-decoder</td><td><a href="https://laion.ai/blog/laion-5b/" target="_blank" rel="noopener">LAION-115M </a></td><td>8B-24B</td><td>Custom ViT</td><td><a href="https://arxiv.org/pdf/2407.10671" target="_blank" rel="noopener">Qwen-2-Instruct</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2409.18869" target="_blank" rel="noopener">Emu3</a></td><td>2024</td><td>Decoder-only</td><td><a href="https://arxiv.org/pdf/2408.07410" target="_blank" rel="noopener">Aquila</a></td><td>7B</td><td>MoVQGAN</td><td><a href="https://arxiv.org/pdf/2307.09288" target="_blank" rel="noopener">LLaMA-2</a></td></tr><tr><td><a href="https://claude.ai/new" target="_blank" rel="noopener">Claude 3</a></td><td>2024</td><td>Decoder-only</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td></tr><tr><td><a 
href="https://arxiv.org/pdf/2312.14238" target="_blank" rel="noopener">InternVL</a></td><td>2023</td><td>Encoder-decoder</td><td><a href="https://laion.ai/blog/laion-5b/" target="_blank" rel="noopener">LAION-en, LAION- multi</a></td><td>7B/20B</td><td>Eva CLIP ViT-g</td><td><a href="https://arxiv.org/pdf/2304.08177" target="_blank" rel="noopener">QLLaMA</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2305.06500" target="_blank" rel="noopener">InstructBLIP</a></td><td>2023</td><td>Encoder-decoder</td><td><a href="https://cocodataset.org/#home" target="_blank" rel="noopener">CoCo</a>, <a href="https://huggingface.co/datasets/lmms-lab/VQAv2" target="_blank" rel="noopener">VQAv2</a></td><td>13B</td><td>ViT</td><td><a href="https://arxiv.org/pdf/2210.11416" target="_blank" rel="noopener">Flan-T5</a>, <a href="https://lmsys.org/blog/2023-03-30-vicuna/" target="_blank" rel="noopener">Vicuna</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2311.03079" target="_blank" rel="noopener">CogVLM</a></td><td>2023</td><td>Encoder-decoder</td><td><a href="https://sisap-challenges.github.io/2024/datasets/" target="_blank" rel="noopener">LAION-2B</a> ,<a href="https://github.com/kakaobrain/coyo-dataset" target="_blank" rel="noopener">COYO-700M</a></td><td>18B</td><td>CLIP ViT-L/14</td><td><a href="https://lmsys.org/blog/2023-03-30-vicuna/" target="_blank" rel="noopener">Vicuna</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2303.03378" target="_blank" rel="noopener">PaLM-E</a></td><td>2023</td><td>Decoder-only</td><td>All robots, <a href="https://arxiv.org/pdf/2209.06794" target="_blank" rel="noopener">WebLI</a></td><td>562B</td><td>ViT</td><td><a href="https://arxiv.org/pdf/2204.02311" target="_blank" rel="noopener">PaLM</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2310.03744" target="_blank" rel="noopener">LLaVA-1.5</a></td><td>2023</td><td>Decoder-only</td><td><a href="https://cocodataset.org/#home" target="_blank" rel="noopener">COCO</a></td><td>13B</td><td>CLIP ViT-L/14</td><td><a href="https://lmsys.org/blog/2023-03-30-vicuna/" target="_blank" rel="noopener">Vicuna</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2312.11805" target="_blank" rel="noopener">Gemini</a></td><td>2023</td><td>Decoder-only</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td></tr><tr><td><a href="https://arxiv.org/pdf/2309.17421" target="_blank" rel="noopener">GPT-4V</a></td><td>2023</td><td>Decoder-only</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td><td>Undisclosed</td></tr><tr><td><a href="https://arxiv.org/pdf/2301.12597" target="_blank" rel="noopener">BLIP-2</a></td><td>2023</td><td>Encoder-decoder</td><td><a href="https://cocodataset.org/#home" target="_blank" rel="noopener">COCO</a>, <a href="https://huggingface.co/datasets/ranjaykrishna/visual_genome" target="_blank" rel="noopener">Visual Genome</a></td><td>7B-13B</td><td>ViT-g</td><td><a href="https://arxiv.org/pdf/2205.01068" target="_blank" rel="noopener">Open Pretrained Transformer (OPT)</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2204.14198" target="_blank" rel="noopener">Flamingo</a></td><td>2022</td><td>Decoder-only</td><td><a href="https://arxiv.org/pdf/2204.14198" target="_blank" rel="noopener">M3W</a>, <a href="https://huggingface.co/docs/transformers/en/model_doc/align" target="_blank" rel="noopener">ALIGN</a></td><td>80B</td><td>Custom</td><td><a href="https://arxiv.org/pdf/2203.15556" target="_blank" rel="noopener">Chinchilla</a></td></tr><tr><td><a 
href="https://arxiv.org/pdf/2201.12086" target="_blank" rel="noopener">BLIP</a></td><td>2022</td><td>Encoder-decoder</td><td><a href="https://cocodataset.org/#home" target="_blank" rel="noopener">COCO</a>, <a href="https://huggingface.co/datasets/ranjaykrishna/visual_genome/" target="_blank" rel="noopener">Visual Genome</a></td><td>223M-400M</td><td>ViT-B/L/g</td><td>Pretrained from scratch</td></tr><tr><td><a href="https://arxiv.org/pdf/2103.00020" target="_blank" rel="noopener">CLIP</a></td><td>2021</td><td>Encoder-decoder</td><td>400M image-text pairs</td><td>63M-355M</td><td>ViT/ResNet</td><td>Pretrained from scratch</td></tr></tbody></table></div>
</div>
</section>
<section class="section" id="s1">
<div class="section-banner" style="--accent:#0ea5e9">
<img src="https://images.unsplash.com/photo-1551288049-bebda4e38f71?w=800&q=80" alt="" class="banner-img" loading="lazy"/>
<div class="banner-overlay"></div>
<div class="banner-text">
<i class="fas fa-chart-bar banner-icon" style="color:#0ea5e9"></i>
<h2>🗂️ Benchmarks and Evaluation</h2>
<p class="entry-count">85 entries</p>
</div>
</div>
<div class="section-content">
<div class="subsection" id="s1-0">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Datasets for Training VLMs <span class="badge-sm">9</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Size</th></tr></thead><tbody><tr><td><a href="https://huggingface.tw/papers/2604.08516" target="_blank" rel="noopener">MolmoWebMix (Allen AI)</a>(04/2026)</td><td>Web Agent Training Trajectories</td><td>100K+ synthetic + 30K human demos</td></tr><tr><td><a href="https://arxiv.org/html/2604.04917v1" target="_blank" rel="noopener">Vero-600K</a>(04/2026)</td><td>Broad Visual Reasoning RL Training</td><td>600K samples from 59 datasets, 6 task categories</td></tr><tr><td><a href="https://arxiv.org/abs/2603.29630" target="_blank" rel="noopener">BigEarthNet.txt</a>(03/2026)</td><td>Multi-sensor Earth Observation Image-Text</td><td>464K images, 9.6M text annotations</td></tr><tr><td><a href="https://arxiv.org/abs/2602.13758" target="_blank" rel="noopener">OmniScience</a>(02/2026)</td><td>Scientific Image Understanding</td><td>1.5M figure-caption-context triplets</td></tr><tr><td><a href="https://arxiv.org/abs/2602.07790" target="_blank" rel="noopener">MaD-Mix</a>(02/2026)</td><td>Multi-modal Data Mixture Optimization</td><td>Framework (0.5B–7B scale)</td></tr><tr><td><a href="https://openreview.net/pdf/54b83db2dc00f01b015b8356db617fdd6e38240f.pdf" target="_blank" rel="noopener">OVID</a>(2026)</td><td>Open Video Pre-training</td><td>10M hours, 300M frame-caption pairs</td></tr><tr><td><a href="https://arxiv.org/abs/2601.10611" target="_blank" rel="noopener">Molmo2 Video Datasets</a>(01/2026)</td><td>Video Captions, QA, Tracking, Pointing</td><td>9.19M videos (7 video + 2 multi-image datasets)</td></tr><tr><td><a href="https://huggingface.co/datasets/OpenDataArena/MMFineReason-1.8M-Qwen3-VL-235B-Thinking" target="_blank" rel="noopener">MMFineReason</a>(/1/30/2026)</td><td>REasoning</td><td>1.8M</td></tr><tr><td><a href="https://huggingface.co/datasets/HuggingFaceM4/FineVision" target="_blank" rel="noopener">FineVision</a>(09/04/2025)</td><td>Mixed Domain</td><td>24.3 M/4.48TB</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-2">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>🧮 Visual Math (+ Visual Math Reasoning) <span class="badge-sm">4</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/abs/2402.14804" target="_blank" rel="noopener">MathVision</a></td><td>Visual Math</td><td>MC / Answer Match</td><td>Human</td><td>3.04</td><td><a href="https://mathllm.github.io/mathvision/" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2310.02255" target="_blank" rel="noopener">MathVista</a></td><td>Visual Math</td><td>MC / Answer Match</td><td>Human</td><td>6</td><td><a href="https://mathvista.github.io" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2403.14624" target="_blank" rel="noopener">MathVerse</a></td><td>Visual Math</td><td>MC</td><td>Human</td><td>4.6</td><td><a href="https://mathverse-cuhk.github.io" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2503.14939" target="_blank" rel="noopener">VisNumBench</a></td><td>Visual Number Reasoning</td><td>MC</td><td>Python Program generated/Web Collection/Real life photos</td><td>1.91</td><td><a href="https://wwwtttjjj.github.io/VisNumBench/" target="_blank" rel="noopener">Repo</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-3">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>💬 Benchmark for Unified Models <span class="badge-sm">3</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://openreview.net/pdf?id=gu3DRaDWiI" target="_blank" rel="noopener">ROVER</a></td><td>Reciprocal Cross-Modal Reasoning</td><td>Visual Gen + Verbal Gen Eval</td><td>Human</td><td>1.3 (1,876 images)</td><td><a href="https://openreview.net/pdf?id=gu3DRaDWiI" target="_blank" rel="noopener">Paper</a></td></tr><tr><td></td><td><a href="https://arxiv.org/pdf/2509.24897" target="_blank" rel="noopener">RealUnify</a></td><td>Math, World knowledge, Image Gen</td><td>Direct & StepWise Eval (Sec 3.3)</td><td>Script & Humanverification</td><td>1.0</td></tr><tr><td><a href="https://arxiv.org/abs/2510.13759" target="_blank" rel="noopener">Uni-MMMU</a></td><td>Science, Code, Image Gen</td><td>DreamSim (Image Gen Eval) & String Matching (Understanding Eval)</td><td>-</td><td>1.0</td><td><a href="https://vchitect.github.io/Uni-MMMU-Project" target="_blank" rel="noopener">Repo</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-4">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>🎞️ Video Understanding <span class="badge-sm">10</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/abs/2603.14145" target="_blank" rel="noopener">MMOU</a></td><td>Omni-modal Long Video Understanding</td><td>MC</td><td>Human</td><td>15 (9,038 videos)</td><td><a href="https://arxiv.org/abs/2603.14145" target="_blank" rel="noopener">Paper</a></td></tr><tr><td><a href="https://arxiv.org/abs/2501.13826" target="_blank" rel="noopener">Video-MMMU</a></td><td>Knowledge Acquisition from Professional Videos</td><td>MC + Knowledge Gain</td><td>Expert</td><td>0.9 (300 videos)</td><td><a href="https://arxiv.org/abs/2501.13826" target="_blank" rel="noopener">Paper</a></td></tr><tr><td><a href="https://arxiv.org/abs/2501.12380" target="_blank" rel="noopener">MMVU</a></td><td>Expert-Level Multi-Discipline Video Understanding</td><td>MC</td><td>Expert</td><td>3 (27 subjects)</td><td><a href="https://arxiv.org/abs/2501.12380" target="_blank" rel="noopener">Paper</a></td></tr><tr><td></td><td><a href="https://arxiv.org/abs/2505.01481" target="_blank" rel="noopener">VideoHallu</a></td><td>Video Understanding</td><td>LLM Eval</td><td>Human</td><td>3.2</td></tr><tr><td><a href="https://arxiv.org/abs/2503.18923" target="_blank" rel="noopener">Video SimpleQA</a></td><td>Video Understanding</td><td>LLM Eval</td><td>Human</td><td>2.03</td><td><a href="https://videosimpleqa.github.io" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2307.16449" target="_blank" rel="noopener">MovieChat</a></td><td>Video Understanding</td><td>LLM Eval</td><td>Human</td><td>1</td><td><a href="https://rese1f.github.io/MovieChat/" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2305.13786" target="_blank" rel="noopener">Perception‑Test</a></td><td>Video Understanding</td><td>MC</td><td>Crowd</td><td>11.6</td><td><a href="https://github.com/google-deepmind/perception_test" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2405.21075" target="_blank" rel="noopener">VideoMME</a></td><td>Video Understanding</td><td>MC</td><td>Experts</td><td>2.7</td><td><a href="https://video-mme.github.io/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2308.09126" target="_blank" rel="noopener">EgoSchem</a></td><td>Video Understanding</td><td>MC</td><td>Synth / Human</td><td>5</td><td><a href="https://egoschema.github.io/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/abs/2412.03565" target="_blank" rel="noopener">Inst‑IT‑Bench</a></td><td>Fine‑grained Image & Video</td><td>MC & LLM</td><td>Human / Synth</td><td>2</td><td><a href="https://github.com/inst-it/inst-it" target="_blank" rel="noopener">Repo</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-5">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>💬 Multimodal Conversation <span class="badge-sm">1</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/abs/2412.08687" target="_blank" rel="noopener">VisionArena</a></td><td>Multimodal Conversation</td><td>Pairwise Pref</td><td>Human</td><td>23</td><td><a href="https://huggingface.co/lmarena-ai" target="_blank" rel="noopener">Repo</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-6">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>🧠 Multimodal General Intelligence <span class="badge-sm">7</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/abs/2603.09471" target="_blank" rel="noopener">OmniEarth</a></td><td>Geospatial / Remote Sensing VLM Eval</td><td>MC + Open VQA</td><td>Human (verified)</td><td>44.2 (9,275 images, 28 tasks)</td><td><a href="https://arxiv.org/abs/2603.09471" target="_blank" rel="noopener">Paper</a></td></tr><tr><td></td><td><a href="https://arxiv.org/abs/2603.05697" target="_blank" rel="noopener">MultiHaystack</a></td><td>Multimodal Retrieval & Reasoning</td><td>Retrieval + QA</td><td>Human</td><td>0.75 (46K+ candidates)</td></tr><tr><td></td><td><a href="https://arxiv.org/abs/2601.02316" target="_blank" rel="noopener">DatBench</a></td><td>Discriminative, Faithful VLM Eval</td><td>MC (format-aware)</td><td>Synth</td><td>-</td></tr><tr><td></td><td><a href="https://arxiv.org/pdf/2009.03300" target="_blank" rel="noopener">MMLU</a></td><td>General MM</td><td>MC</td><td>Human</td><td>15.9</td></tr><tr><td><a href="https://arxiv.org/pdf/2403.20330" target="_blank" rel="noopener">MMStar</a></td><td>General MM</td><td>MC</td><td>Human</td><td>1.5</td><td><a href="https://mmstar-benchmark.github.io/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2410.14669" target="_blank" rel="noopener">NaturalBench</a></td><td>General MM</td><td>Yes/No, MC</td><td>Human</td><td>10</td><td><a href="https://huggingface.co/datasets/BaiqiL/NaturalBench" target="_blank" rel="noopener">HF</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2501.16411" target="_blank" rel="noopener">PHYSBENCH</a></td><td>Visual Math Reasoning</td><td>MC</td><td>Grad STEM</td><td>0.10</td><td><a href="https://github.com/USC-GVL/PhysBench" target="_blank" rel="noopener">Repo</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-7">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>🔎 Visual Reasoning / VQA (+ Multilingual & OCR) <span class="badge-sm">12</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/abs/2501.05444" target="_blank" rel="noopener">EMMA</a></td><td>Visual Reasoning</td><td>MC</td><td>Human + Synth</td><td>2.8</td><td><a href="emma-benchmark.github.io" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2404.16006" target="_blank" rel="noopener">MMTBENCH</a></td><td>Visual Reasoning & QA</td><td>MC</td><td>AI Experts</td><td>30.1</td><td><a href="https://github.com/tylin/coco-caption" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2308.02490" target="_blank" rel="noopener">MM‑Vet</a></td><td>OCR / Visual Reasoning</td><td>LLM Eval</td><td>Human</td><td>0.2</td><td><a href="https://github.com/yuweihao/MM-Vet" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2307.06281" target="_blank" rel="noopener">MM‑En/CN</a></td><td>Multilingual MM Understanding</td><td>MC</td><td>Human</td><td>3.2</td><td><a href="https://github.com/open-compass/VLMEvalKit" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2305.13245" target="_blank" rel="noopener">GQA</a></td><td>Visual Reasoning & QA</td><td>Answer Match</td><td>Seed + Synth</td><td>22</td><td><a href="https://cs.stanford.edu/people/dorarad/gqa" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/abs/1811.10830" target="_blank" rel="noopener">VCR</a></td><td>Visual Reasoning & QA</td><td>MC</td><td>MTurks</td><td>290</td><td><a href="https://visualcommonsense.com/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/pdf/1505.00468" target="_blank" rel="noopener">VQAv2</a></td><td>Visual Reasoning & QA</td><td>Yes/No, Ans Match</td><td>MTurks</td><td>1100</td><td><a href="https://github.com/salesforce/LAVIS/blob/main/dataset_card/vqav2.md" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2311.16502" target="_blank" rel="noopener">MMMU</a></td><td>Visual Reasoning & QA</td><td>Ans Match, MC</td><td>College</td><td>11.5</td><td><a href="https://mmmu-benchmark.github.io/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/abs/2409.02813" target="_blank" rel="noopener">MMMU-Pro</a></td><td>Visual Reasoning & QA</td><td>Ans Match, MC</td><td>College</td><td>5.19</td><td><a href="https://mmmu-benchmark.github.io/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2503.10615" target="_blank" rel="noopener">R1‑Onevision</a></td><td>Visual Reasoning & QA</td><td>MC</td><td>Human</td><td>155</td><td><a href="https://github.com/Fancy-MLLM/R1-Onevision" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2502.12084" target="_blank" rel="noopener">VLM²‑Bench</a></td><td>Visual Reasoning & QA</td><td>Ans Match, MC</td><td>Human</td><td>3</td><td><a href="https://vlm2-bench.github.io/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2503.10582" target="_blank" rel="noopener">VisualWebInstruct</a></td><td>Visual Reasoning & QA</td><td>LLM Eval</td><td>Web</td><td>0.9</td><td><a href="https://tiger-ai-lab.github.io/VisualWebInstruct/" target="_blank" rel="noopener">Site</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-8">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>📝 Visual Text / Document Understanding (+ Charts) <span class="badge-sm">4</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/abs/2604.03660" target="_blank" rel="noopener">TableVision</a></td><td>Spatially Grounded Table Reasoning</td><td>3-level Cognitive Eval</td><td>Human</td><td>6.8 (13 sub-categories)</td><td><a href="https://arxiv.org/abs/2604.03660" target="_blank" rel="noopener">Paper</a></td></tr><tr><td><a href="https://arxiv.org/pdf/1904.08920" target="_blank" rel="noopener">TextVQA</a></td><td>Visual Text Understanding</td><td>Ans Match</td><td>Expert</td><td>28.6</td><td><a href="https://github.com/facebookresearch/mmf" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2007.00398" target="_blank" rel="noopener">DocVQA</a></td><td>Document VQA</td><td>Ans Match</td><td>Crowd</td><td>50</td><td><a href="https://www.docvqa.org/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/abs/2203.10244" target="_blank" rel="noopener">ChartQA</a></td><td>Chart Graphic Understanding</td><td>Ans Match</td><td>Crowd / Synth</td><td>32.7</td><td><a href="https://github.com/vis-nlp/ChartQA" target="_blank" rel="noopener">Repo</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-9">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>🌄 Text‑to‑Image Generation <span class="badge-sm">2</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/pdf/1405.0312" target="_blank" rel="noopener">MSCOCO‑30K</a></td><td>Text‑to‑Image</td><td>BLEU, ROUGE, Sim</td><td>MTurks</td><td>30</td><td><a href="https://cocodataset.org/#home" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2406.13743" target="_blank" rel="noopener">GenAI‑Bench</a></td><td>Text‑to‑Image</td><td>Human Rating</td><td>Human</td><td>80</td><td><a href="https://huggingface.co/datasets/BaiqiL/GenAI-Bench" target="_blank" rel="noopener">HF</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-10">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>🚨 Hallucination Detection / Control <span class="badge-sm">11</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Dataset</th><th>Task</th><th>Eval Protocol</th><th>Annotators</th><th>Size (K)</th><th>Code / Site</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/pdf/2310.14566" target="_blank" rel="noopener">HallusionBench</a></td><td>Hallucination</td><td>Yes/No</td><td>Human</td><td>1.13</td><td><a href="https://github.com/tianyi-lab/HallusionBench" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2305.10355" target="_blank" rel="noopener">POPE</a></td><td>Hallucination</td><td>Yes/No</td><td>Human</td><td>9</td><td><a href="https://github.com/RUCAIBox/POPE" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/1809.02156" target="_blank" rel="noopener">CHAIR</a></td><td>Hallucination</td><td>Yes/No</td><td>Human</td><td>124</td><td><a href="https://github.com/LisaAnne/Hallucination" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2308.06394" target="_blank" rel="noopener">MHalDetect</a></td><td>Hallucination</td><td>Ans Match</td><td>Human</td><td>4</td><td><a href="https://github.com/LisaAnne/Hallucination" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2408.01355" target="_blank" rel="noopener">Hallu‑Pi</a></td><td>Hallucination</td><td>Ans Match</td><td>Human</td><td>1.26</td><td><a href="https://github.com/NJUNLP/Hallu-PI" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2310.01779" target="_blank" rel="noopener">HallE‑Control</a></td><td>Hallucination</td><td>Yes/No</td><td>Human</td><td>108</td><td><a href="https://github.com/bronyayang/HallE_Control" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2406.10900" target="_blank" rel="noopener">AutoHallusion</a></td><td>Hallucination</td><td>Ans Match</td><td>Synth</td><td>3.129</td><td><a href="https://github.com/wuxiyang1996/AutoHallusion" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2407.13442" target="_blank" rel="noopener">BEAF</a></td><td>Hallucination</td><td>Yes/No</td><td>Human</td><td>26</td><td><a href="https://beafbench.github.io/" target="_blank" rel="noopener">Site</a></td></tr><tr><td><a href="https://arxiv.org/abs/2306.14565" target="_blank" rel="noopener">GAIVE</a></td><td>Hallucination</td><td>Ans Match</td><td>Synth</td><td>320</td><td><a href="https://github.com/FuxiaoLiu/LRV-Instruction" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2402.15721" target="_blank" rel="noopener">HalEval</a></td><td>Hallucination</td><td>Yes/No</td><td>Crowd / Synth</td><td>2</td><td><a href="https://github.com/WisdomShell/hal-eval" target="_blank" rel="noopener">Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2311.07397" target="_blank" rel="noopener">AMBER</a></td><td>Hallucination</td><td>Ans Match</td><td>Human</td><td>15.22</td><td><a href="https://github.com/junyangwang0410/AMBER" target="_blank" rel="noopener">Repo</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s1-11">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Benchmark Datasets, Simulators, and Generative Models for Embodied VLM <span class="badge-sm">22</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Benchmark</th><th>Domain</th><th>Type</th><th>Project</th></tr></thead><tbody><tr><td><a href="https://arxiv.org/abs/2501.04003" target="_blank" rel="noopener">Drive-Bench</a></td><td>Embodied AI</td><td>Autonomous Driving</td><td><a href="https://drive-bench.github.io" target="_blank" rel="noopener">Website</a></td></tr><tr><td><a href="https://arxiv.org/pdf/1904.01201" target="_blank" rel="noopener">Habitat</a>, <a href="https://arxiv.org/pdf/2106.14405" target="_blank" rel="noopener">Habitat 2.0</a>, <a href="https://arxiv.org/pdf/2310.13724" target="_blank" rel="noopener">Habitat 3.0</a></td><td>Robotics (Navigation)</td><td>Simulator + Dataset</td><td><a href="https://aihabitat.org/" target="_blank" rel="noopener">Website</a></td></tr><tr><td><a href="https://arxiv.org/pdf/1808.10654" target="_blank" rel="noopener">Gibson</a></td><td>Robotics (Navigation)</td><td>Simulator + Dataset</td><td><a href="http://gibsonenv.stanford.edu/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/StanfordVL/GibsonEnv" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2012.02924" target="_blank" rel="noopener">iGibson1.0</a>, <a href="https://arxiv.org/pdf/2108.03272" target="_blank" rel="noopener">iGibson2.0</a></td><td>Robotics (Navigation)</td><td>Simulator + Dataset</td><td><a href="https://svl.stanford.edu/igibson/" target="_blank" rel="noopener">Website</a>, <a href="https://stanfordvl.github.io/iGibson/" target="_blank" rel="noopener">Document</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2108.10470" target="_blank" rel="noopener">Isaac Gym</a></td><td>Robotics (Navigation)</td><td>Simulator</td><td><a href="https://developer.nvidia.com/isaac-gym" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/isaac-sim/IsaacGymEnvs" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2301.04195" target="_blank" rel="noopener">Isaac Lab</a></td><td>Robotics (Navigation)</td><td>Simulator</td><td><a href="https://isaac-sim.github.io/IsaacLab/main/index.html" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/isaac-sim/IsaacLab" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/1712.05474" target="_blank" rel="noopener">AI2THOR</a></td><td>Robotics (Navigation)</td><td>Simulator</td><td><a href="https://ai2thor.allenai.org/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/allenai/ai2thor" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2206.06994" target="_blank" rel="noopener">ProcTHOR</a></td><td>Robotics (Navigation)</td><td>Simulator + Dataset</td><td><a href="https://procthor.allenai.org/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/allenai/procthor" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/1806.07011" target="_blank" rel="noopener">VirtualHome</a></td><td>Robotics (Navigation)</td><td>Simulator</td><td><a href="http://virtual-home.org/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/xavierpuigf/virtualhome" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/abs/2007.04954" target="_blank" rel="noopener">ThreeDWorld</a></td><td>Robotics (Navigation)</td><td>Simulator</td><td><a href="https://www.threedworld.org/" 
target="_blank" rel="noopener">Website</a>, <a href="https://github.com/threedworld-mit/tdw" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2210.03094" target="_blank" rel="noopener">VIMA-Bench</a></td><td>Robotics (Manipulation)</td><td>Simulator</td><td><a href="https://vimalabs.github.io/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/vimalabs/VIMA" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2206.08522" target="_blank" rel="noopener">VLMbench</a></td><td>Robotics (Manipulation)</td><td>Simulator</td><td><a href="https://github.com/eric-ai-lab/VLMbench" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2112.03227" target="_blank" rel="noopener">CALVIN</a></td><td>Robotics (Manipulation)</td><td>Simulator</td><td><a href="http://calvin.cs.uni-freiburg.de/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/mees/calvin" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2410.01345" target="_blank" rel="noopener">GemBench</a></td><td>Robotics (Manipulation)</td><td>Simulator</td><td><a href="https://www.di.ens.fr/willow/research/gembench/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/vlc-robot/robot-3dlotus/" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2307.13854" target="_blank" rel="noopener">WebArena</a></td><td>Web Agent</td><td>Simulator</td><td><a href="https://webarena.dev/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/web-arena-x/webarena" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://openreview.net/pdf?id=sFyTZEqmUY" target="_blank" rel="noopener">UniSim</a></td><td>Robotics (Manipulation)</td><td>Generative Model, World Model</td><td><a href="https://universal-simulator.github.io/unisim/" target="_blank" rel="noopener">Website</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2309.17080" target="_blank" rel="noopener">GAIA-1</a></td><td>Robotics (Automonous Driving)</td><td>Generative Model, World Model</td><td><a href="https://wayve.ai/thinking/introducing-gaia1/" target="_blank" rel="noopener">Website</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2402.08268" target="_blank" rel="noopener">LWM</a></td><td>Embodied AI</td><td>Generative Model, World Model</td><td><a href="https://largeworldmodel.github.io/lwm/" target="_blank" rel="noopener">Website</a>, <a href="https://github.com/LargeWorldModel/LWM" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://github.com/Genesis-Embodied-AI/Genesis" target="_blank" rel="noopener">Genesis</a></td><td>Embodied AI</td><td>Generative Model, World Model</td><td><a href="https://github.com/Genesis-Embodied-AI/Genesis" target="_blank" rel="noopener">Github Repo</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2503.08604" target="_blank" rel="noopener">EMMOE</a></td><td>Embodied AI</td><td>Generative Model, World Model</td><td><a href="https://arxiv.org/pdf/2503.08604" target="_blank" rel="noopener">Paper</a></td></tr><tr><td><a href="https://arxiv.org/pdf/2311.01455" target="_blank" rel="noopener">RoboGen</a></td><td>Embodied AI</td><td>Generative Model, World Model</td><td><a href="https://robogen-ai.github.io/" target="_blank" rel="noopener">Website</a></td></tr><tr><td><a href="https://arxiv.org/abs/2412.20977" target="_blank" 
rel="noopener">UnrealZoo</a></td><td>Embodied AI (Tracking, Navigation, Multi Agent)</td><td>Simulator</td><td><a href="http://unrealzoo.site/" target="_blank" rel="noopener">Website</a></td></tr></tbody></table></div></div>
</div>
</div>
</section>
<section class="section" id="s2">
<div class="section-banner" style="--accent:#f59e0b">
<img src="https://images.unsplash.com/photo-1620712943543-bcc4688e7485?w=800&q=80" alt="" class="banner-img" loading="lazy"/>
<div class="banner-overlay"></div>
<div class="banner-text">
<i class="fas fa-fire banner-icon" style="color:#f59e0b"></i>
<h2>⚒️ Post-Training</h2>
<p class="entry-count">53 entries</p>
</div>
</div>
<div class="section-content">
<div class="subsection" id="s2-0">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>RL Alignment for VLM <span class="badge-sm">27</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>RL</th><th>Code</th></tr></thead><tbody><tr><td>Vero: An Open RL Recipe for General Visual Reasoning</td><td>04/2026</td><td><a href="https://arxiv.org/html/2604.04917v1" target="_blank" rel="noopener">Paper</a></td><td>Task-routed rewards; GRPO-based</td><td><a href="https://github.com/TIGER-AI-Lab/Vero" target="_blank" rel="noopener">Code</a></td></tr><tr><td>wDPO: Winsorized Direct Preference Optimization for Robust Alignment</td><td>03/2026</td><td><a href="https://arxiv.org/abs/2603.07211" target="_blank" rel="noopener">Paper</a></td><td>wDPO</td><td>-</td></tr><tr><td></td><td>f-GRPO and Beyond: Divergence-Based RL for General LLM Alignment</td><td>02/2026</td><td><a href="https://arxiv.org/abs/2602.05946" target="_blank" rel="noopener">Paper</a></td><td>f-GRPO / f-HAL</td></tr><tr><td></td><td>From Sight to Insight: Improving Visual Reasoning of MLLMs via Reinforcement Learning</td><td>01/2026</td><td><a href="https://arxiv.org/abs/2601.00215" target="_blank" rel="noopener">Paper</a></td><td>GRPO (6 reward functions)</td></tr><tr><td></td><td>SaFeR-VLM: Safety-Aware Reinforcement Learning for Multimodal Reasoning</td><td>2026 (ICLR)</td><td><a href="https://openreview.net/pdf/4f379d45027946b58a820908fd3a1711d66daa85.pdf" target="_blank" rel="noopener">Paper</a></td><td>GRPO + safety reward</td></tr><tr><td></td><td>SAIL-RL: Guiding MLLMs in When and How to Think via Dual-Reward RL Tuning</td><td>11/2025</td><td><a href="https://arxiv.org/abs/2511.02280" target="_blank" rel="noopener">Paper</a></td><td>Dual-Reward (Thinking + Judging)</td></tr><tr><td></td><td>GIFT: Group-relative Implicit Fine Tuning Integrates GRPO with DPO and UNA</td><td>10/2025</td><td><a href="https://arxiv.org/abs/2510.23868" target="_blank" rel="noopener">Paper</a></td><td>GIFT (convex MSE loss)</td></tr><tr><td></td><td>Game-RL: Synthesizing Multimodal Verifiable Game Data to Boost VLMs' General Reasoning</td><td>10/12/2025</td><td><a href="https://arxiv.org/abs/2505.13886" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td></tr><tr><td>Vision-Zero: Scalable VLM Self-Improvement via Strategic Gamified Self-Play</td><td>09/29/2025</td><td><a href="https://www.arxiv.org/abs/2509.25541" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td>-</td></tr><tr><td>Vision-SR1: Self-rewarding vision-language model via reasoning decomposition</td><td>08/26/2025</td><td><a href="https://arxiv.org/abs/2508.19652" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td>-</td></tr><tr><td>Group Sequence Policy Optimization</td><td>06/24/2025</td><td><a href="https://www.arxiv.org/abs/2507.18071" target="_blank" rel="noopener">Paper</a></td><td>GSPO</td><td>-</td></tr><tr><td>Visionary-R1: Mitigating Shortcuts in Visual Reasoning with Reinforcement Learning</td><td>05/20/2025</td><td><a href="https://arxiv.org/abs/2505.14677" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td>-</td></tr><tr><td>VideoChat-R1: Enhancing Spatio-Temporal Perception via Reinforcement Fine-Tuning</td><td>2025/04/10</td><td><a href="https://arxiv.org/abs/2504.06958" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td><a href="https://github.com/OpenGVLab/VideoChat-R1" target="_blank" rel="noopener">Code</a></td></tr><tr><td>OpenVLThinker: An Early Exploration to Complex Vision-Language Reasoning via Iterative Self-Improvement</td><td>2025/03/21</td><td><a href="https://arxiv.org/abs/2503.17352" 
target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td><a href="https://github.com/yihedeng9/OpenVLThinker" target="_blank" rel="noopener">Code</a></td></tr><tr><td>Boosting the Generalization and Reasoning of Vision Language Models with Curriculum Reinforcement Learning</td><td>2025/03/10</td><td><a href="https://arxiv.org/abs/2503.07065" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td><a href="https://github.com/ding523/Curr_REFT" target="_blank" rel="noopener">Code</a></td></tr><tr><td>OmniAlign-V: Towards Enhanced Alignment of MLLMs with Human Preference</td><td>2025</td><td><a href="https://arxiv.org/abs/2502.18411" target="_blank" rel="noopener">Paper</a></td><td>DPO</td><td><a href="https://github.com/PhoenixZ810/OmniAlign-V" target="_blank" rel="noopener">Code</a></td></tr><tr><td>Multimodal Open R1/R1-Multimodal-Journey</td><td>2025</td><td>-</td><td>GRPO</td><td><a href="https://github.com/EvolvingLMMs-Lab/open-r1-multimodal" target="_blank" rel="noopener">Code</a></td></tr><tr><td>R1-VL: Learning to Reason with Multimodal Large Language Models via Step-wise Group Relative Policy Optimization</td><td>2025</td><td><a href="https://arxiv.org/abs/2503.12937" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td><a href="https://github.com/jingyi0000/R1-VL" target="_blank" rel="noopener">Code</a></td></tr><tr><td>Agent-R1: Training Powerful LLM Agents with End-to-End Reinforcement Learning</td><td>2025</td><td>-</td><td>PPO/REINFORCE++/GRPO</td><td><a href="https://github.com/0russwest0/Agent-R1" target="_blank" rel="noopener">Code</a></td></tr><tr><td>MM-Eureka: Exploring Visual Aha Moment with Rule-based Large-scale Reinforcement Learning</td><td>2025</td><td><a href="https://arxiv.org/abs/2503.07365" target="_blank" rel="noopener">Paper</a></td><td><a href="https://openreview.net/pdf?id=r1lgTGL5DE" target="_blank" rel="noopener">REINFORCE Leave-One-Out (RLOO)</a></td><td><a href="https://github.com/ModalMinds/MM-EUREKA" target="_blank" rel="noopener">Code</a></td></tr><tr><td>MM-RLHF: The Next Step Forward in Multimodal LLM Alignment</td><td>2025</td><td><a href="https://arxiv.org/abs/2502.10391" target="_blank" rel="noopener">Paper</a></td><td>DPO</td><td><a href="https://github.com/Kwai-YuanQi/MM-RLHF" target="_blank" rel="noopener">Code</a></td></tr><tr><td>LMM-R1: Empowering 3B LMMs with Strong Reasoning Abilities Through Two-Stage Rule-Based RL</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.07536" target="_blank" rel="noopener">Paper</a></td><td>PPO</td><td><a href="https://github.com/TideDra/lmm-r1" target="_blank" rel="noopener">Code</a></td></tr><tr><td>Vision-R1: Incentivizing Reasoning Capability in Multimodal Large Language Models</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.06749" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td><a href="https://github.com/Osilly/Vision-R1" target="_blank" rel="noopener">Code</a></td></tr><tr><td>Unified Reward Model for Multimodal Understanding and Generation</td><td>2025</td><td><a href="https://arxiv.org/abs/2503.05236" target="_blank" rel="noopener">Paper</a></td><td>DPO</td><td><a href="https://github.com/CodeGoat24/UnifiedReward" target="_blank" rel="noopener">Code</a></td></tr><tr><td>Can We Generate Images with CoT? 
Let's Verify and Reinforce Image Generation Step by Step</td><td>2025</td><td><a href="https://arxiv.org/pdf/2501.13926" target="_blank" rel="noopener">Paper</a></td><td>DPO</td><td><a href="https://github.com/ZiyuGuo99/Image-Generation-CoT" target="_blank" rel="noopener">Code</a></td></tr><tr><td>All Roads Lead to Likelihood: The Value of Reinforcement Learning in Fine-Tuning</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.01067" target="_blank" rel="noopener">Paper</a></td><td>Online RL</td><td>-</td></tr><tr><td>Video-R1: Reinforcing Video Reasoning in MLLMs</td><td>2025</td><td><a href="https://arxiv.org/abs/2503.21776" target="_blank" rel="noopener">Paper</a></td><td>GRPO</td><td><a href="https://github.com/tulerfeng/Video-R1" target="_blank" rel="noopener">Code</a></td></tr></tbody></table></div></div>
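<div style="margin:10px 0 0">
<p><em>Note:</em> most entries above list GRPO (or a close variant) as the RL recipe. As a quick reference, the sketch below shows the group-relative advantage and clipped surrogate at the core of GRPO-style training; names are illustrative, and the KL penalty to a reference policy and the token-level weighting used by individual papers are omitted.</p>
<pre style="overflow-x:auto"><code># Minimal sketch of a GRPO-style update (illustrative; KL term and per-token details omitted).
# logp_new / logp_old: sequence log-probs for a group of G responses to ONE prompt;
# rewards: their scalar (often rule-based / verifiable) rewards. All are 1-D tensors of length G.
import torch

def grpo_loss(logp_new, logp_old, rewards, clip_eps=0.2):
    # Group-relative advantage: standardize rewards within the group,
    # so no learned value / critic network is needed.
    adv = (rewards - rewards.mean()) / (rewards.std() + 1e-8)
    ratio = torch.exp(logp_new - logp_old.detach())
    clipped = torch.clamp(ratio, 1.0 - clip_eps, 1.0 + clip_eps)
    # PPO-style clipped surrogate, averaged over the group (minimize the negative).
    return -torch.min(ratio * adv, clipped * adv).mean()
</code></pre>
</div>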
</div>
<div class="subsection" id="s2-1">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Finetuning for VLM <span class="badge-sm">14</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>AGFT: Alignment-Guided Fine-Tuning for Zero-Shot Adversarial Robustness of VLMs</td><td>2026/03</td><td><a href="https://arxiv.org/abs/2603.29410" target="_blank" rel="noopener">Paper</a></td><td>-</td><td>-</td></tr><tr><td></td><td>CoVFT: Context-aware Visual Fine-tuning for Multimodal Large Language Models</td><td>2026/03</td><td><a href="https://arxiv.org/abs/2603.21077" target="_blank" rel="noopener">Paper</a></td><td>-</td></tr><tr><td></td><td>MERGETUNE: Continued Fine-Tuning of Vision-Language Models</td><td>2026/01 (ICLR 2026)</td><td><a href="https://arxiv.org/abs/2601.10497" target="_blank" rel="noopener">Paper</a></td><td>-</td></tr><tr><td></td><td>Mask Fine-Tuning (MFT): Unlocking Hidden Capabilities in Vision-Language Models</td><td>2025/12</td><td><a href="https://arxiv.org/abs/2512.23073" target="_blank" rel="noopener">Paper</a></td><td>-</td></tr><tr><td></td><td>Image-LoRA: Towards Minimal Fine-Tuning of VLMs</td><td>2025/12</td><td><a href="https://arxiv.org/abs/2512.19219" target="_blank" rel="noopener">Paper</a></td><td>-</td></tr><tr><td></td><td>Reassessing the Role of Supervised Fine-Tuning: An Empirical Study in VLM Reasoning</td><td>2025/12</td><td><a href="https://arxiv.org/abs/2512.12690" target="_blank" rel="noopener">Paper</a></td><td>-</td></tr><tr><td></td><td>Eagle 2.5: Boosting Long-Context Post-Training for Frontier Vision-Language Models</td><td>2025/04/21</td><td><a href="https://arxiv.org/abs/2504.15271" target="_blank" rel="noopener">Paper</a></td><td><a href="https://nvlabs.github.io/EAGLE/" target="_blank" rel="noopener">Website</a></td></tr><tr><td>OMNICAPTIONER: One Captioner to Rule Them All</td><td>2025/04/09</td><td><a href="https://arxiv.org/abs/2504.07089" target="_blank" rel="noopener">Paper</a></td><td><a href="https://alpha-innovator.github.io/OmniCaptioner-project-page/" target="_blank" rel="noopener">Website</a></td><td><a href="https://github.com/Alpha-Innovator/OmniCaptioner" target="_blank" rel="noopener">Code</a></td></tr><tr><td>Inst-IT: Boosting Multimodal Instance Understanding via Explicit Visual Prompt Instruction Tuning</td><td>2024</td><td><a href="https://arxiv.org/abs/2412.03565" target="_blank" rel="noopener">Paper</a></td><td><a href="https://github.com/Alpha-Innovator/OmniCaptioner" target="_blank" rel="noopener">Website</a></td><td><a href="https://github.com/inst-it/inst-it" target="_blank" rel="noopener">Code</a></td></tr><tr><td>LLaVolta: Efficient Multi-modal Models via Stage-wise Visual Context Compression</td><td>2024</td><td><a href="https://arxiv.org/pdf/2406.20092" target="_blank" rel="noopener">Paper</a></td><td><a href="https://beckschen.github.io/llavolta.html" target="_blank" rel="noopener">Website</a></td><td><a href="https://github.com/Beckschen/LLaVolta" target="_blank" rel="noopener">Code</a></td></tr><tr><td>ViTamin: Designing Scalable Vision Models in the Vision-Language Era</td><td>2024</td><td><a href="https://arxiv.org/pdf/2404.02132" target="_blank" rel="noopener">Paper</a></td><td><a href="https://beckschen.github.io/vitamin.html" target="_blank" rel="noopener">Website</a></td><td><a href="https://github.com/Beckschen/ViTamin" target="_blank" rel="noopener">Code</a></td></tr><tr><td>Espresso: High Compression For Rich Extraction From Videos for Your Vision-Language Model</td><td>2024</td><td><a href="https://arxiv.org/pdf/2412.04729" 
target="_blank" rel="noopener">Paper</a></td><td>-</td><td>-</td></tr><tr><td>Should VLMs be Pre-trained with Image Data?</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.07603" target="_blank" rel="noopener">Paper</a></td><td>-</td><td>-</td></tr><tr><td>VisionArena: 230K Real World User-VLM Conversations with Preference Labels</td><td>2024</td><td><a href="https://arxiv.org/pdf/2412.08687" target="_blank" rel="noopener">Paper</a></td><td>-</td><td><a href="https://huggingface.co/lmarena-ai" target="_blank" rel="noopener">Code</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s2-2">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>VLM Alignment GitHub Repos <span class="badge-sm">7</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Project</th><th>Repository Link</th></tr></thead><tbody><tr><td>Verl</td><td><a href="https://github.com/volcengine/verl" target="_blank" rel="noopener">🔗 GitHub</a></td></tr><tr><td>EasyR1</td><td><a href="https://github.com/hiyouga/EasyR1" target="_blank" rel="noopener">🔗 GitHub</a></td></tr><tr><td>OpenR1</td><td><a href="https://github.com/EvolvingLMMs-Lab/open-r1-multimodal" target="_blank" rel="noopener">🔗 GitHub</a></td></tr><tr><td>LLaMAFactory</td><td><a href="https://github.com/hiyouga/LLaMA-Factory" target="_blank" rel="noopener">🔗 GitHub</a></td></tr><tr><td>MM-Eureka-Zero</td><td><a href="https://github.com/ModalMinds/MM-EUREKA/tree/main" target="_blank" rel="noopener">🔗 GitHub</a></td></tr><tr><td>MM-RLHF</td><td><a href="https://github.com/Kwai-YuanQi/MM-RLHF" target="_blank" rel="noopener">🔗 GitHub</a></td></tr><tr><td>LMM-R1</td><td><a href="https://github.com/TideDra/lmm-r1" target="_blank" rel="noopener">🔗 GitHub</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s2-3">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Prompt Optimization <span class="badge-sm">5</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>EvoPrompt: Evolving Prompt Adaptation for Vision-Language Models</td><td>2026/03</td><td><a href="https://arxiv.org/abs/2603.09493" target="_blank" rel="noopener">Paper</a></td><td>-</td><td>-</td></tr><tr><td></td><td>MMLoP: Multi-Modal Low-Rank Prompting for Efficient Vision-Language Adaptation</td><td>2026/02</td><td><a href="https://arxiv.org/abs/2602.21397" target="_blank" rel="noopener">Paper</a></td><td>-</td></tr><tr><td></td><td>Multimodal Prompt Optimizer (MPO): Joint Optimization of Multimodal Prompts</td><td>2025/10</td><td><a href="https://arxiv.org/abs/2510.09201" target="_blank" rel="noopener">Paper</a></td><td>-</td></tr><tr><td></td><td>Evolutionary Prompt Optimization Discovers Emergent Multimodal Reasoning Strategies</td><td>2025/03</td><td><a href="https://arxiv.org/abs/2503.23503" target="_blank" rel="noopener">Paper</a></td><td>-</td></tr><tr><td></td><td>In-ContextEdit:EnablingInstructionalImageEditingwithIn-Context GenerationinLargeScaleDiffusionTransformer</td><td>2025/04/30</td><td><a href="https://arxiv.org/abs/2504.20690" target="_blank" rel="noopener">Paper</a></td><td><a href="https://river-zhang.github.io/ICEdit-gh-pages/" target="_blank" rel="noopener">Website</a></td></tr></tbody></table></div></div>
</div>
</div>
</section>
<section class="section" id="s3">
<div class="section-banner" style="--accent:#10b981">
<img src="https://images.unsplash.com/photo-1485827404703-89b55fcc595e?w=800&q=80" alt="" class="banner-img" loading="lazy"/>
<div class="banner-overlay"></div>
<div class="banner-text">
<i class="fas fa-cogs banner-icon" style="color:#10b981"></i>
<h2>⚒️ Applications</h2>
<p class="entry-count">116 entries</p>
</div>
</div>
<div class="section-content">
<div class="subsection" id="s3-0">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Embodied VLM Agents <span class="badge-sm">9</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper Link</th></tr></thead><tbody><tr><td>Aligning Cyber Space with Physical World: A Comprehensive Survey on Embodied AI</td><td>2024</td><td><a href="https://arxiv.org/pdf/2407.06886v1" target="_blank" rel="noopener">Paper</a></td></tr><tr><td>ScreenAI: A Vision-Language Model for UI and Infographics Understanding</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.04615" target="_blank" rel="noopener">Paper</a></td></tr><tr><td>ChartLlama: A Multimodal LLM for Chart Understanding and Generation</td><td>2023</td><td><a href="https://arxiv.org/pdf/2311.16483" target="_blank" rel="noopener">Paper</a></td></tr><tr><td>SciDoc2Diagrammer-MAF: Towards Generation of Scientific Diagrams from Documents guided by Multi-Aspect Feedback Refinement</td><td>2024</td><td><a href="https://arxiv.org/pdf/2409.19242" target="_blank" rel="noopener">📄 Paper</a></td></tr><tr><td>Training a Vision Language Model as Smartphone Assistant</td><td>2024</td><td><a href="https://arxiv.org/pdf/2404.08755" target="_blank" rel="noopener">Paper</a></td></tr><tr><td>ScreenAgent: A Vision-Language Model-Driven Computer Control Agent</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.07945" target="_blank" rel="noopener">Paper</a></td></tr><tr><td>Embodied Vision-Language Programmer from Environmental Feedback</td><td>2024</td><td><a href="https://arxiv.org/pdf/2310.08588" target="_blank" rel="noopener">Paper</a></td></tr><tr><td>VLMs Play StarCraft II: A Benchmark and Multimodal Decision Method</td><td>2025</td><td><a href="https://arxiv.org/abs/2503.05383" target="_blank" rel="noopener">📄 Paper</a></td></tr><tr><td>MP-GUI: Modality Perception with MLLMs for GUI Understanding</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.14021" target="_blank" rel="noopener">📄 Paper</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-1">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Generative Visual Media Applications <span class="badge-sm">4</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>GPT4Motion: Scripting Physical Motions in Text-to-Video Generation via Blender-Oriented GPT Planning</td><td>2023</td><td><a href="https://arxiv.org/pdf/2311.12631" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://gpt4motion.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/jiaxilv/GPT4Motion" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Spurious Correlation in Multimodal LLMs</td><td>2025</td><td><a href="https://arxiv.org/abs/2503.08884" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>WeGen: A Unified Model for Interactive Multimodal Generation as We Chat</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.01115" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/hzphzp/WeGen" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>VideoMind: A Chain-of-LoRA Agent for Long Video Reasoning</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.13444" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://videomind.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/yeliudev/VideoMind" target="_blank" rel="noopener">💾 Code</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-2">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Robotics and Embodied AI <span class="badge-sm">31</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>AHA: A Vision-Language-Model for Detecting and Reasoning Over Failures in Robotic Manipulation</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.00371" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://aha-vlm.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>SpatialVLM: Endowing Vision-Language Models with Spatial Reasoning Capabilities</td><td>2024</td><td><a href="https://arxiv.org/pdf/2401.12168" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://spatial-vlm.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Vision-language model-driven scene understanding and robotic object manipulation</td><td>2024</td><td><a href="https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10711845&casa_token=to4vCckCewMAAAAA:2ykeIrubUOxwJ1rhwwakorQFAwUUBQhL_Ct7dnYBceWU5qYXiCoJp_yQkmJbmtiEVuX2jcpvB92n&tag=1" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Guiding Long-Horizon Task and Motion Planning with Vision Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.02193" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://zt-yang.github.io/vlm-tamp-robot/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>AutoTAMP: Autoregressive Task and Motion Planning with LLMs as Translators and Checkers</td><td>2023</td><td><a href="https://arxiv.org/pdf/2306.06531" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://yongchao98.github.io/MIT-REALM-AutoTAMP/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>VLM See, Robot Do: Human Demo Video to Robot Action Plan via Vision Language Model</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.08792" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Scalable Multi-Robot Collaboration with Large Language Models: Centralized or Decentralized Systems?</td><td>2023</td><td><a href="https://arxiv.org/pdf/2309.15943" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://yongchao98.github.io/MIT-REALM-Multi-Robot/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>DART-LLM: Dependency-Aware Multi-Robot Task Decomposition and Execution using Large Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2411.09022" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://wyd0817.github.io/project-dart-llm/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>MotionGPT: Human Motion as a Foreign Language</td><td>2023</td><td><a href="https://proceedings.neurips.cc/paper_files/paper/2023/file/3fbf0c1ea0716c03dea93bb6be78dd6f-Paper-Conference.pdf" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/OpenMotionLab/MotionGPT" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Learning Reward for Robot Skills Using Large Language Models via Self-Alignment</td><td>2024</td><td><a href="https://arxiv.org/pdf/2405.07162" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Language to Rewards for Robotic Skill Synthesis</td><td>2023</td><td><a href="https://language-to-reward.github.io/assets/l2r.pdf" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://language-to-reward.github.io/" target="_blank" 
rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Eureka: Human-Level Reward Design via Coding Large Language Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2310.12931" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://eureka-research.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Integrated Task and Motion Planning</td><td>2020</td><td><a href="https://arxiv.org/pdf/2010.01083" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Jailbreaking LLM-Controlled Robots</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.13691" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://robopair.org/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Robots Enact Malignant Stereotypes</td><td>2022</td><td><a href="https://arxiv.org/pdf/2207.11569" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://sites.google.com/view/robots-enact-stereotypes" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>LLM-Driven Robots Risk Enacting Discrimination, Violence, and Unlawful Actions</td><td>2024</td><td><a href="https://arxiv.org/pdf/2406.08824" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Highlighting the Safety Concerns of Deploying LLMs/VLMs in Robotics</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.10340" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://wuxiyang1996.github.io/adversary-vlm-robotics/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>EmbodiedBench: Comprehensive Benchmarking Multi-modal Large Language Models for Vision-Driven Embodied Agents</td><td>2025</td><td><a href="https://arxiv.org/pdf/2502.09560" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://embodiedbench.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/EmbodiedBench/EmbodiedBench" target="_blank" rel="noopener">💾 Code & Dataset</a></td></tr><tr><td>Gemini Robotics: Bringing AI into the Physical World</td><td>2025</td><td><a href="https://storage.googleapis.com/deepmind-media/gemini-robotics/gemini_robotics_report.pdf" target="_blank" rel="noopener">📄 Technical Report</a></td><td><a href="https://deepmind.google/technologies/gemini-robotics/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>GR-2: A Generative Video-Language-Action Model with Web-Scale Knowledge for Robot Manipulation</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.06158" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://gr2-manipulation.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Magma: A Foundation Model for Multimodal AI Agents</td><td>2025</td><td><a href="https://arxiv.org/pdf/2502.13130" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://microsoft.github.io/Magma/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/microsoft/Magma" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>DayDreamer: World Models for Physical Robot Learning</td><td>2022</td><td><a href="https://arxiv.org/pdf/2206.14176" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://danijar.com/project/daydreamer/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/danijar/daydreamer" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Hi Robot: Open-Ended Instruction Following 
with Hierarchical Vision-Language-Action Models</td><td>2025</td><td><a href="https://arxiv.org/pdf/2206.14176" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>RL-VLM-F: Reinforcement Learning from Vision Language Foundation Model Feedback</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.03681" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://rlvlmf2024.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/yufeiwang63/RL-VLM-F" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>KALIE: Fine-Tuning Vision-Language Models for Open-World Manipulation without Robot Data</td><td>2024</td><td><a href="https://arxiv.org/pdf/2409.14066" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://kalie-vlm.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/gractang/kalie" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Unified Video Action Model</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.00200" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://unified-video-action-model.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/ShuangLI59/unified_video_action" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>HybridVLA: Collaborative Diffusion and Autoregression in a Unified Vision-Language-Action Model</td><td>2025</td><td><a href="https://arxiv.org/abs/2503.10631" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://hybrid-vla.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/PKU-HMI-Lab/Hybrid-VLA" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>DAM-VLA: A Dynamic Action Model-Based Vision-Language-Action Framework for Robot Manipulation</td><td>03/2026</td><td><a href="https://arxiv.org/abs/2603.00926" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>NS-VLA: Towards Neuro-Symbolic Vision-Language-Action Models</td><td>03/2026</td><td><a href="https://arxiv.org/abs/2603.09542" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Steerable Vision-Language-Action Policies for Embodied Reasoning and Hierarchical Control</td><td>02/2026</td><td><a href="https://arxiv.org/abs/2602.13193" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>ST4VLA: Spatial Guided Training for Vision-Language-Action Models</td><td>02/2026</td><td><a href="https://arxiv.org/abs/2602.10109" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-3">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Manipulation <span class="badge-sm">11</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>VIMA: General Robot Manipulation with Multimodal Prompts</td><td>2022</td><td><a href="https://arxiv.org/pdf/2210.03094" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://vimalabs.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td></td></tr><tr><td>Instruct2Act: Mapping Multi-Modality Instructions to Robotic Actions with Large Language Model</td><td>2023</td><td><a href="https://arxiv.org/pdf/2305.11176" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Creative Robot Tool Use with Large Language Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2310.13065" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://creative-robotool.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>RoboVQA: Multimodal Long-Horizon Reasoning for Robotics</td><td>2024</td><td><a href="https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10610216" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>RT-1: Robotics Transformer for Real-World Control at Scale</td><td>2022</td><td><a href="https://robotics-transformer1.github.io/assets/rt1.pdf" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://robotics-transformer1.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>RT-2: Vision-Language-Action Models Transfer Web Knowledge to Robotic Control</td><td>2023</td><td><a href="https://arxiv.org/pdf/2307.15818" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://robotics-transformer2.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Open X-Embodiment: Robotic Learning Datasets and RT-X Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2310.08864" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://robotics-transformer-x.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>ExploRLLM: Guiding Exploration in Reinforcement Learning with Large Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2403.09583" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://explorllm.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>AnyTouch: Learning Unified Static-Dynamic Representation across Multiple Visuo-tactile Sensors</td><td>2025</td><td><a href="https://arxiv.org/pdf/2502.12191" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://gewu-lab.github.io/AnyTouch/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/GeWu-Lab/AnyTouch" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Masked World Models for Visual Control</td><td>2022</td><td><a href="https://arxiv.org/pdf/2206.14244" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://sites.google.com/view/mwm-rl" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/younggyoseo/MWM" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Multi-View Masked World Models for Visual Robotic Manipulation</td><td>2023</td><td><a href="https://arxiv.org/pdf/2302.02408" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://sites.google.com/view/mv-mwm" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/younggyoseo/MV-MWM" 
target="_blank" rel="noopener">💾 Code</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-4">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Navigation <span class="badge-sm">8</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>ZSON: Zero-Shot Object-Goal Navigation using Multimodal Goal Embeddings</td><td>2022</td><td><a href="https://arxiv.org/pdf/2206.12403" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>LOC-ZSON: Language-driven Object-Centric Zero-Shot Object Retrieval and Navigation</td><td>2024</td><td><a href="https://arxiv.org/pdf/2405.05363" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>LM-Nav: Robotic Navigation with Large Pre-Trained Models of Language, Vision, and Action</td><td>2022</td><td><a href="https://arxiv.org/pdf/2207.04429" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://sites.google.com/view/lmnav" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>NaVILA: Legged Robot Vision-Language-Action Model for Navigation</td><td>2022</td><td><a href="https://arxiv.org/pdf/2412.04453" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://navila-bot.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>VLFM: Vision-Language Frontier Maps for Zero-Shot Semantic Navigation</td><td>2024</td><td><a href="https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10610712&casa_token=qvFCSt20n0MAAAAA:MSC4P7bdlfQuMRFrmIl706B-G8ejcxH9ZKROKETL1IUZIW7m_W4hKW-kWrxw-F8nykoysw3WYHnd" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Navigation with Large Language Models: Semantic Guesswork as a Heuristic for Planning</td><td>2023</td><td><a href="https://arxiv.org/pdf/2310.10103" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://sites.google.com/view/lfg-nav/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Vi-LAD: Vision-Language Attention Distillation for Socially-Aware Robot Navigation in Dynamic Environments</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.09820" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Navigation World Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2412.03572" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://www.amirbar.net/nwm/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-5">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Human-Robot Interaction <span class="badge-sm">3</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>MUTEX: Learning Unified Policies from Multimodal Task Specifications</td><td>2023</td><td><a href="https://arxiv.org/pdf/2309.14320" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://ut-austin-rpl.github.io/MUTEX/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>LaMI: Large Language Models for Multi-Modal Human-Robot Interaction</td><td>2024</td><td><a href="https://arxiv.org/pdf/2401.15174" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://hri-eu.github.io/Lami/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>VLM-Social-Nav: Socially Aware Robot Navigation through Scoring using Vision-Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2404.00210" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-6">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Autonomous Driving <span class="badge-sm">16</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>UniDriveVLA: Unifying Understanding, Perception, and Action Planning for Autonomous Driving</td><td>04/2026</td><td><a href="https://arxiv.org/abs/2604.02190" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td></td><td>AutoMoT: A Unified Vision-Language-Action Model with Asynchronous Mixture-of-Transformers for End-to-End Autonomous Driving</td><td>03/2026</td><td><a href="https://arxiv.org/abs/2603.14851" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>DriveVLM-RL: Neuroscience-Inspired Reinforcement Learning with Vision-Language Models for Safe Autonomous Driving</td><td>03/2026</td><td><a href="https://arxiv.org/abs/2603.18315" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>HiST-VLA: A Hierarchical Spatio-Temporal Vision-Language-Action Model for End-to-End Autonomous Driving</td><td>02/2026</td><td><a href="https://arxiv.org/abs/2602.13329" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>OpenDriveVLA: Towards End-to-end Autonomous Driving with Large Vision Language Action Model</td><td>03/2025</td><td><a href="https://arxiv.org/abs/2503.23463" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>Are VLMs Ready for Autonomous Driving? An Empirical Study from the Reliability, Data, and Metric Perspectives</td><td>01/07/2025</td><td><a href="https://arxiv.org/abs/2501.04003" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="drive-bench.github.io" target="_blank" rel="noopener">🌍 Website</a></td></tr><tr><td>DriveVLM: The Convergence of Autonomous Driving and Large Vision-Language Models</td><td>2024</td><td><a href="https://arxiv.org/abs/2402.12289" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://tsinghua-mars-lab.github.io/DriveVLM/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>GPT-Driver: Learning to Drive with GPT</td><td>2023</td><td><a href="https://arxiv.org/abs/2310.01415" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>LanguageMPC: Large Language Models as Decision Makers for Autonomous Driving</td><td>2023</td><td><a href="https://arxiv.org/abs/2310.03026" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://sites.google.com/view/llm-mpc" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Driving with LLMs: Fusing Object-Level Vector Modality for Explainable Autonomous Driving</td><td>2023</td><td><a href="https://arxiv.org/abs/2310.01957" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Referring Multi-Object Tracking</td><td>2023</td><td><a href="https://arxiv.org/pdf/2303.03366" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/wudongming97/RMOT" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>VLPD: Context-Aware Pedestrian Detection via Vision-Language Semantic Self-Supervision</td><td>2023</td><td><a href="https://arxiv.org/pdf/2304.03135" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/lmy98129/VLPD" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>MotionLM: Multi-Agent Motion Forecasting as Language Modeling</td><td>2023</td><td><a href="https://arxiv.org/pdf/2309.16534" target="_blank" rel="noopener">📄 
Paper</a></td><td>-</td><td>-</td></tr><tr><td>DiLu: A Knowledge-Driven Approach to Autonomous Driving with Large Language Models</td><td>2023</td><td><a href="https://arxiv.org/abs/2309.16292" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://pjlab-adg.github.io/DiLu/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>VLP: Vision Language Planning for Autonomous Driving</td><td>2024</td><td><a href="https://arxiv.org/pdf/2401.05577" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>DriveGPT4: Interpretable End-to-end Autonomous Driving via Large Language Model</td><td>2023</td><td><a href="https://arxiv.org/abs/2310.01412" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-7">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Human-Centered AI <span class="badge-sm">5</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>DLF: Disentangled-Language-Focused Multimodal Sentiment Analysis</td><td>2024</td><td><a href="https://arxiv.org/pdf/2412.12225" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/pwang322/DLF" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>LIT: Large Language Model Driven Intention Tracking for Proactive Human-Robot Collaboration – A Robot Sous-Chef Application</td><td>2024</td><td><a href="https://arxiv.org/abs/2406.13787" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Pretrained Language Models as Visual Planners for Human Assistance</td><td>2023</td><td><a href="https://arxiv.org/pdf/2304.09179" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Promoting AI Equity in Science: Generalized Domain Prompt Learning for Accessible VLM Research</td><td>2024</td><td><a href="https://arxiv.org/pdf/2405.08668" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Image and Data Mining in Reticular Chemistry Using GPT-4V</td><td>2023</td><td><a href="https://arxiv.org/pdf/2312.05468" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-8">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Web Agent <span class="badge-sm">7</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>A Real-World WebAgent with Planning, Long Context Understanding, and Program Synthesis</td><td>2023</td><td><a href="https://arxiv.org/pdf/2307.12856" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>CogAgent: A Visual Language Model for GUI Agents</td><td>2023</td><td><a href="https://arxiv.org/pdf/2312.08914" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/THUDM/CogAgent" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>WebVoyager: Building an End-to-End Web Agent with Large Multimodal Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2401.13919" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/MinorJerry/WebVoyager" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>ShowUI: One Vision-Language-Action Model for GUI Visual Agent</td><td>2024</td><td><a href="https://arxiv.org/pdf/2411.17465" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/showlab/ShowUI" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>ScreenAgent: A Vision Language Model-driven Computer Control Agent</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.07945" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/niuzaisheng/ScreenAgent" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Web Agents with World Models: Learning and Leveraging Environment Dynamics in Web Navigation</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.13232" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://huggingface.co/papers/2410.13232" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td></td><td>MolmoWeb: Open Visual Web Agent and Open Data for the Open Web</td><td>04/2026</td><td><a href="https://huggingface.tw/papers/2604.08516" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://allenai.org/blog/molmoweb" target="_blank" rel="noopener">🌍 Website</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-9">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Accessibility <span class="badge-sm">3</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>X-World: Accessibility, Vision, and Autonomy Meet</td><td>2021</td><td><a href="https://openaccess.thecvf.com/content/ICCV2021/papers/Zhang_X-World_Accessibility_Vision_and_Autonomy_Meet_ICCV_2021_paper.pdf" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Context-Aware Image Descriptions for Web Accessibility</td><td>2024</td><td><a href="https://arxiv.org/pdf/2409.03054" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Improving VR Accessibility Through Automatic 360 Scene Description Using Multimodal Large Language Models</td><td>2024</td><td><a href="https://dl.acm.org/doi/10.1145/3691573.3691619" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-10">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Healthcare <span class="badge-sm">9</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>CARE: Towards Clinical Accountability in Multi-Modal Medical Reasoning with an Evidence-Grounded Agentic Framework</td><td>03/2026</td><td><a href="https://arxiv.org/abs/2603.01607" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td></td><td>MedMO: Grounding and Understanding Multimodal Large Language Model for Medical Images</td><td>02/2026</td><td><a href="https://arxiv.org/abs/2602.06965" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>Colon-X: Advancing Intelligent Colonoscopy from Multimodal Understanding to Clinical Reasoning</td><td>12/2025</td><td><a href="https://arxiv.org/abs/2512.03667" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td>Frontiers in Intelligent Colonoscopy</td><td>02/2025</td><td><a href="https://arxiv.org/pdf/2410.17241" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/ai4colonoscopy/IntelliScope" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>VisionUnite: A Vision-Language Foundation Model for Ophthalmology Enhanced with Clinical Knowledge</td><td>2024</td><td><a href="https://arxiv.org/pdf/2408.02865" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/HUANGLIZI/VisionUnite" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Multimodal Healthcare AI: Identifying and Designing Clinically Relevant Vision-Language Applications for Radiology</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.14252" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>M-FLAG: Medical Vision-Language Pre-training with Frozen Language Models and Latent Space Geometry Optimization</td><td>2023</td><td><a href="https://arxiv.org/pdf/2307.08347" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>MedCLIP: Contrastive Learning from Unpaired Medical Images and Text</td><td>2022</td><td><a href="https://arxiv.org/pdf/2210.10163" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/RyanWangZf/MedCLIP" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Med-Flamingo: A Multimodal Medical Few-Shot Learner</td><td>2023</td><td><a href="https://arxiv.org/pdf/2307.15189" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/snap-stanford/med-flamingo" target="_blank" rel="noopener">💾 Code</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s3-11">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Social Goodness <span class="badge-sm">10</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>Analyzing K-12 AI Education: A Large Language Model Study of Classroom Instruction on Learning Theories, Pedagogy, Tools, and AI Literacy</td><td>2024</td><td><a href="https://www.sciencedirect.com/science/article/pii/S2666920X24000985" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Students Rather Than Experts: A New AI for Education Pipeline to Model More Human-Like and Personalized Early Adolescence</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.15701" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Harnessing Large Vision and Language Models in Agriculture: A Review</td><td>2024</td><td><a href="https://arxiv.org/pdf/2407.19679" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>A Vision-Language Model for Predicting Potential Distribution Land of Soybean Double Cropping</td><td>2024</td><td><a href="https://www.frontiersin.org/journals/environmental-science/articles/10.3389/fenvs.2024.1515752/abstract" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Vision-Language Model is NOT All You Need: Augmentation Strategies for Molecule Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2407.09043" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/Namkyeong/AMOLE" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>DrawEduMath: Evaluating Vision Language Models with Expert-Annotated Students’ Hand-Drawn Math Images</td><td>2024</td><td><a href="https://openreview.net/pdf?id=0vQYvcinij" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>MultiMath: Bridging Visual and Mathematical Reasoning for Large Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2409.00147" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/pengshuai-rin/MultiMath" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Vision-Language Models Meet Meteorology: Developing Models for Extreme Weather Events Detection with Heatmaps</td><td>2024</td><td><a href="https://arxiv.org/pdf/2406.09838" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/AlexJJJChen/Climate-Zoo" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>He is Very Intelligent, She is Very Beautiful? On Mitigating Social Biases in Language Modeling and Generation</td><td>2021</td><td><a href="https://aclanthology.org/2021.findings-acl.397.pdf" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>UrbanVLP: Multi-Granularity Vision-Language Pretraining for Urban Region Profiling</td><td>2024</td><td><a href="https://arxiv.org/pdf/2403.168318" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
</div>
</section>
<section class="section" id="s4">
<div class="section-banner" style="--accent:#ef4444">
<img src="https://images.unsplash.com/photo-1558494949-ef010cbdcc31?w=800&q=80" alt="" class="banner-img" loading="lazy"/>
<div class="banner-overlay"></div>
<div class="banner-text">
<i class="fas fa-shield-alt banner-icon" style="color:#ef4444"></i>
<h2>Challenges</h2>
<p class="entry-count">76 entries</p>
</div>
</div>
<div class="section-content">
<div class="subsection" id="s4-0">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Hallucination <span class="badge-sm">15</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>Focus Matters: Phase-Aware Suppression for Hallucination in Vision-Language Models</td><td>04/2026</td><td><a href="https://arxiv.org/abs/2604.03556" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td></td><td>VLMs Need Words: Vision Language Models Ignore Visual Detail in Favor of Semantic Anchors</td><td>04/2026</td><td><a href="https://arxiv.org/abs/2604.02486" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>HALP: Detecting Hallucinations in Vision-Language Models without Generating a Single Token</td><td>03/2026</td><td><a href="https://arxiv.org/abs/2603.05465" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://aclanthology.org/2026.eacl-long.287/" target="_blank" rel="noopener">🌍 ACL</a></td></tr><tr><td></td><td>Tone Matters: The Impact of Linguistic Tone on Hallucination in VLMs</td><td>01/2026</td><td><a href="https://arxiv.org/abs/2601.06460" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>Object Hallucination in Image Captioning</td><td>2018</td><td><a href="https://arxiv.org/pdf/1809.02156" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td>Evaluating Object Hallucination in Large Vision-Language Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2305.10355" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/RUCAIBox/POPE" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Detecting and Preventing Hallucinations in Large Vision Language Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2308.06394" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>HallE-Control: Controlling Object Hallucination in Large Multimodal Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2310.01779" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/bronyayang/HallE_Control" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Hallu-PI: Evaluating Hallucination in Multi-modal Large Language Models within Perturbed Inputs</td><td>2024</td><td><a href="https://arxiv.org/pdf/2408.01355" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/NJUNLP/Hallu-PI" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>BEAF: Observing BEfore-AFter Changes to Evaluate Hallucination in Vision-Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2407.13442" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://beafbench.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>HallusionBench: An Advanced Diagnostic Suite for Entangled Language Hallucination and Visual Illusion in Large Vision-Language Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2310.14566" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/tianyi-lab/HallusionBench" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>AUTOHALLUSION: Automatic Generation of Hallucination Benchmarks for Vision-Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2406.10900" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://wuxiyang1996.github.io/autohallusion_page/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Mitigating 
Hallucination in Large Multi-Modal Models via Robust Instruction Tuning</td><td>2023</td><td><a href="https://arxiv.org/pdf/2306.14565" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/FuxiaoLiu/LRV-Instruction" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Hal-Eval: A Universal and Fine-grained Hallucination Evaluation Framework for Large Vision Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.15721" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/WisdomShell/hal-eval" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>AMBER: An LLM-free Multi-dimensional Benchmark for MLLMs Hallucination Evaluation</td><td>2023</td><td><a href="https://arxiv.org/pdf/2311.07397" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/junyangwang0410/AMBER" target="_blank" rel="noopener">💾 Code</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s4-1">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Safety <span class="badge-sm">11</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>SaFeR-VLM: Safety into Multimodal Reasoning via Reinforcement Learning</td><td>2026 (ICLR)</td><td><a href="https://openreview.net/pdf/4f379d45027946b58a820908fd3a1711d66daa85.pdf" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td></td><td>HoliSafe: Holistic Safety Evaluation for Vision-Language Models</td><td>2026 (ICLR)</td><td><a href="https://openreview.net/pdf/c0a7991cefe100852616861d5046c3b90cfed936.pdf" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>JailbreakZoo: Survey, Landscapes, and Horizons in Jailbreaking Large Language and Vision-Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2407.01599" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://chonghan-chen.com/llm-jailbreak-zoo-survey/" target="_blank" rel="noopener">🌍 Website</a></td></tr><tr><td>Safe-VLN: Collision Avoidance for Vision-and-Language Navigation of Autonomous Robots Operating in Continuous Environments</td><td>2023</td><td><a href="https://arxiv.org/pdf/2311.02817" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>SafeBench: A Safety Evaluation Framework for Multimodal Large Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.18927" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>JailBreakV: A Benchmark for Assessing the Robustness of MultiModal Large Language Models against Jailbreak Attacks</td><td>2024</td><td><a href="https://arxiv.org/pdf/2404.03027" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>SHIELD: An Evaluation Benchmark for Face Spoofing and Forgery Detection with Multimodal Large Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.04178" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/laiyingxin2/SHIELD" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Images are Achilles' Heel of Alignment: Exploiting Visual Vulnerabilities for Jailbreaking Multimodal Large Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2403.09792" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Jailbreaking Attack against Multimodal Large Language Model</td><td>2024</td><td><a href="https://arxiv.org/pdf/2402.02309" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td></td><td>Embodied Red Teaming for Auditing Robotic Foundation Models</td><td>2025</td><td><a href="https://arxiv.org/pdf/2411.18676" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://s-karnik.github.io/embodied-red-team-project-page/" target="_blank" rel="noopener">🌍 Website</a></td></tr><tr><td>Safety Guardrails for LLM-Enabled Robots</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.07885" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s4-2">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Fairness <span class="badge-sm">8</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>Hallucination of Multimodal Large Language Models: A Survey</td><td>2024</td><td><a href="https://arxiv.org/pdf/2404.18930" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Bias and Fairness in Large Language Models: A Survey</td><td>2023</td><td><a href="https://arxiv.org/pdf/2309.00770" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Fairness and Bias in Multimodal AI: A Survey</td><td>2024</td><td><a href="https://arxiv.org/pdf/2406.19097" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Multi-Modal Bias: Introducing a Framework for Stereotypical Bias Assessment beyond Gender and Race in Vision–Language Models</td><td>2023</td><td><a href="http://gerard.demelo.org/papers/multimodal-bias.pdf" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>FMBench: Benchmarking Fairness in Multimodal Large Language Models on Medical Tasks</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.01089" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>FairCLIP: Harnessing Fairness in Vision-Language Learning</td><td>2024</td><td><a href="https://arxiv.org/pdf/2403.19949" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>FairMedFM: Fairness Benchmarking for Medical Imaging Foundation Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2407.00983" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Benchmarking Vision Language Models for Cultural Understanding</td><td>2024</td><td><a href="https://arxiv.org/pdf/2407.10920" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s4-4">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Multi-modality Alignment <span class="badge-sm">7</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>Mitigating Hallucinations in Large Vision-Language Models with Instruction Contrastive Decoding</td><td>2024</td><td><a href="https://arxiv.org/pdf/2403.18715" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Enhancing Visual-Language Modality Alignment in Large Vision Language Models via Self-Improvement</td><td>2024</td><td><a href="https://arxiv.org/pdf/2405.15973" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Assessing and Learning Alignment of Unimodal Vision and Language Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2412.04616" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://lezhang7.github.io/sail.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>Extending Multi-modal Contrastive Representations</td><td>2023</td><td><a href="https://arxiv.org/pdf/2310.08884" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/MCR-PEFT/Ex-MCR" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>OneLLM: One Framework to Align All Modalities with Language</td><td>2023</td><td><a href="https://arxiv.org/pdf/2312.03700" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/csuhan/OneLLM" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>What You See is What You Read? Improving Text-Image Alignment Evaluation</td><td>2023</td><td><a href="https://arxiv.org/pdf/2305.10400" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://wysiwyr-itm.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/yonatanbitton/wysiwyr" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Critic-V: VLM Critics Help Catch VLM Errors in Multimodal Reasoning</td><td>2024</td><td><a href="https://arxiv.org/pdf/2411.18203" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://huggingface.co/papers/2411.18203" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/kyrieLei/Critic-V" target="_blank" rel="noopener">💾 Code</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s4-5">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Commonsense and Physics Alignment <span class="badge-sm">16</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>VBench: Comprehensive BenchmarkSuite for Video Generative Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2311.17982" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://vchitect.github.io/VBench-project/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/Vchitect/VBench" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>VBench++: Comprehensive and Versatile Benchmark Suite for Video Generative Models</td><td>2024</td><td><a href="https://arxiv.org/pdf/2411.13503" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://vchitect.github.io/VBench-project/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/Vchitect/VBench" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>PhysBench: Benchmarking and Enhancing VLMs for Physical World Understanding</td><td>2025</td><td><a href="https://arxiv.org/pdf/2501.16411" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://physbench.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/USC-GVL/PhysBench" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>VideoPhy: Evaluating Physical Commonsense for Video Generation</td><td>2024</td><td><a href="https://arxiv.org/pdf/2406.03520" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://videophy.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/Hritikbansal/videophy" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>WorldSimBench: Towards Video Generation Models as World Simulators</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.18072" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://iranqin.github.io/WorldSimBench.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td>-</td></tr><tr><td>WorldModelBench: Judging Video Generation Models As World Models</td><td>2025</td><td><a href="https://arxiv.org/pdf/2502.20694" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://worldmodelbench-team.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/WorldModelBench-Team/WorldModelBench/tree/main?tab=readme-ov-file" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>VideoScore: Building Automatic Metrics to Simulate Fine-grained Human Feedback for Video Generation</td><td>2024</td><td><a href="https://arxiv.org/pdf/2406.15252" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://tiger-ai-lab.github.io/VideoScore/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/TIGER-AI-Lab/VideoScore" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>WISE: A World Knowledge-Informed Semantic Evaluation for Text-to-Image Generation</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.07265" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/PKU-YuanGroup/WISE" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Content-Rich AIGC Video Quality Assessment via Intricate Text Alignment and Motion-Aware Consistency</td><td>2025</td><td><a href="https://arxiv.org/pdf/2502.04076" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/littlespray/CRAVE" target="_blank" rel="noopener">💾 
Code</a></td></tr><tr><td>Your Large Vision-Language Model Only Needs A Few Attention Heads For Visual Grounding</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.06287" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>SpatialVLM: Endowing Vision-Language Models with Spatial Reasoning Capabilities</td><td>2024</td><td><a href="https://arxiv.org/pdf/2401.12168" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://spatial-vlm.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/remyxai/VQASynth" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Do generative video models understand physical principles?</td><td>2025</td><td><a href="https://arxiv.org/pdf/2501.09038" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://physics-iq.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/google-deepmind/physics-IQ-benchmark" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>PhysGen: Rigid-Body Physics-Grounded Image-to-Video Generation</td><td>2024</td><td><a href="https://arxiv.org/pdf/2409.18964" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://stevenlsw.github.io/physgen/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/stevenlsw/physgen" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>How Far is Video Generation from World Model: A Physical Law Perspective</td><td>2024</td><td><a href="https://arxiv.org/pdf/2411.02385" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://phyworld.github.io/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/phyworld/phyworld" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Imagine while Reasoning in Space: Multimodal Visualization-of-Thought</td><td>2025</td><td><a href="https://arxiv.org/abs/2501.07542" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>VBench-2.0: Advancing Video Generation Benchmark Suite for Intrinsic Faithfulness</td><td>2025</td><td><a href="https://arxiv.org/pdf/2503.21755" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://vchitect.github.io/VBench-2.0-project/" target="_blank" rel="noopener">🌍 Website</a></td><td><a href="https://github.com/Vchitect/VBench" target="_blank" rel="noopener">💾 Code</a></td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s4-6">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Efficient Training and Fine-Tuning <span class="badge-sm">12</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>QAPruner: Quantization-Aware Vision Token Pruning for MLLMs</td><td>04/2026</td><td><a href="https://arxiv.org/abs/2604.02816" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td></td><td>Firebolt-VL: Efficient Vision-Language Understanding with Cross-Modality Modulation</td><td>04/2026</td><td><a href="https://arxiv.org/abs/2604.04579" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>CoME-VL: Scaling Complementary Multi-Encoder Vision-Language Learning</td><td>04/2026</td><td><a href="https://arxiv.org/abs/2604.03231" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>LoRA-Squeeze: Simple and Effective Post-Tuning and In-Tuning Compression of LoRA Modules</td><td>02/2026</td><td><a href="https://arxiv.org/abs/2602.10993" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>GRACE: Gated Relational Alignment via Confidence-based Distillation for Efficient VLMs</td><td>01/2026</td><td><a href="https://arxiv.org/abs/2601.22709" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>VLMQ: Post-Training Quantization for Large Vision-Language Models</td><td>2026 (ICLR)</td><td><a href="https://openreview.net/pdf?id=CXVf8Vx2E2" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td></td><td>VILA: On Pre-training for Visual Language Models</td><td>2023</td><td><a href="https://arxiv.org/pdf/2312.07533" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td></tr><tr><td>SimVLM: Simple Visual Language Model Pretraining with Weak Supervision</td><td>2021</td><td><a href="https://arxiv.org/pdf/2108.10904" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>LoRA: Low-Rank Adaptation of Large Language Models</td><td>2021</td><td><a href="https://arxiv.org/pdf/2106.09685" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/microsoft/LoRA" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>QLoRA: Efficient Finetuning of Quantized LLMs</td><td>2023</td><td><a href="https://arxiv.org/pdf/2305.14314" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback</td><td>2022</td><td><a href="https://arxiv.org/pdf/2204.05862" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/anthropics/hh-rlhf" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>RLAIF vs. RLHF: Scaling Reinforcement Learning from Human Feedback with AI Feedback</td><td>2023</td><td><a href="https://arxiv.org/pdf/2309.00267" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
<div class="subsection" id="s4-7">
<button class="sub-toggle" onclick="toggleSection(this)" aria-expanded="false">
<span>Scarcity of High-quality Datasets <span class="badge-sm">7</span></span>
<svg class="chevron" width="20" height="20" viewBox="0 0 20 20" fill="none"><path d="M5 7.5L10 12.5L15 7.5" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>
</button>
<div class="sub-content"><div class="table-wrap"><table><thead><tr><th>Title</th><th>Year</th><th>Paper</th><th>Website</th><th>Code</th></tr></thead><tbody><tr><td>A Survey on Bridging VLMs and Synthetic Data</td><td>2025</td><td><a href="https://openreview.net/pdf?id=ThjDCZOljE" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/mghiasvand1/Awesome-VLM-Synthetic-Data/" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Inst-IT: Boosting Multimodal Instance Understanding via Explicit Visual Prompt Instruction Tuning</td><td>2024</td><td><a href="https://arxiv.org/abs/2412.03565" target="_blank" rel="noopener">📄 Paper</a></td><td><a href="https://inst-it.github.io/" target="_blank" rel="noopener">Website</a></td><td><a href="https://github.com/inst-it/inst-it" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>SLIP: Self-supervision meets Language-Image Pre-training</td><td>2021</td><td><a href="https://arxiv.org/pdf/2112.12750" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td><a href="https://github.com/facebookresearch/SLIP" target="_blank" rel="noopener">💾 Code</a></td></tr><tr><td>Synthetic Vision: Training Vision-Language Models to Understand Physics</td><td>2024</td><td><a href="https://arxiv.org/pdf/2412.08619" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Synth2: Boosting Visual-Language Models with Synthetic Captions and Image Embeddings</td><td>2024</td><td><a href="https://arxiv.org/pdf/2403.07750" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>KALIE: Fine-Tuning Vision-Language Models for Open-World Manipulation without Robot Data</td><td>2024</td><td><a href="https://arxiv.org/pdf/2409.14066" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr><tr><td>Web Agents with World Models: Learning and Leveraging Environment Dynamics in Web Navigation</td><td>2024</td><td><a href="https://arxiv.org/pdf/2410.13232" target="_blank" rel="noopener">📄 Paper</a></td><td>-</td><td>-</td></tr></tbody></table></div></div>
</div>
</div>
</section>
</main>
</div>
<!-- FOOTER -->
<div class="footer">
<p>Built from the <a href="https://arxiv.org/abs/2501.02189" target="_blank" rel="noopener">VLM Survey</a> — Li et al., CVPR 2025 Workshop — Last updated April 2026</p>
</div>
<button class="back-top" id="backTop" onclick="window.scrollTo({top:0,behavior:'smooth'})"><i class="fas fa-arrow-up"></i></button>
<script>
/* Toggle subsection */
function toggleSection(btn){
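/* aria-expanded is stored as the strings "true"/"false"; read it, flip it, and toggle the sibling .sub-content panel */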
const expanded = btn.getAttribute('aria-expanded') === 'true';
btn.setAttribute('aria-expanded', !expanded);
btn.nextElementSibling.classList.toggle('open');
}
/* Expand / Collapse all */
let allExpanded = false;
function toggleAll(){
allExpanded = !allExpanded;
document.querySelectorAll('.sub-toggle').forEach(b => {
b.setAttribute('aria-expanded', allExpanded);
b.nextElementSibling.classList.toggle('open', allExpanded);
});
document.getElementById('expandBtn').textContent = allExpanded ? 'Collapse All' : 'Expand All';
}
/* Search */
const searchInput = document.getElementById('searchInput');
const searchInfo = document.getElementById('searchInfo');
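/* Debounce: run the filter 200 ms after the last keystroke instead of on every input event */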
let debounce;
searchInput.addEventListener('input', () => {
clearTimeout(debounce);
debounce = setTimeout(doSearch, 200);
});
function doSearch(){
const q = searchInput.value.trim().toLowerCase();
const rows = document.querySelectorAll('tbody tr');
let shown = 0, total = rows.length;
rows.forEach(r => {
if(!q){
r.classList.remove('hidden');
shown++;
} else {
const text = r.textContent.toLowerCase();
const match = text.includes(q);
r.classList.toggle('hidden', !match);
if(match) shown++;
}
});
if(q){
searchInfo.textContent = `${shown} of ${total} entries`;
/* auto-expand subsections with matches */
document.querySelectorAll('.subsection').forEach(sub => {
const vis = sub.querySelectorAll('tbody tr:not(.hidden)').length;
if(vis > 0){
const btn = sub.querySelector('.sub-toggle');
btn.setAttribute('aria-expanded','true');
sub.querySelector('.sub-content').classList.add('open');
}
});
} else {
searchInfo.textContent = '';
}
}
/* Active nav */
const navLinks = document.querySelectorAll('.nav-link');
const sectionEls = document.querySelectorAll('.section');
const backTop = document.getElementById('backTop');
window.addEventListener('scroll', () => {
backTop.classList.toggle('show', window.scrollY > 400);
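/* Scroll-spy: the last section whose top has scrolled to within 200px of the viewport top becomes the active nav target */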
let current = '';
sectionEls.forEach(s => {
if(window.scrollY >= s.offsetTop - 200) current = s.id;
});
navLinks.forEach(l => {
const isActive = l.getAttribute('href') === '#' + current;
l.classList.toggle('active', isActive);
if(isActive) l.style.borderLeftColor = l.dataset.color;
else l.style.borderLeftColor = 'transparent';
});
}, {passive: true});
</script>
</body>
</html>