[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-ChaofanTao--Autoregressive-Models-in-Vision-Survey":3,"tool-ChaofanTao--Autoregressive-Models-in-Vision-Survey":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",151314,2,"2026-04-11T23:32:58",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 
操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":77,"owner_email":78,"owner_twitter":76,"owner_website":79,"owner_url":80,"languages":76,"stars":81,"forks":82,"last_commit_at":83,"license":76,"difficulty_score":84,"env_os":85,"env_gpu":86,"env_ram":86,"env_deps":87,"category_tags":90,"github_topics":93,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":108,"updated_at":109,"faqs":110,"releases":121},6833,"ChaofanTao\u002FAutoregressive-Models-in-Vision-Survey","Autoregressive-Models-in-Vision-Survey"," [TMLR 2025🔥] A survey for the autoregressive models in vision. 
","Autoregressive-Models-in-Vision-Survey 是一个专注于视觉领域自回归模型（Autoregressive Models）的学术综述资源库。它系统性地梳理了该领域的最新研究进展，通过建模序列依赖关系，展示了自回归方法在生成高质量图像和视频内容方面的显著突破。\n\n该项目主要解决了研究人员在面对海量且快速迭代的自回归视觉论文时，难以高效获取核心观点和分类整理的痛点。它将分散的学术成果汇聚成一份精心策划的清单，涵盖了从基础理论到前沿应用的各类重要文献，并提供了详细的论文链接及中文解读资源，极大地降低了入门和追踪前沿的技术门槛。\n\n这份资源特别适合人工智能研究人员、计算机视觉开发者以及对生成式 AI 感兴趣的高校师生使用。无论是希望深入了解自回归机制如何应用于视觉任务，还是寻找特定方向的参考文献，都能从中获得极大帮助。其独特的技术亮点在于不仅收录了传统自回归模型，还敏锐地捕捉到了“统一多模态模型”和“自回归扩散强制视频生成”等新兴趋势。虽然项目目前已进入维护模式以反映领域的新变化，但它依然作为一份权威的参考指南，持续为社区提供有价值的知识沉淀。","\u003Cdiv align=center>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FChaofanTao_Autoregressive-Models-in-Vision-Survey_readme_22f7bed6b2c1.png\" width=\"160px\">\n\u003C\u002Fdiv>\n\u003Ch2 align=\"center\"> \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05902\">[TMLR 2025] Awesome Autoregressive Models in Vision  \u003Cdiv align=center> \u003C\u002Fa>\u003C\u002Fh2>\n\u003Ch5 align=\"center\"> If you like our project, please give us a star ⭐ on GitHub for the latest update.\u003C\u002Fh5>\n\n\u003Ch5 align=\"center\">\n\n   [![Awesome](https:\u002F\u002Fawesome.re\u002Fbadge.svg)](https:\u002F\u002Fawesome.re)\n   [![arxiv](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FArxiv-2411.05902-red)](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.05902.pdf)\n   [![TechBeat](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F机器之心%20-black)](https:\u002F\u002Fmp.weixin.qq.com\u002Fs\u002F_O8W1qgvMZu37IKwgtskMA)\n   ![GitHub Repo stars](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fstars\u002FChaofanTao\u002FAutoregressive-Models-in-Vision-Survey)\n\n\u003C\u002Fh5>\n\nAutoregressive models have shown significant progress in generating high-quality content by modeling the dependencies sequentially. This repo is a curated list of papers about the latest advancements in autoregressive models in vision.\n\n> **Paper**: [[TMLR 2025🔥]](https:\u002F\u002Fopenreview.net\u002Fforum?id=1BqXkjNEGP) [Autoregressive Models in Vision: A Survey](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05902) | [[中文解读]](https:\u002F\u002Fmp.weixin.qq.com\u002Fs\u002F_O8W1qgvMZu37IKwgtskMA)\n\n> **Authors**: *Jing Xiong\u003Csup>1,†\u003C\u002Fsup>, Gongye Liu\u003Csup>2,†\u003C\u002Fsup>, Lun Huang\u003Csup>3\u003C\u002Fsup>, Chengyue Wu\u003Csup>1\u003C\u002Fsup>, Taiqiang Wu\u003Csup>1\u003C\u002Fsup>, Yao Mu\u003Csup>1\u003C\u002Fsup>, Yuan Yao\u003Csup>4\u003C\u002Fsup>, Hui Shen\u003Csup>5\u003C\u002Fsup>, Zhongwei Wan\u003Csup>5\u003C\u002Fsup>, Jinfa Huang\u003Csup>4\u003C\u002Fsup>, Chaofan Tao\u003Csup>1,‡\u003C\u002Fsup>, Shen Yan\u003Csup>6\u003C\u002Fsup>, Huaxiu Yao\u003Csup>7\u003C\u002Fsup>, Lingpeng Kong\u003Csup>1\u003C\u002Fsup>, Hongxia Yang\u003Csup>9\u003C\u002Fsup>, Mi Zhang\u003Csup>5\u003C\u002Fsup>, Guillermo Sapiro\u003Csup>8,10\u003C\u002Fsup>, Jiebo Luo\u003Csup>4\u003C\u002Fsup>, Ping Luo\u003Csup>1\u003C\u002Fsup>, Ngai Wong\u003Csup>1\u003C\u002Fsup>*\n\n> *\u003Csup>1\u003C\u002Fsup>The University of Hong Kong, \u003Csup>2\u003C\u002Fsup>Tsinghua University, \u003Csup>3\u003C\u002Fsup>Duke University, \u003Csup>4\u003C\u002Fsup>University of Rochester, \u003Csup>5\u003C\u002Fsup>The Ohio State University, \u003Csup>6\u003C\u002Fsup>Bytedance, \u003Csup>7\u003C\u002Fsup>The University of North Carolina at Chapel Hill, \u003Csup>8\u003C\u002Fsup>Apple, \u003Csup>9\u003C\u002Fsup>The Hong Kong Polytechnic University, \u003Csup>10\u003C\u002Fsup>Princeton University*\n\n> *\u003Csup>†\u003C\u002Fsup> Core Contributors, \u003Csup>‡\u003C\u002Fsup> Corresponding 
Authors*\n\n\u003Cbr>\n\n\u003Cdetails open>\u003Csummary>💡 We also have other generative projects that may interest you ✨. \u003C\u002Fsummary>\u003Cp>\n\u003C!--  may -->\n\n> [**Personalized Video Generation: Progress, Applications, and Challenges**]() \u003Cbr>\n> Jinfa Huang, Shenghai Yuan, Kunyang Li, Meng Cao, et al. \u003Cbr>\n[![github](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-Github-black?logo=github)](https:\u002F\u002Fgithub.com\u002FinFaaa\u002FAwesome-Personalized-Video-Creation)  [![github](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fstars\u002FinFaaa\u002FAwesome-Personalized-Video-Creation.svg?style=social)](https:\u002F\u002Fgithub.com\u002FinFaaa\u002FAwesome-Personalized-Video-Creation) \u003Cbr>\n\n> \u003C\u002Fp>\u003C\u002Fdetails>\n\n\n## 📑 Citation\nPlease consider citing 📑 our papers if our repository is helpful to your work. Thanks sincerely!\n\n```BibTeX\n@misc{xiong2024autoregressive,\n    title={Autoregressive Models in Vision: A Survey},\n    author={Jing Xiong and Gongye Liu and Lun Huang and Chengyue Wu and Taiqiang Wu and Yao Mu and Yuan Yao and Hui Shen and Zhongwei Wan and Jinfa Huang and Chaofan Tao and Shen Yan and Huaxiu Yao and Lingpeng Kong and Hongxia Yang and Mi Zhang and Guillermo Sapiro and Jiebo Luo and Ping Luo and Ngai Wong},\n    year={2024},\n    eprint={2411.05902},\n    archivePrefix={arXiv},\n    primaryClass={cs.CV}\n}\n```\n\n## 📣 Update News\n\n`[2025-11-01]` ⏸️ After a year of rapid progress in autoregressive visual generation, two clear trends now define the field: **unified multimodal models** and **autoregressive diffusion-forcing video generation**. Our current repository categories no longer capture this evolving landscape, so we’re moving to maintenance mode and **pausing proactive updates as of today**. The repo remains available as a reference, and **targeted PRs are welcome** (additions, corrections, or reorganizations with new trends). Thanks for your support! 🙏\n\n`[2025-05-31]` 🔥 Our survey has been revised in [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05902)! The revised paper streamlines content and enhances discussions on:\n   - Continuous autoregressive methods\n   - Computational costs\n   - More details about metrics\n   - Expanded future application roadmaps\n\n`[2025-03-11]` 🔥 Our [survey](https:\u002F\u002Fopenreview.net\u002Fforum?id=1BqXkjNEGP) has been accepted by TMLR 2025!\n\n`[2024-11-11]` We have released the survey: [Autoregressive Models in Vision: A Survey](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05902).\n\n`[2024-10-13]` We have initialized the repository.\n\n\u003Cdiv align=center>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FChaofanTao_Autoregressive-Models-in-Vision-Survey_readme_15bbafc53273.png\" width=\"800px\">\n\u003C\u002Fdiv>\n
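\n> *For readers new to the area, here is a minimal, purely illustrative sketch of the next-token factorization p(x) = p(x_1) p(x_2|x_1) ... that most papers below build on; the callable model and the visual tokenizer are hypothetical placeholders rather than any specific method from this list.*\n\n```python\nimport torch\n\ndef sample_image_tokens(model, length, bos_id=0):\n    # Autoregressive factorization: each token is sampled conditioned\n    # on every previously generated token (next-token prediction).\n    tokens = torch.tensor([[bos_id]])              # running sequence, batch of 1\n    for _ in range(length):\n        logits = model(tokens)[:, -1]              # logits for the next position only\n        probs = torch.softmax(logits, dim=-1)\n        nxt = torch.multinomial(probs, num_samples=1)\n        tokens = torch.cat([tokens, nxt], dim=1)   # append and re-condition\n    return tokens[:, 1:]                           # drop BOS; a visual tokenizer (e.g. VQGAN) decodes ids to pixels\n```\n\n*Scale-wise methods such as VAR replace this per-token loop with a coarse-to-fine loop over whole token maps (next-scale prediction), while continuous-token methods drop the discrete codebook entirely.*\n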
\n\n## ⚡ Contributing\n\nWe welcome feedback, suggestions, and contributions that can help improve this survey and repository and make them valuable resources for the entire community.\nWe will actively maintain this repository by incorporating new research as it emerges. If you have any suggestions about our taxonomy, notice any missed papers, or find that a preprint arXiv paper has since been accepted at a venue, please let us know.\n\nIf you want to add your work or model to this list, please do not hesitate to email jhuang90@ur.rochester.edu or open a [pull request](https:\u002F\u002Fgithub.com\u002FChaofanTao\u002Fautoregressive-vision-survey\u002Fpulls). Please use the following Markdown format:\n\n```markdown\n* [**Name of Conference or Journal + Year**] Paper Name. [Paper](link) [Code](link)\n```\n\n\n## 📖 Table of Contents\n  - [Image Generation](#image-generation)\n    - [Unconditional\u002FClass-Conditioned Image Generation](#unconditionalclass-conditioned-image-generation)\n    - [Text-to-Image Generation](#text-to-image-generation)\n    - [Image-to-Image Translation](#image-to-image-translation)\n    - [Image Editing](#image-editing)\n  - [Video Generation](#video-generation)\n    - [Unconditional Video Generation](#unconditional-video-generation)\n    - [Conditional Video Generation](#conditional-video-generation)\n    - [Embodied AI](#embodied-ai)\n  - [3D Generation](#3d-generation)\n    - [Motion Generation](#motion-generation)\n    - [Point Cloud Generation](#point-cloud-generation)\n    - [3D Medical Generation](#3d-medical-generation)\n  - [Multimodal Generation](#multimodal-generation)\n    - [Unified Understanding and Generation Multi-Modal LLMs](#unified-understanding-and-generation-multi-modal-llms)\n  - [Other Generation](#other-generation)\n  - [Benchmark \u002F Analysis](#benchmark--analysis)\n  - [Reasoning Alignment](#reasoning-alignment)\n  - [Safety](#safety)\n  - [Accelerating](#accelerating)\n  - [Stability \\& Scaling](#stability--scaling)\n  - [Tutorial](#tutorial)\n  - [Evaluation Metrics](#evaluation-metrics)\n-----\n\n### Image Generation\n#### Unconditional\u002FClass-Conditioned Image Generation\n  - ##### Pixel-wise Generation\n    - **[ICML, 2021 oral]** Improved Autoregressive Modeling with Distribution Smoothing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.15089) [Code](https:\u002F\u002Fgithub.com\u002Fchenlin9\u002FAutoregressive-Modeling-with-Distribution-Smoothing)\n    - **[ICML, 2020]** **ImageGPT:** Generative Pretraining from Pixels [Paper](https:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fchen20s\u002Fchen20s.pdf)\n    - **[ICML, 2018]** **Image Transformer** [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1802.05751) [Code](https:\u002F\u002Fgithub.com\u002Fneocxi\u002Fpixelsnail-public)\n    - **[ICML, 2018]** **PixelSNAIL:** An Improved Autoregressive Generative Model [Paper](https:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fchen18h\u002Fchen18h.pdf) [Code](https:\u002F\u002Fgithub.com\u002Fneocxi\u002Fpixelsnail-public)\n    - **[ICML, 2017]** Parallel Multiscale Autoregressive Density Estimation [Paper](https:\u002F\u002Fproceedings.mlr.press\u002Fv70\u002Freed17a.html)\n    - **[ICLR workshop, 2017]** **Gated PixelCNN**: Generating Interpretable Images with Controllable Structure [Paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=Hyvw0L9el)\n    - **[ICLR, 2017]** **PixelCNN++**: Improving the PixelCNN with Discretized Logistic Mixture Likelihood and Other Modifications [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1701.05517) [Code](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpixel-cnn)\n    - **[NeurIPS, 2016]** **PixelCNN** Conditional Image Generation with PixelCNN Decoders 
[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1606.05328) [Code](https:\u002F\u002Fgithub.com\u002Fanantzoid\u002FConditional-PixelCNN-decoder)\n    - **[ICML, 2016]** **PixelRNN** Pixel Recurrent Neural Networks [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1601.06759) [Code](https:\u002F\u002Fgithub.com\u002Fj-min\u002FPixelCNN)\n    \n  - ##### Token-wise Generation\n    \n    ##### Tokenizer\n    - **[Arxiv, 2025.07]** Vision Foundation Models as Effective Visual Tokenizers for Autoregressive Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.08441) [Code](https:\u002F\u002Fgithub.com\u002FCVMI-Lab\u002FVFMTok)\n    - **[Arxiv, 2025.07]** Holistic Tokenizer for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.02358) [Code](https:\u002F\u002Fgithub.com\u002FCVMI-Lab\u002FHita)\n    - **[Arxiv, 2025.06]** Instella-T2I: Pushing the Limits of 1D Discrete Latent Space Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.21022) \n    - **[Arxiv, 2025.05]** D-AR: Diffusion via Autoregressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23660) [Code](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FD-AR)\n    - **[Arxiv, 2025.05]** Learning Adaptive and Temporally Causal Video Tokenization in a 1D Latent Space [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.17011) [Code](https:\u002F\u002Fgithub.com\u002FVisionXLab\u002FAdapTok)\n    - **[Arxiv, 2025.04]** Distilling semantically aware orders for autoregressive image generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.17069) \n    - **[Arxiv, 2025.04]** Token-Shuffle: Towards High-Resolution Image Generation with Autoregressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.17789)\n    - **[CVPR, 2025]** Improving Autoregressive Visual Generation with Cluster-Oriented Token Prediction [Code](https:\u002F\u002Fgithub.com\u002Fsjtuplayer\u002FIAR) [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.00880)\n    - **[Arxiv, 2025.03]** Equivariant Image Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.18948) [Code](https:\u002F\u002Fgithub.com\u002Fdrx-code\u002FEquivariantModeling)\n    - **[Arxiv, 2025.03]** V2Flow: Unifying Visual Tokenization and Large Language Model Vocabularies for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07493) [Code](https:\u002F\u002Fgithub.com\u002Fzhangguiwei610\u002FV2Flow)\n    - **[Arxiv, 2025.02]** **FlexTok**: Resampling Images into 1D Token Sequences of Flexible Length [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.13967) \n    - **[Arxiv, 2025.01]** **ARFlow**: Autogressive Flow with Hybrid Linear Attention [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.16085) [Code](https:\u002F\u002Fgithub.com\u002FTheFllood\u002FARFlow)\n    - **[Arxiv, 2024.12]** **TokenFlow**: Unified Image Tokenizer for Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03069) [Code](https:\u002F\u002Fgithub.com\u002FByteFlow-AI\u002FTokenFlow)\n    - **[Arxiv, 2024.12]** Next Patch Prediction for Autoregressive Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.15321) [Code](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FNext-Patch-Prediction)\n    - **[Arxiv, 2024.12]** XQ-GAN: An Open-source Image Tokenization Framework for Autoregressive Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01762) 
[Code](https:\u002F\u002Fgithub.com\u002Flxa9867\u002FImageFolder)\n    - **[Arxiv, 2024.12]** RandAR: Decoder-only Autoregressive Visual Generation in Random Orders. [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01827) [Code](https:\u002F\u002Fgithub.com\u002Fziqipang\u002FRandAR) [Project](https:\u002F\u002Frand-ar.github.io\u002F)\n    - **[Arxiv, 2024.11]** Randomized Autoregressive Visual Generation. [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.00776) [Code](https:\u002F\u002Fgithub.com\u002Fbytedance\u002F1d-tokenizer) [Project](https:\u002F\u002Fyucornetto.github.io\u002Fprojects\u002Frar.html)\n    - **[Arxiv, 2024.09]** **Open-MAGVIT2**: Democratizing Autoregressive Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.04410) [Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FOpen-MAGVIT2)\n    - **[Arxiv, 2024.06]** **OmniTokenizer**: A Joint Image-Video Tokenizer for Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09399) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FOmniTokenizer)\n    - **[Arxiv, 2024.06]** Scaling the Codebook Size of VQGAN to 100,000 with a Utilization Rate of 99% [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11837) [Code](https:\u002F\u002Fgithub.com\u002Fzh460045050\u002FVQGAN-LC)\n    - **[Arxiv, 2024.06]** **Titok** An Image is Worth 32 Tokens for Reconstruction and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.07550) [Code](https:\u002F\u002Fgithub.com\u002Fbytedance\u002F1d-tokenizer)\n    - **[Arxiv, 2024.06]** Wavelets Are All You Need for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.19997) \n    - **[Arxiv, 2024.06]** **LlamaGen** Autoregressive Model Beats Diffusion: Llama for Scalable Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.06525) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FLlamaGen)\n    - **[ICLR, 2024]**  **MAGVIT-v2** Language Model Beats Diffusion -- Tokenizer is Key to Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.05737)\n    - **[ICLR, 2024]** **FSQ** Finite scalar quantization: Vq-vae made simple [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.15505) [Code](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fgoogle-research\u002Ftree\u002Fmaster\u002Ffsq)\n    - **[ICCV, 2023]** **Efficient-VQGAN:** Towards High-Resolution Image Generation with Efficient Vision Transformers [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.05400)\n    - **[CVPR, 2023]** Towards Accurate Image Coding: Improved Autoregressive Image Generation with Dynamic Vector Quantization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.11718) [Code](https:\u002F\u002Fgithub.com\u002FCrossmodalGroup\u002FDynamicVectorQuantization)\n    - **[CVPR, 2023, Highlight]**  **MAGVIT:** Masked Generative Video Transformer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.05199)\n    - **[NeurIPS, 2023]**  **MoVQ:** Modulating Quantized Vectors for High-Fidelity Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.09002)\n    - **[BMVC, 2022]**  Unconditional image-text pair generation with multimodal cross quantizer  [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07537) [Code](https:\u002F\u002Fgithub.com\u002Fttumyche\u002FMXQ-VAE)\n    - **[CVPR, 2022]** **RQ-VAE** Autoregressive Image Generation Using Residual Quantization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.01941) 
[Code](https:\u002F\u002Fgithub.com\u002Fkakaobrain\u002Frq-vae-transformer)\n    - **[ICLR, 2022]** **ViT-VQGAN** Vector-quantized Image Modeling with Improved VQGAN [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.04627) \n    - **[PMLR, 2021]** Generating images with sparse representations [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.03841)\n    - **[CVPR, 2021]** **VQGAN** Taming Transformers for High-Resolution Image Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.09841) [Code](https:\u002F\u002Fgithub.com\u002FCompVis\u002Ftaming-transformers)\n    - **[NeurIPS, 2019]** Generating Diverse High-Fidelity Images with VQ-VAE-2 [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1906.00446) [Code](https:\u002F\u002Fgithub.com\u002Frosinality\u002Fvq-vae-2-pytorch)\n    - **[NeurIPS, 2017]** **VQ-VAE** Neural Discrete Representation Learning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1711.00937)\n\n    \n    ##### Autoregressive Modeling\n    - **[Arxiv, 2025.11]** InfinityStar: Unified Spacetime AutoRegressive Modeling for Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.04675) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FInfinityStar)\n    - **[Arxiv, 2025.10]** FARMER: Flow AutoRegressive Transformer over Pixels [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.23588) \n    - **[Arxiv, 2025.10]** SSD: Spatial-Semantic Head Decoupling for Efficient Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.18716) \n    - **[NeurIPS, 2025]** Visual Autoregressive Models Beat Diffusion Models on Inference Time Scaling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.16751) \n    - **[NeurIPS, 2025]** Towards Better & Faster Autoregressive Image Generation: From the Perspective of Entropy [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.09012) [Code](https:\u002F\u002Fgithub.com\u002Fkrennic999\u002FARsample)\n    - **[Arxiv, 2025.09]** Hyperspherical Latents Improve Continuous-Token Autoregressive Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.24335) [Code](https:\u002F\u002Fgithub.com\u002Fguolinke\u002FSphereAR)\n    - **[Arxiv, 2025.09]** Go with Your Gut: Scaling Confidence for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.26376) [Code](https:\u002F\u002Fgithub.com\u002FEnVision-Research\u002FScalingAR)\n    - **[NeurIPS, 2025]** Understand Before You Generate: Self-Guided Training for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.15185) [Code](https:\u002F\u002Fgithub.com\u002Fyuexy\u002FST-AR)\n    - **[Arxiv, 2025.08]** Exploiting Discriminative Codebook Prior for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.10719) \n    - **[Arxiv, 2025.08]** NextStep-1: Toward Autoregressive Image Generation with Continuous Tokens at Scale [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.10711) [Code](https:\u002F\u002Fgithub.com\u002Fstepfun-ai\u002FNextStep-1)\n    - **[Arxiv, 2025.07]** Frequency-Aware Autoregressive Modeling for Efficient High-Resolution Image Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.20454) [Code](https:\u002F\u002Fgithub.com\u002FCaesarhhh\u002FSparseVAR)\n    - **[Arxiv, 2025.07]** TTS-VAR: A Test-Time Scaling Framework for Visual Auto-Regressive Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.18537) 
[Code](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002FTTS-VAR)\n    - **[Arxiv, 2025.07]** Transition Matching: Scalable and Flexible Generative Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23589)\n    - **[Arxiv, 2025.07]** Rethinking Discrete Tokens: Treating Them as Conditions for Continuous Autoregressive Image Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.01756) \n    - **[CVPR, 2025]** OmniGen: Unified Image Generation [Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FXiao_OmniGen_Unified_Image_Generation_CVPR_2025_paper.html) [Code](https:\u002F\u002Fgithub.com\u002FVectorSpaceLab\u002FOmniGen)\n    - **[Arxiv, 2025.06]** AR-RAG: Autoregressive Retrieval Augmentation for Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.06962) [Code](https:\u002F\u002Fgithub.com\u002FPLUM-Lab\u002FAR-RAG)\n    - **[Arxiv, 2025.06]** Marrying Autoregressive Transformer and Diffusion with Multi-Reference Autoregression [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09482) [Code](https:\u002F\u002Fgithub.com\u002FTransDiff\u002FTransDiff)\n    - **[Arxiv, 2025.06]** MADFormer: Mixed Autoregressive and Diffusion Transformers for Continuous Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.07999) \n    - **[Arxiv, 2025.06]** SpectralAR: Spectral Autoregressive Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.10962) [Code](https:\u002F\u002Fgithub.com\u002Fhuang-yh\u002FSpectralAR)\n    - **[Arxiv, 2025.06]** AliTok: Towards Sequence Modeling Alignment between Tokenizer and Autoregressive Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.05289) [Code](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002Falitok)\n    - **[Arxiv, 2025.05]** DetailFlow: 1D Coarse-to-Fine Autoregressive Image Generation via Next-Detail Prediction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.21473) [Code](https:\u002F\u002Fgithub.com\u002FByteFlow-AI\u002FDetailFlow)\n    - **[Arxiv, 2025.05]** TensorAR: Refinement is All You Need in Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16324)\n    - **[Arxiv, 2025.05]** MVAR: Visual Autoregressive Modeling with Scale and Spatial Markovian Conditioning [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.12742) [Code](https:\u002F\u002Fgithub.com\u002FLabShuHangGU\u002FMVAR)\n    - **[ICML, 2025]** Continuous Visual Autoregressive Generation via Score Maximization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07812) [Code](https:\u002F\u002Fgithub.com\u002Fshaochenze\u002FEAR)\n    - **[Arxiv, 2025.04]** GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.08736) [Code](https:\u002F\u002Fgithub.com\u002FSilentView\u002FGigaTok)\n    - **[Arxiv, 2025.03]** D2C: Unlocking the Potential of Continuous Autoregressive Image Generation with Discrete Tokens [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.17155) \n    - **[Arxiv, 2025.03]** Bridging Continuous and Discrete Tokens for Autoregressive Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.16430) [Code](https:\u002F\u002Fgithub.com\u002Fyuqingwang1029\u002FTokenBridge)\n    - **[Arxiv, 2025.03]** Autoregressive Image Generation with Randomized Parallel Decoding [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.10568) [Code](https:\u002F\u002Fgithub.com\u002Fhp-l33\u002FARPG)\n    - 
**[Arxiv, 2025.03]** Direction-Aware Diagonal Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.11129)\n    - **[Arxiv, 2025.03]** Neighboring Autoregressive Modeling for Efficient Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.10696) [Code](https:\u002F\u002Fgithub.com\u002FThisisBillhe\u002FNAR)\n    - **[Arxiv, 2025.03]** NFIG: Autoregressive Image Generation with Next-Frequency Prediction [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.07076) \n    - **[Arxiv, 2025.03]** Frequency Autoregressive Image Generation with Continuous Tokens [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.05305) [Code](https:\u002F\u002Fgithub.com\u002FyuhuUSTC\u002FFAR)\n    - **[Arxiv, 2025.03]** ARINAR: Bi-Level Autoregressive Feature-by-Feature Generative Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.02883) [Code](https:\u002F\u002Fgithub.com\u002FQinyu-Allen-Zhao\u002FArinar)\n    - **[Arxiv, 2025.02]** Beyond Next-Token: Next-X Prediction for Autoregressive Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20388) [Code](https:\u002F\u002Fgithub.com\u002FOliverRensu\u002FxAR) [Project](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20388)\n    - **[Arxiv, 2025.02]** Fractal Generative Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17437) [Code](https:\u002F\u002Fgithub.com\u002FLTH14\u002Ffractalgen)\n    - **[Arxiv, 2025.01]** An Empirical Study of Autoregressive Pre-training from Videos [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.05453) \n    - **[Arxiv, 2024.12]** E-CAR: Efficient Continuous Autoregressive Image Generation via Multistage Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.14170)\n    - **[Arxiv, 2024.12]** Taming Scalable Visual Tokenizer for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.02692) [Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FSEED-Voken)\n    - **[Arxiv, 2024.11]** Sample- and Parameter-Efficient Auto-Regressive Image Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.15648) [Code](https:\u002F\u002Fgithub.com\u002Felad-amrani\u002Fxtra)\n    - **[Arxiv, 2024.01]** Scalable Pre-training of Large Autoregressive Image Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.08541) [Code](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-aim)\n    - **[Arxiv, 2024.10]** ImageFolder: Autoregressive Image Generation with Folded Tokens [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.01756) [Code](https:\u002F\u002Fgithub.com\u002Flxa9867\u002FImageFolder)\n    - **[Arxiv, 2024.10]** **SAR** Customize Your Visual Autoregressive Recipe with Set Autoregressive Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.10511) [Code](https:\u002F\u002Fgithub.com\u002Fpoppuppy\u002FSAR)\n    - **[Arxiv, 2024.08]** **AiM** Scalable Autoregressive Image Generation with Mamba [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12245) [Code](https:\u002F\u002Fgithub.com\u002Fhp-l33\u002FAiM)\n    - **[Arxiv, 2024.06]** **ARM** Autoregressive Pretraining with Mamba in Vision [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07537) [Code](https:\u002F\u002Fgithub.com\u002FOliverRensu\u002FARM)\n    - **[Arxiv, 2024.06]** **MAR** Autoregressive Image Generation without Vector Quantization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11838) [Code](https:\u002F\u002Fgithub.com\u002FLTH14\u002Fmar)\n    - **[Arxiv, 2024.06]** **LlamaGen** 
Autoregressive Model Beats Diffusion: Llama for Scalable Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.06525) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FLlamaGen)\n    - **[ICML, 2024]** **DARL**: Denoising Autoregressive Representation Learning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.05196) \n    - **[ICML, 2024]** **DisCo-Diff**: Enhancing Continuous Diffusion Models with Discrete Latents [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03300) [Code](https:\u002F\u002Fgithub.com\u002Fgcorso\u002Fdisco-diffdock)\n    - **[ICML, 2024]** **DeLVM**: Data-efficient Large Vision Models through Sequential Autoregression [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.04841) [Code](https:\u002F\u002Fgithub.com\u002Fggjy\u002FDeLVM)\n    - **[AAAI, 2023]** **SAIM** Exploring Stochastic Autoregressive Image Modeling for Visual Representation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.01610) [Code](https:\u002F\u002Fgithub.com\u002Fqiy20\u002FSAIM)\n    - **[NeurIPS, 2021]** **ImageBART**: Context with Multinomial Diffusion for Autoregressive Image Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.08827) [Code](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fimagebart)\n    - **[CVPR, 2021]** **VQGAN** Taming Transformers for High-Resolution Image Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.09841)  [Code](https:\u002F\u002Fgithub.com\u002FCompVis\u002Ftaming-transformers)\n    - **[ECCV, 2020]** **RAL**: Incorporating Reinforced Adversarial Learning in Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.09923)\n    - **[NeurIPS, 2019]** Generating Diverse High-Fidelity Images with VQ-VAE-2 [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1906.00446) [Code](https:\u002F\u002Fgithub.com\u002Frosinality\u002Fvq-vae-2-pytorch)\n    - **[NeurIPS, 2017]** **VQ-VAE** Neural Discrete Representation Learning[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1711.00937)\n    \n  - ##### Scale-wise AutoRegressive Generation\n    - **[Arxiv, 2025.10]** Dynamic Mixture-of-Experts for Visual Autoregressive Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08629)\n    - **[Arxiv, 2025.09]** SoftCFG: Uncertainty-guided Stable Guidance for Visual Autoregressive Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.00996) \n    - **[Arxiv, 2025.09]** Not All Tokens are Guided Equal: Improving Guidance in Visual Autoregressive Models [Paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.23876)\n    - **[Arxiv, 2025.09]** Scale-Wise VAR is Secretely Discrete Diffusion [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.22636) [Code](https:\u002F\u002Fgithub.com\u002FVIROBO-15\u002FSRDD) [Project](https:\u002F\u002Fvirobo-15.github.io\u002Fsrdd.github.io\u002F)\n    - **[Arxiv, 2025.05]** Generative Autoregressive Transformers for Model-Agnostic Federated MRI Reconstruction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04521) [Code](https:\u002F\u002Fgithub.com\u002Ficon-lab\u002FFedGAT)\n    - **[ICML, 2025]** Continuous Visual Autoregressive Generation via Score Maximization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07812) [Code](https:\u002F\u002Fgithub.com\u002Fshaochenze\u002FEAR)\n    - **[Arxiv, 2025.02]** **FlexVAR**: Flexible Visual Autoregressive Modeling without Residual Prediction [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20313) 
[Code](https:\u002F\u002Fgithub.com\u002Fjiaosiyu1999\u002FFlexVAR)\n    - **[Arxiv, 2024.12]** **FlowAR**: Scale-wise Autoregressive Image Generation Meets Flow Matching [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.15205) [Code](https:\u002F\u002Fgithub.com\u002FOliverRensu\u002FFlowAR)\n    - **[Arxiv, 2024.11]** **M-VAR**: Decoupled Scale-wise Autoregressive Modeling for High-Quality Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.10433) [Code](https:\u002F\u002Fgithub.com\u002FOliverRensu\u002FMVAR)\n    - **[NeurIPS 2024 Best Paper]** **Visual Autoregressive Modeling:** Scalable Image Generation via Next-Scale Prediction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.02905) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FVAR)\n\n#### Text-to-Image Generation\n- ##### Token-wise Generation\n     - **[ICML, 2025]** Discrete JEPA: Learning Discrete Token Representations without Reconstruction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.14373) \n     - **[Arxiv, 2025.04]** Lumina-mGPT 2.0: Stand-alone Autoregressive Image Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.17801) [Code](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-mGPT-2.0)\n     - **[Arxiv, 2025.03]** Lumina-Image 2.0: A Unified and Efficient Image Generative Framework [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21758?) [Code](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-Image-2.0)\n     - **[Arxiv, 2024.12]** Liquid: Language Models are Scalable Multi-modal Generators [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.04332) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FLiquid)\n     - **[Arxiv, 2024.12]** Infinity: Scaling Bitwise AutoRegressive Modeling for High-Resolution Image Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.04431) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FInfinity)\n     - **[Arxiv, 2024.12]** TokenFlow: Unified Image Tokenizer for Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03069) [Code](https:\u002F\u002Fgithub.com\u002FByteFlow-AI\u002FTokenFlow)\n     - **[Arxiv, 2024.11]** High-Resolution Image Synthesis via Next-Token Prediction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.14808) [Code](https:\u002F\u002Fd-jepa.github.io\u002Ft2i\u002F)\n     - **[Arxiv, 2024.10]** **Fluid**: Scaling Autoregressive Text-to-image Generative Models with Continuous Tokens [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.13863)\n     - **[Arxiv, 2024.10]** **DART**: Denoising Autoregressive Transformer for Scalable Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.08159) [Code](https:\u002F\u002Fgithub.com\u002Fdaixiangzi\u002FVAR-CLIP)\n     - **[Arxiv, 2024.10]** **DnD-Transformer**: A Spark of Vision-Language Intelligence: 2-Dimensional Autoregressive Transformer for Efficient Fine-grained Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.01912) [Code](https:\u002F\u002Fgithub.com\u002Fchenllliang\u002FDnD-Transformer)\n     - **[Arxiv, 2024.08]** **Lumina-mGPT**: Illuminate Flexible Photorealistic Text-to-Image Generation with Multimodal Generative Pretraining [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.02657) [Code](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-mGPT)\n     - **[Arxiv, 2024.07]** **MARS**: Mixture of Auto-Regressive Models for Fine-grained Text-to-image Synthesis 
[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.07614) [Code](https:\u002F\u002Fgithub.com\u002Ffusiming3\u002FMARS)\n     - **[Arxiv, 2024.06]** **LLM4GEN**: Leveraging Semantic Representation of LLMs for Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00737) [Code](https:\u002F\u002Fgithub.com\u002FYUHANG-Ma\u002FLLM4GEN)\n     - **[Arxiv, 2024.06]** **STAR**: Scale-wise Text-to-image generation via Auto-Regressive representations [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.10797) [Code](https:\u002F\u002Fgithub.com\u002Fkrennic999\u002FSTAR)\n     - **[Arxiv, 2024.05]** **Kaleido Diffusion**: Improving Conditional Diffusion Models with Autoregressive Latent Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.21048)\n     - **[CVPR, 2024]** **Beyond Text**: Frozen Large Language Models in Visual Signal Comprehension [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.07874) [Code](https:\u002F\u002Fgithub.com\u002Fzh460045050\u002FV2L-Tokenizer)\n     - **[TOG, 2023]** **IconShop**: Text-Guided Vector Icon Synthesis with Autoregressive Transformers (*svg image*) [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.14400) [Code](https:\u002F\u002Fgithub.com\u002Fkingnobro\u002FIconShop)\n     - **[NeurIPS, 2023]** **LQAE** Language Quantized AutoEncoders: Towards Unsupervised Text-Image Alignment [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.00902) [Code](https:\u002F\u002Fgithub.com\u002Flhao499\u002Flanguage-quantized-autoencoders)\n     - **[TMLR, 2022.06]** **Parti**: Scaling Autoregressive Models for Content-Rich Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.10789) [Code](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fparti)\n     - **[NeurIPS, 2022]** **CogView2**: Faster and Better Text-to-Image Generation via Hierarchical Transformers [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.14217) [Code](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCogView2)\n     - **[ECCV, 2022]** **Make-A-Scene:** Scene-Based Text-to-Image Generation with Human Priors [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.13131) \n     - **[CVPR, 2022]** **VQ-Diffusion:** Vector Quantized Diffusion Model for Text-to-Image Synthesis [Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FGu_Vector_Quantized_Diffusion_Model_for_Text-to-Image_Synthesis_CVPR_2022_paper.html) [Code](https:\u002F\u002Fgithub.com\u002Fcientgu\u002FVQ-Diffusion)\n     - **[CVPR, 2022]** **Make-A-Story:** Visual Memory Conditioned Consistent Story Generation (*storytelling*) [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.13319) \n     - **[NeurIPS, 2021]** **CogView**: Mastering Text-to-Image Generation via Transformers [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.13290) [Code](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCogView)\n     - **[Arxiv, 2021.02]** **DALL-E 1:** Zero-Shot Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.12092)\n \n- ##### Scale-wise Generation\n     - **[Arxiv, 2024.12]** Infinity: Scaling Bitwise AutoRegressive Modeling for High-Resolution Image Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.04431) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FInfinity)\n     - **[Arxiv, 2024.12]** **SWITTI**: Designing Scale-Wise Transformers for Text-to-Image Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01819) 
[Code](https:\u002F\u002Fgithub.com\u002Fyandex-research\u002Fswitti) [Page](https:\u002F\u002Fyandex-research.github.io\u002Fswitti\u002F)\n     - **[Arxiv, 2024.10]** **HART**: Efficient Visual Generation with Hybrid Autoregressive Transformer [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.10812) [Code](https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002Fhart)\n     - **[Arxiv, 2024.08]** **VAR-CLIP**: Text-to-Image Generator with Visual Auto-Regressive Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.01181) [Code](https:\u002F\u002Fgithub.com\u002Fdaixiangzi\u002FVAR-CLIP)\n     - **[Arxiv, 2024.06]** **STAR**: Scale-wise Text-to-image generation via Auto-Regressive representations [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.10797) [Code](https:\u002F\u002Fgithub.com\u002Fkrennic999\u002FSTAR)\n  \n#### Image-to-Image Translation\n  - **[ICCV, 2025]** **CycleVAR**: Repurposing Autoregressive Model for Unsupervised One-Step Image Translation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23347v1) \n  - **[ICML Workshop, 2024]** **MIS** Many-to-many Image Generation with Auto-regressive Diffusion Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.03109) \n  - **[Arxiv, 2024.03]** **SceneScript**: Reconstructing Scenes With An Autoregressive Structured Language Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13064) [Project](https:\u002F\u002Fwww.projectaria.com\u002Fscenescript\u002F)\n  - **[CVPR, 2024]** Sequential modeling enables scalable learning for large vision models [Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FBai_Sequential_Modeling_Enables_Scalable_Learning_for_Large_Vision_Models_CVPR_2024_paper.html)\n  - **[ECCV, 2022]** **QueryOTR**: Outpainting by Queries [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.05312) [Code](https:\u002F\u002Fgithub.com\u002FKaiseem\u002FQueryOTR)\n  - **[NeurIPS, 2022]** Visual prompting via image inpainting [Paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Fhash\u002F9f09f316a3eaf59d9ced5ffaefe97e0f-Abstract-Conference.html)\n  - **[MM, 2021]** Diverse image inpainting with bidirectional and autoregressive transformers [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.12335)\n\n#### Image Editing\n  - **[Arxiv, 2025.09]** Discrete Noise Inversion for Next-scale Autoregressive Text-based Image Editing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.01984v1) \n  - **[Arxiv, 2025.08]** Visual Autoregressive Modeling for Instruction-Guided Image Editing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.15772) [Code](https:\u002F\u002Fgithub.com\u002FHiDream-ai\u002FVAREdit) \n  - **[Arxiv, 2025.08]** NEP: Autoregressive Image Editing via Next Editing Token Prediction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.06044) [Code](https:\u002F\u002Fnep-bigai.github.io\u002F)\n  - **[Arxiv, 2025.07]** SCALAR: Scale-wise Controllable Visual Autoregressive Learning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.19946) \n  - **[Arxiv, 2025.04]** Anchor Token Matching: Implicit Structure Locking for Training-free AR Image Editing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.10434) [Code](https:\u002F\u002Fgithub.com\u002FhutaiHang\u002FATM)\n  - **[ICCV, 2025]** Training-Free Text-Guided Image Editing with Visual Autoregressive Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.23897) [Code](https:\u002F\u002Fgithub.com\u002Fwyf0912\u002FAREdit)\n  - 
**[Arxiv, 2025.01]** EditAR: Unified Conditional Generation with Autoregressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.04699) [Code](https:\u002F\u002Fgithub.com\u002FJitengMu\u002FEditAR)\n  - **[Arxiv, 2024,06]** CAR: Controllable Autoregressive Modeling for Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.04671) [Code](https:\u002F\u002Fgithub.com\u002FMiracleDance\u002FCAR)\n  - **[ICLR, 2025]** **ControlAR**: Controllable Image Generation with Autoregressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.02705) [Code](https:\u002F\u002Fgithub.com\u002Fhustvl\u002FControlAR)\n  - **[Arxiv, 2024,06]** **ControlVAR**: Exploring Controllable Visual Autoregressive Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09750) [Code](https:\u002F\u002Fgithub.com\u002Flxa9867\u002FControlVAR)\n  - **[Arxiv, 2024,06]** Medical Vision Generalist: Unifying Medical Imaging Tasks in Context [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.05565)\n  - **[Arxiv, 2024,04]** **M2M** Many-to-many Image Generation with Auto-regressive Diffusion Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.03109)\n  - **[ECCV, 2022]** **VQGAN-CLIP:** Open Domain Image Generation and Editing with Natural Language Guidance [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.08583)\n  - **[ECCV, 2022]** **Make-A-Scene:** Scene-Based Text-to-Image Generation with Human Priors [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.13131)\n  - **[ICIP, 2021]** **MSGNet:** Generating annotated high-fidelity images containing multiple coherent objects [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.12150)\n\n### Video Generation\n#### Unconditional Video Generation\n   - **[Arxiv, 2025.03]** FAR: Frame Autoregressive Model for Both Short- and Long-Context Video Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.19325) [Code](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FFAR)\n   - **[Arxiv, 2025.03]** HiTVideo: Hierarchical Tokenizers for Enhancing Text-to-Video Generation with Autoregressive Large Language Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.11513) \n   - **[Arxiv, 2025.03]** AR-Diffusion: Asynchronous Video Generation with Auto-Regressive Diffusion [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07418) [Code](https:\u002F\u002Fgithub.com\u002Fiva-mzsun\u002FAR-Diffusion)\n   - **[Arxiv, 2025.02]** Next Block Prediction: Video Generation via Semi-Autoregressive Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.07737) [Code](https:\u002F\u002Fgithub.com\u002FRenShuhuai-Andy\u002FNBP)\n   - **[Arxiv, 2025.01]** Taming Teacher Forcing for Masked Autoregressive Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.12389) [Code](https:\u002F\u002Fmagivideogen.github.io\u002F)\n   - **[Arxiv, 2024.10]**  **LARP**: Tokenizing Videos with a Learned Autoregressive Generative Prior [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.21264)\n   - **[ECCV 2024]** **ST-LLM**: Large Language Models Are Effective Temporal Learners [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.00308) [Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FST-LLM)\n   - **[ICLR, 2024]**  **MAGVIT-v2** Language Model Beats Diffusion -- Tokenizer is Key to Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.05737)\n   - **[CVPR, 2023]** **PVDM** Video Probabilistic Diffusion Models in Projected Latent Space 
[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FYu_Video_Probabilistic_Diffusion_Models_in_Projected_Latent_Space_CVPR_2023_paper.html)\n   - **[ECCV, 2022]**  Long Video Generation with Time-Agnostic VQGAN and Time-Sensitive Transformer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.03638) [Code](https:\u002F\u002Fgithub.com\u002FSongweiGe\u002FTATS)\n   - **[Arxiv, 2021.04]** **VideoGPT**: Video generation using VQ-VAE and transformers [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.07143)\n   - **[Arxiv, 2020.06]** Latent Video Transformer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.10704) [Code](https:\u002F\u002Fgithub.com\u002Frakhimovv\u002Flvt)\n   - **[ICLR, 2020]** Scaling Autoregressive Video Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1906.02634)\n   - **[CVPR, 2018]** **MoCoGAN**: Decomposing Motion and Content for Video Generation [Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FTulyakov_MoCoGAN_Decomposing_Motion_CVPR_2018_paper.pdf) [Code](https:\u002F\u002Fgithub.com\u002Fsergeytulyakov\u002Fmocogan)\n   - **[ICML, 2017]** Video Pixel Networks [Paper](https:\u002F\u002Fproceedings.mlr.press\u002Fv70\u002Fkalchbrenner17a.html?ref=https:\u002F\u002Fgithubhelp.com)\n\n#### Conditional Video Generation\n   - ##### Text-to-Video Generation\n    - **[Arxiv, 2025.10]** Uniform Discrete Diffusion with Metric Path for Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.24717) [Code](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FURSA) [Page](https:\u002F\u002Fbitterdhg.github.io\u002FURSA_page\u002F)\n     - **[Arxiv, 2025.10]** Autoregressive Video Generation beyond Next Frames Prediction [Paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.24081) \n     - **[Arxiv, 2025.07]** Lumos-1: On Autoregressive Video Generation from a Unified Model Perspective [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.08801) [Code](https:\u002F\u002Fgithub.com\u002Falibaba-damo-academy\u002FLumos)\n     - **[Arxiv, 2025.05]** Generative Pre-trained Autoregressive Diffusion Transformer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07344) \n     - **[Arxiv, 2024.12]** **DiCoDe**: Diffusion-Compressed Deep Tokens for Autoregressive Video Generation with Language Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.04446) [Page](https:\u002F\u002Fliyizhuo.com\u002FDiCoDe\u002F)\n     - **[Arxiv, 2024.11]** Ca2-VDM: Efficient Autoregressive Video Diffusion Model with Causal Generation and Cache Sharing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.16375) [Code](https:\u002F\u002Fgithub.com\u002FDawn-LX\u002FCausalCache-VDM\u002F)\n     - **[Arxiv, 2024.10]**  **ARLON**: Boosting Diffusion Transformers with Autoregressive Models for Long Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.20502)  [Code](http:\u002F\u002Faka.ms\u002Farlon)\n     - **[Arxiv, 2024.10]**  Progressive Autoregressive Video Diffusion Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.08151) [Code](https:\u002F\u002Fgithub.com\u002Fdesaixie\u002Fpa_vdm)\n     - **[Arxiv, 2024.10]**  **Pyramid Flow**: Pyramidal Flow Matching for Efficient Video Generative Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05954) [Code](https:\u002F\u002Fgithub.com\u002Fjy0205\u002FPyramid-Flow)\n     - **[Arxiv, 2024.10]**  **Loong**: Generating Minute-level Long Videos with Autoregressive Language Models 
[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.02757)\n     - **[Arxiv, 2024.06]**  **Pandora**: Towards General World Model with Natural Language Actions and Video States [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09455) [Code](https:\u002F\u002Fgithub.com\u002Fmaitrix-org\u002FPandora)\n     - **[Arxiv, 2024.06]** **iVideoGPT**: Interactive VideoGPTs are Scalable World Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15223) [Code](https:\u002F\u002Fgithub.com\u002Fthuml\u002FiVideoGPT)\n     - **[Arxiv, 2024.06]** **ViD-GPT**: Introducing GPT-style Autoregressive Generation in Video Diffusion Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.10981) [Code](https:\u002F\u002Fgithub.com\u002FDawn-LX\u002FCausal-VideoGen)\n     - **[Arxiv, 2024.02]** **LWM** World Model on Million-Length Video And Language With Blockwise RingAttention [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.08268) [Code](https:\u002F\u002Fgithub.com\u002FLargeWorldModel\u002FLWM)\n     - **[CVPR, 2024]** **ART-V**: Auto-Regressive Text-to-Video Generation with Diffusion Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.18834)\n     - **[NeurIPS, 2022]** **NUWA-Infinity**: Autoregressive over Autoregressive Generation for Infinite Visual Synthesis [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.09814) [Code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)\n     - **[ECCV, 2022]** **NÜWA**: Visual Synthesis Pre-training for Neural visUal World creAtion [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.12417) [Code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)\n     - **[Arxiv, 2022.05]** **CogVideo**: Large-scale Pretraining for Text-to-Video Generation via Transformers [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2205.15868) [Code](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCogVideo)\n     - **[Arxiv, 2022.05]** **GODIVA**: Generating Open-DomaIn Videos from nAtural Descriptions [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.14806)\n     - **[IJCAI, 2021]** **IRC-GAN**: Introspective Recurrent Convolutional GAN for Text-to-video Generation. 
[Paper](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2019\u002F0307.pdf)\n    \n   - ##### Visual Conditional Video Generation\n     - **[Arxiv, 2025.06]** VideoMAR: Autoregressive Video Generation with Continuous Tokens [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.14168) [Code](https:\u002F\u002Fyuhuustc.github.io\u002F\u002Fprojects\u002FVideoMAR.html)\n     - **[Arxiv, 2025.06]** DeepVerse: 4D Autoregressive Video Generation as a World Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.01103) [Code](https:\u002F\u002Fgithub.com\u002FSOTAMak1r\u002FDeepVerse)\n     - **[Arxiv, 2025.05]** Video-GPT via Next Clip Diffusion [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.12489) [Code](https:\u002F\u002Fgithub.com\u002Fzhuangshaobin\u002FVideo-GPT)\n     - **[Arxiv, 2025.04]** GenDoP: Auto-regressive Camera Trajectory Generation as a Director of Photography [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.07083) [Code](https:\u002F\u002Fgithub.com\u002F3DTopia\u002FGenDoP)\n     - **[Arxiv, 2024.10]**  **MarDini**: Masked Autoregressive Diffusion for Video Generation at Scale [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.20502)\n     - **[CVPR, 2024]**  **LVM** Sequential Modeling Enables Scalable Learning for Large Vision Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.00785) [Code](https:\u002F\u002Fgithub.com\u002Fytongbai\u002FLVM)\n     - **[ICIP, 2022]** **HARP**: Autoregressive Latent Video Prediction with High-Fidelity Image Generator [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.07143)\n     - **[Arxiv, 2021.03]** Predicting Video with VQVAE [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.01950)\n     - **[CVPR, 2021]** Stochastic Image-to-Video Synthesis using cINNs [Paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FDorkenwald_Stochastic_Image-to-Video_Synthesis_Using_cINNs_CVPR_2021_paper.pdf) [Code](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fimage2video-synthesis-using-cINNs)\n     - **[ICLR, 2019]** Eidetic 3d lstm: A model for video prediction and beyond [Paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=B1lKS2AqtX)\n     - **[ICLR, 2018]** Stochastic variational video prediction [Paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=rk49Mg-CW)\n     - **[NeurIPS, 2017]** **Predrnn**: Recurrent neural networks for predictive learning using spatiotemporal lstms [Paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2017\u002Ffile\u002Fe5f6ad6ce374177eef023bf5d0c018b6-Paper.pdf)\n     - **[NeurIPS, 2015]** Convolutional LSTM network: A machine learning approach for precipitation nowcasting [Paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2015\u002Ffile\u002F07563a3fe3bbe7e3ba84431ad9d055af-Paper.pdf)\n\n   - ##### Multimodal Conditional Video Generation\n      - **[Arxiv, 2025.01]** VideoAuteur: Towards Long Narrative Video Generation  [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.06173) [Code](https:\u002F\u002Fgithub.com\u002Flambert-x\u002FVideoAuteur)\n      - **[Arxiv, 2024.12]** Autoregressive Video Generation without Vector Quantization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.14169) [Code](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FNOVA)\n      - **[ICML, 2024]** **Video-LaVIT**:  Unified Video-Language Pre-training with Decoupled Visual-Motional Tokenization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.03161) 
[Code](https:\u002F\u002Fgithub.com\u002Fjy0205\u002FLaVIT)\n      - **[ICML, 2024]** **VideoPoet**: A Large Language Model for Zero-Shot Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.14125)\n      - **[CVPR, 2023]** **MAGVIT**: Masked Generative Video Transformer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.05199)\n      - **[CVPR, 2022]** Make It Move: Controllable Image-to-Video Generation with Text Descriptions [Paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FHu_Make_It_Move_Controllable_Image-to-Video_Generation_With_Text_Descriptions_CVPR_2022_paper.pdf) [Code](https:\u002F\u002Fgithub.com\u002FYouncy-Hu\u002FMAGE)\n\n#### AutoRegressive Diffusion-Forcing Video Generation\n  - **[Arxiv, 2025.10]** Real-Time Motion-Controllable Autoregressive Video Diffusion [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08131) [Code](https:\u002F\u002Fkesenzhao.github.io\u002FAR-Drag.github.io\u002F)\n  - **[Arxiv, 2025.10]** Self-Forcing++: Towards Minute-Scale High-Quality Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.02283) [Code](https:\u002F\u002Fgithub.com\u002Fjustincui03\u002FSelf-Forcing-Plus-Plus)\n  - **[Arxiv, 2025.10]** Pack and Force Your Memory: Long-form and Consistent Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.01784) [Code](https:\u002F\u002Fgithub.com\u002Fwuxiaofei01\u002FPFVG)\n  - **[Arxiv, 2025.09]** LongLive: Real-time Interactive Long Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.22622) [Code](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FLongLive)\n  - **[Arxiv, 2025.09]** Rolling Forcing: Autoregressive Long Video Diffusion in Real Time [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.25161v1) [Page](https:\u002F\u002Fkunhao-liu.github.io\u002FRolling_Forcing_Webpage\u002F) [Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FRollingForcing)\n  - **[Arxiv, 2025.08]** MIDAS: Multimodal Interactive Digital-humAn Synthesis via Real-time Autoregressive Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.19320) [Project](https:\u002F\u002Fchenmingthu.github.io\u002Fmilm\u002F)\n  - **[Arxiv, 2025.08]** Macro-from-Micro Planning for High-Quality and Parallelized Autoregressive Long Video Generation [Paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2508.03334) [Project](https:\u002F\u002Fnju-xunzhixiang.github.io\u002FAnchor-Forcing-Page\u002F) [Code](https:\u002F\u002Fgithub.com\u002Fxbxsxp9\u002FMMPL)\n  - **[Arxiv, 2025.06]** Self Forcing: Bridging the Train-Test Gap in Autoregressive Video Diffusion [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.08009) [Code](https:\u002F\u002Fgithub.com\u002Fguandeh17\u002FSelf-Forcing)\n  - **[Arxiv, 2025.04]** **MAGI-1**: Autoregressive Video Generation at Scale [Paper](https:\u002F\u002Fstatic.magi.world\u002Fstatic\u002Ffiles\u002FMAGI_1.pdf) [Code](https:\u002F\u002Fgithub.com\u002FSandAI-org\u002FMAGI-1)\n  - **[Arxiv, 2025.04]** SkyReels-V2: Infinite-length Film Generative Model [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.13074) [Code](https:\u002F\u002Fgithub.com\u002FSkyworkAI\u002FSkyReels-V2)\n  - **[Arxiv, 2025.04]** Packing Input Frame Context in Next-Frame Prediction Models for Video Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.12626)\n  - **[CVPR, 2025]** AR-Diffusion: Asynchronous Video Generation with Auto-Regressive Diffusion 
[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FSun_AR-Diffusion_Asynchronous_Video_Generation_with_Auto-Regressive_Diffusion_CVPR_2025_paper.html) [Code](https:\u002F\u002Fgithub.com\u002Fiva-mzsun\u002FAR-Diffusion)\n  - **[CVPR, 2025]** From Slow Bidirectional to Fast Autoregressive Video Diffusion Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07772) [Code](https:\u002F\u002Fgithub.com\u002Ftianweiy\u002FCausVid)\n  - **[NeurIPS, 2024]** FIFO-Diffusion: Generating Infinite Videos from Text without Training [Paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2024\u002Fhash\u002Fa397986e0f34d4b1f0b640686ceaeff7-Abstract-Conference.html) [Code](https:\u002F\u002Fgithub.com\u002Fjjihwan\u002FFIFO-Diffusion_public)\n\n#### Embodied AI\n   - **[Arxiv, 2025.03]** HybridVLA: Collaborative Diffusion and Autoregression in a Unified Vision-Language-Action Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.10631) [Code](https:\u002F\u002Fgithub.com\u002FPKU-HMI-Lab\u002FHybrid-VLA)\n   - **[Arxiv, 2024.12]** **Diffusion-VLA**: Scaling Robot Foundation Models via Unified Diffusion and Autoregression [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03293) [Page](https:\u002F\u002Fdiffusion-vla.github.io\u002F)\n   - **[Arxiv, 2024.10]** **GR-2**: A generative video-language-action model with web-scale knowledge for robot manipulation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.06158)\n   - **[Arxiv, 2024.05]** **iVideoGPT**: Interactive VideoGPTs are Scalable World Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15223)\n   - **[ICML, 2024]** **Genie**: Generative Interactive Environments [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.15391)\n   - **[ICLR, 2024]** **GR-1**: Unleashing Large-Scale Video Generative Pre-training for Visual Robot Manipulation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.13139)\n   - **[ICLR, 2023]** **IRIS**: Transformers are Sample-Efficient World Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.00588)\n     \n### 3D Generation\n#### Motion Generation\n  - **[Arxiv, 2025.06]** Auto-Regressive Surface Cutting [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.18017) [Code](https:\u002F\u002Fvictorcheung12.github.io\u002Fseamgpt\u002F)\n  - **[CVPR, 2025]** **Teller**: Real-Time Streaming Audio-Driven Portrait Animation with Autoregressive Motion Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.18429) [Page](https:\u002F\u002Fteller-avatar.github.io\u002F)\n  - **[CVPR, 2025]** **ScaMo**: Exploring the Scaling Law in Autoregressive Motion Generation Model [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14559) [Code](https:\u002F\u002Fgithub.com\u002Fshunlinlu\u002FScaMo_code)\n  - **[AAAI, 2024]** **AMD**: Autoregressive Motion Diffusion [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.09381) [Code](https:\u002F\u002Fgithub.com\u002Ffluide1022\u002FAMD)\n  - **[ECCV, 2024]** **BAMM**: Bidirectional Autoregressive Motion Model [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19435) [Code](https:\u002F\u002Fgithub.com\u002Fexitudio\u002FBAMM)\n  - **[CVPR, 2023]** **T2M-GPT**: Generating Human Motion from Textual Descriptions with Discrete Representations [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.06052)\n  - **[Arxiv, 2022.04]** **HiT-DVAE**: Human Motion Generation via Hierarchical Transformer Dynamical VAE 
[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.01565)\n  - **[ICCV, 2021 oral]** **HuMoR**: 3D Human Motion Model for Robust Pose Estimation [Paper](https:\u002F\u002Fgeometry.stanford.edu\u002Fprojects\u002Fhumor\u002Fdocs\u002Fhumor.pdf) [Code](https:\u002F\u002Fgithub.com\u002Fdavrempe\u002Fhumor)\n\n#### Point Cloud Generation\n   - **[Arxiv, 2025.06]** ShapeLLM-Omni: A Native Multimodal LLM for 3D Generation and Understanding [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.01853) [Code](https:\u002F\u002Fgithub.com\u002FJAMESYJL\u002FShapeLLM-Omni\u002F)\n   - **[SIGGRAPH, 2025]** OctGPT: Octree-based Multiscale Autoregressive Models for 3D Shape Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.09975) [Code](https:\u002F\u002Fgithub.com\u002Foctree-nn\u002Foctgpt)\n   - **[Arxiv, 2025.04]** Efficient Autoregressive Shape Generation via Octree-Based Adaptive Tokenization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02817) [Page](https:\u002F\u002Foat-3d.github.io\u002F)\n   - **[CVPR, 2025]** TreeMeshGPT: Artistic Mesh Generation with Autoregressive Tree Sequencing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.11629) [Code](https:\u002F\u002Fgithub.com\u002Fsail-sg\u002FTreeMeshGPT)\n   - **[Arxiv, 2025.03]** 3D Point Cloud Generation via Autoregressive Up-sampling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08594)\n   - **[Arxiv, 2024.02]** Pushing Auto-regressive Models for 3D Shape Generation at Capacity and Scalability [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12225)\n   - **[ECCV, 2022]** Autoregressive 3D Shape Generation via Canonical Mapping [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.01955)\n   - **[CVPR Workshop, 2023]** Octree Transformer: Autoregressive 3D Shape Generation on Hierarchically Structured Sequences [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.12480)\n\n#### 3D Medical Generation\n  - **[Arxiv, 2024.09]** Autoregressive Sequence Modeling for 3D Medical Image Representation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.08691v1) \n  - **[Arxiv, 2024.06]** Medical Vision Generalist: Unifying Medical Imaging Tasks in Context [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.05565)\n  - **[MIDL, 2024]** Conditional Generation of 3D Brain Tumor ROIs via VQGAN and Temporal-Agnostic Masked Transformer [Paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LLoSHPorlM)\n  - **[NMI, 2024]** Realistic morphology-preserving generative modelling of the brain [Paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs42256-024-00864-0) [Code](https:\u002F\u002Fgithub.com\u002FAmigoLab\u002FBrainSynth)\n  - **[Arxiv, 2023.10]** Generating 3D Brain Tumor Regions in MRI using Vector-Quantization Generative Adversarial Networks [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01251)\n  - **[ICCV, 2023]** Unaligned 2D to 3D Translation with Conditional Vector-Quantized Code Diffusion using Transformers [Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FCorona-Figueroa_Unaligned_2D_to_3D_Translation_with_Conditional_Vector-Quantized_Code_Diffusion_ICCV_2023_paper.pdf) [Code](https:\u002F\u002Fgithub.com\u002Fsamb-t\u002Fx2ct-vqvae)\n  - **[MICCAI, 2022]** Morphology-preserving Autoregressive 3D Generative Modelling of the Brain [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.03177) [Code](https:\u002F\u002Fgithub.com\u002FAmigoLab\u002FSynthAnatomy)\n\n### Multimodal 
Generation\n#### Unified Understanding and Generation Multi-Modal LLMs\n  - **[NeurIPS, 2025]** JavisGPT: A Unified Multi-modal LLM for Sounding-Video Comprehension and Generation [Paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=MZoOpD9NHV) \n  - **[Arxiv, 2025.11]** Wave-Particle (Continuous-Discrete) Dualistic Visual Tokenization for Unified Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.01593) [Code](https:\u002F\u002Fgithub.com\u002FCHEN-YIZHU\u002FWPIT)\n  - **[Arxiv, 2025.10]** NExT-OMNI: Towards Any-to-Any Omnimodal Foundation Models with Discrete Flow Matching [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.13721) \n  - **[Arxiv, 2025.10]** Emu3.5: Native Multimodal Models are World Learners [Paper](https:\u002F\u002Femu.world\u002FEmu35_tech_report.pdf) [Code](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FEmu3.5)\n  - **[Arxiv, 2025.10]** PairUni: Pairwise Training for Unified Multimodal Language Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.25682) [Code](https:\u002F\u002Fgithub.com\u002FHaochen-Wang409\u002FPairUni)\n  - **[Arxiv, 2025.10]** LightBagel: A Light-weighted, Double Fusion Framework for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.22946) [Page](https:\u002F\u002Fucsc-vlaa.github.io\u002FLightBagel\u002F)\n  - **[Arxiv, 2025.10]** Thinking with Camera: A Unified Multimodal Model for Camera-Centric Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.08673) [Code](https:\u002F\u002Fgithub.com\u002FKangLiao929\u002FPuffin)\n  - **[Arxiv, 2025.10]** SRUM: Fine-Grained Self-Rewarding for Unified Multimodal Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.12784) [Code](https:\u002F\u002Fgithub.com\u002FWayneJin0918\u002FSRUM)\n  - **[Arxiv, 2025.10]** UniFlow: A Unified Pixel Flow Tokenizer for Visual Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.10575) [Code](https:\u002F\u002Fgithub.com\u002FZhengrongYue\u002FUniFlow)\n  - **[Arxiv, 2025.10]** UniVideo: Unified Understanding, Generation, and Editing for Videos [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.08377) [Page](https:\u002F\u002Fcongwei1230.github.io\u002FUniVideo\u002F)\n  - **[Arxiv, 2025.10]** Ming-UniVision: Joint Image Understanding and Generation with a Unified Continuous Tokenizer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.06590) [Code](https:\u002F\u002Fgithub.com\u002FinclusionAI\u002FMing-UniVision)\n  - **[Arxiv, 2025.10]** Lumina-DiMOO: An Omni Diffusion Large Language Model for Multi-Modal Generation and Understanding [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.06308) [Code](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-DiMOO) [Page](https:\u002F\u002Fsynbol.github.io\u002FLumina-DiMOO\u002F)\n  - **[Arxiv, 2025.09]** Query-Kontext: An Unified Multimodal Model for Image Generation and Editing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.26641) \n  - **[Arxiv, 2025.09]** Lavida-O: Elastic Large Masked Diffusion Models for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.19244) \n  - **[Arxiv, 2025.09]** MANZANO: A Simple and Scalable Unified Multimodal Model with a Hybrid Vision Tokenizer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.16197) \n  - **[Arxiv, 2025.09]** RecA: Reconstruction Alignment Improves Unified Multimodal Models 
[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.07295) [Code](https:\u002F\u002Fgithub.com\u002FHorizonWind2004\u002Freconstruction-alignment)\n  - **[Arxiv, 2025.09]** Interleaving Reasoning for Better Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.06945) [Code](https:\u002F\u002Fgithub.com\u002FOsilly\u002FInterleaving-Reasoning-Generation)\n  - **[Arxiv, 2025.09]** OneCAT: Decoder-Only Auto-Regressive Model for Unified Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.03498) [Code](https:\u002F\u002Fgithub.com\u002Fonecat-ai\u002Fonecat) [Page](https:\u002F\u002Fgithub.com\u002Fonecat-ai\u002Fonecat)\n  - **[Arxiv, 2025.08]** TBAC-UniImage: Unified Understanding and Generation by Ladder-Side Diffusion Tuning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.08098) [Code](https:\u002F\u002Fgithub.com\u002FDruryXu\u002FTBAC-UniImage)\n  - **[Arxiv, 2025.08]** Bifrost-1: Bridging Multimodal LLMs and Diffusion Models with Patch-level CLIP Latents [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.05954) [Code](https:\u002F\u002Fgithub.com\u002FHL-hanlin\u002FBifrost-1)\n  - **[Arxiv, 2025.08]** Uni-COT: Towards Unified Chain-of-Thought Reasoning Across Text and Vision [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.05606) [Code](https:\u002F\u002Fgithub.com\u002FFr0zenCrane\u002FUniCoT)\n  - **[Arxiv, 2025.08]** UniEdit-I: Training-free Image Editing for Unified VLM via Iterative Understanding, Editing and Verifying [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03142) \n  - **[Arxiv, 2025.08]** Skywork UniPic: Unified Autoregressive Modeling for Visual Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03320) [Code](https:\u002F\u002Fgithub.com\u002FSkyworkAI\u002FUniPic)\n  - **[Arxiv, 2025.07]** Omni-Video: Democratizing Unified Video Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.06119) [Code](https:\u002F\u002Fgithub.com\u002FSAIS-FUXI\u002FOmni-Video)\n  - **[Arxiv, 2025.07]** Ovis-U1 Technical Report [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23044) [Code](https:\u002F\u002Fgithub.com\u002FAIDC-AI\u002FOvis-U1)\n  - **[Qwen, 2025.07]** Qwen VLo: From \"Understanding\" the World to \"Depicting\" It [Blog](https:\u002F\u002Fqwenlm.github.io\u002Fblog\u002Fqwen-vlo\u002F) \n  - **[ICCV, 2025]** USP: Unified Self-Supervised Pretraining for Image Generation and Understanding [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06132) [Code](https:\u002F\u002Fgithub.com\u002FAMAP-ML\u002FUSP)\n  - **[Arxiv, 2025.06]** UniCode²: Cascaded Large-scale Codebooks for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.20214) \n  - **[Arxiv, 2025.06]** OmniGen2: Exploration to Advanced Multimodal Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.18871) [Code](https:\u002F\u002Fgithub.com\u002FVectorSpaceLab\u002FOmniGen2)\n  - **[Arxiv, 2025.06]** Vision as a Dialect: Unifying Visual Understanding and Generation via Text-Aligned Representations [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.18898) [Code](https:\u002F\u002Fgithub.com\u002Fcsuhan\u002FTar)\n  - **[Arxiv, 2025.06]** UniFork: Exploring Modality Alignment for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.17202) [Code](https:\u002F\u002Fgithub.com\u002Ftliby\u002FUniFork)\n  - **[Arxiv, 2025.06]** Show-o2: Improved Native 
Unified Multimodal Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.15564) [Code](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FShow-o)\n  - **[Arxiv, 2025.06]** Ming-Omni: A Unified Multimodal Model for Perception and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09344) [Code](https:\u002F\u002Fgithub.com\u002FinclusionAI\u002FMing\u002Ftree\u002Fmain)\n  - **[Arxiv, 2025.06]** Pisces: An Auto-regressive Foundation Model for Image Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.10395) \n  - **[Arxiv, 2025.06]** UniWorld: High-Resolution Semantic Encoders for Unified Visual Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.03147) [Code](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FUniWorld-V1)\n  - **[Arxiv, 2025.06]** ShapeLLM-Omni: A Native Multimodal LLM for 3D Generation and Understanding [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.01853) [Code](https:\u002F\u002Fgithub.com\u002FJAMESYJL\u002FShapeLLM-Omni\u002F)\n  - **[Arxiv, 2025.05]** Muddit: Liberating Generation Beyond Text-to-Image with a Unified Discrete Diffusion Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23606) [Code](https:\u002F\u002Fgithub.com\u002FM-E-AGI-Lab\u002FMuddit)\n  - **[Arxiv, 2025.05]** OpenUni: A Simple Baseline for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23661) [Code](https:\u002F\u002Fgithub.com\u002Fwusize\u002FOpenUni)\n  - **[Arxiv, 2025.05]** FUDOKI: Discrete Flow-based Unified Understanding and Generation via Kinetic-Optimal Velocities [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.20147)\n  - **[Arxiv, 2025.05]** MMaDA: Multimodal Large Diffusion Language Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.15809) [Code](https:\u002F\u002Fgithub.com\u002FGen-Verse\u002FMMaDA)\n  - **[Arxiv, 2025.05]** Ming-Lite-Uni: Advancements in Unified Architecture for Natural Multimodal Interaction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.02471) [Code](https:\u002F\u002Fgithub.com\u002FinclusionAI\u002FMing\u002Ftree\u002Fmain\u002FMing-unify)\n  - **[Arxiv, 2025.05]** Emerging Properties in Unified Multimodal Pretraining [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14683) [Code](https:\u002F\u002Fgithub.com\u002Fbytedance-seed\u002FBAGEL)\n  - **[Arxiv, 2025.05]** BLIP3-o: A Family of Fully Open Unified Multimodal Models—Architecture, Training and Dataset [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.09568) [Code](https:\u002F\u002Fgithub.com\u002FJiuhaiChen\u002FBLIP3o)\n  - **[Arxiv, 2025.05]** Selftok: Discrete Visual Tokens of Autoregression, by Diffusion, and for Reasoning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07538) [Project](https:\u002F\u002Fselftok-team.github.io\u002Freport\u002F)\n  - **[Arxiv, 2025.05]** Nexus-Gen: A Unified Model for Image Understanding, Generation, and Editing [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.21356) [Code](https:\u002F\u002Fgithub.com\u002Fmodelscope\u002FNexus-Gen)\n  - **[Arxiv, 2025.05]** TokLIP: Marry Visual Tokens to CLIP for Multimodal Comprehension and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.05422) [Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FTokLIP)\n  - **[Arxiv, 2025.05]** Mogao: An Omni Foundation Model for Interleaved Multi-Modal Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.05472) \n  - **[Arxiv, 2025.04]** **VARGPT-v1.1**: 
Improve Visual Autoregressive Large Unified Model via Iterative Instruction Tuning and Reinforcement Learning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02949) [Code](https:\u002F\u002Fgithub.com\u002FVARGPT-family\u002FVARGPT-v1.1)\n  - **[Arxiv, 2025.04]** ILLUME+: Illuminating Unified MLLM with Dual Visual Tokenization and Diffusion Refinement [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.01934) [Code](https:\u002F\u002Fgithub.com\u002Fillume-unified-mllm\u002FILLUME_plus)\n  - **[OpenAI, 2025.03]** Addendum to GPT-4o System Card: Native image generation [Paper](https:\u002F\u002Fcdn.openai.com\u002F11998be9-5319-4302-bfbf-1167e093f1fb\u002FNative_Image_Generation_System_Card.pdf)\n  - **[Arxiv, 2025.03]** Harmonizing Visual Representations for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21979) [Code](https:\u002F\u002Fgithub.com\u002Fwusize\u002FHarmon)\n  - **[Arxiv, 2025.03]** Unified Autoregressive Visual Generation and Understanding with Continuous Tokens [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.13436) \n  - **[Arxiv, 2025.03]** DualToken: Towards Unifying Visual Understanding and Generation with Dual Visual Vocabularies [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.14324) \n  - **[Arxiv, 2025.03]** OmniMamba: Efficient and Unified Multimodal Understanding and Generation via State Space Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08686) [Code](https:\u002F\u002Fgithub.com\u002Fhustvl\u002FOmniMamba)\n  - **[Arxiv, 2025.02]** UniTok: A Unified Tokenizer for Visual Generation and Understanding [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20321) [Code](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FUniTok)\n  - **[Arxiv, 2025.02]** **HermesFlow**: Seamlessly Closing the Gap in Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.12148) [Code](https:\u002F\u002Fgithub.com\u002FGen-Verse\u002FHermesFlow)\n  - **[Arxiv, 2025.02]** **QLIP**: Text-Aligned Visual Tokenization Unifies Auto-Regressive Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.05178) [Code](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FQLIP)\n  - **[Arxiv, 2025.01]** **Janus-Pro**: Unified Multimodal Understanding and Generation with Data and Model Scaling [Paper](https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FJanus\u002Fblob\u002Fmain\u002Fjanus_pro_tech_report.pdf) [Code](https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FJanus)\n  - **[Arxiv, 2025.01]** **VARGPT**: Unified Understanding and Generation in a Visual Autoregressive Multimodal Large Language Model [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.12327) [Code](https:\u002F\u002Fgithub.com\u002FVARGPT-family\u002FVARGPT)\n  - **[Arxiv, 2024.12]** **LlamaFusion**: Adapting Pretrained Language Models for Multimodal Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.15188) \n  - **[Arxiv, 2024.12]** **MetaMorph**: Multimodal Understanding and Generation via Instruction Tuning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.14164) [Page](https:\u002F\u002Ftsb0601.github.io\u002Fmetamorph\u002F)\n  - **[Arxiv, 2024.12]** **Orthus**: Autoregressive Interleaved Image-Text Generation with Modality-Specific Heads [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.00127)\n  - **[Arxiv, 2024.12]** Multimodal Latent Language Modeling with Next-Token Diffusion. 
[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.08635) \n  - **[Arxiv, 2024.12]** **ILLUME**: Illuminating Your LLMs to See, Draw, and Self-Enhance. [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.06673) \n  - **[Arxiv, 2024.11]** **JanusFlow**: Harmonizing Autoregression and Rectified Flow for Unified Multimodal Understanding and Generation. [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.07975) [Project](https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FJanus)\n  - **[Arxiv, 2024.11]** Unified Generative and Discriminative Training for Multi-modal Large Language Models. [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.00304) [Project](https:\u002F\u002Fsugar-mllm.github.io\u002F)\n  - **[Arxiv, 2024.10]** **D-JEPA**: Denoising with a Joint-Embedding Predictive Architecture [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.03755) [Project](https:\u002F\u002Fd-jepa.github.io\u002F)\n  - **[Arxiv, 2024.10]** **Janus**: Decoupling Visual Encoding for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13848) [Code](https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FJanus)\n  - **[Arxiv, 2024.10]** **MMAR**: Towards Lossless Multi-Modal Auto-Regressive Probabilistic Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.10798) [Code](https:\u002F\u002Fgithub.com\u002FydcUstc\u002FMMAR)\n  - **[Arxiv, 2024.10]** **ACDC**: Autoregressive Coherent Multimodal Generation using Diffusion Correction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.04721) [Code](https:\u002F\u002Facdc2025.github.io\u002F)\n  - **[Arxiv, 2024.09]** **Emu3**: Next-Token Prediction is All You Need [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18869) [Code](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FEmu3) [Project](https:\u002F\u002Femu.baai.ac.cn\u002Fabout)\n  - **[Arxiv, 2024.09]** **VILA-U**: a Unified Foundation Model Integrating Visual Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.04429) [Code](https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002Fvila-u)\n  - **[Arxiv, 2024.09]** **MIO**: A Foundation Model on Multimodal Tokens [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.17692) \n  - **[Arxiv, 2024.08]** **Show-o:** One Single Transformer to Unify Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12528) [Code](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FShow-o)\n  - **[Arxiv, 2024.08]** **Transfusion:** Predict the Next Token and Diffuse Images with One Multi-Modal Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.11039) [Code](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Ftransfusion-pytorch)\n  - **[Arxiv, 2024.07]** **SEED-Story**: Multimodal Long Story Generation with Large Language Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.08683) [Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FSEED-Story)\n  - **[Arxiv, 2024.05]** **Chameleon**: Mixed-Modal Early-Fusion Foundation Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.09818) [Code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fchameleon)\n  - **[Arxiv, 2024.04]** **SEED-X**: Multimodal Models with Unified Multi-granularity Comprehension and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.14396) [Code](https:\u002F\u002Fgithub.com\u002FAILab-CVC\u002FSEED-X)\n  - **[ICML, 2024]** **Libra**: Building Decoupled Vision System on Large Language Models 
[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.10140) [Code](https:\u002F\u002Fgithub.com\u002FYifanXu74\u002FLibra)\n  - **[CVPR, 2024]** **Unified-IO 2**: Scaling Autoregressive Multimodal Models with Vision Language Audio and Action [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.17172) [Code](https:\u002F\u002Fgithub.com\u002Fallenai\u002Funified-io-2)\n  - **[CVPR, 2024]** **Anole**: An Open, Autoregressive and Native Multimodal Models for Interleaved Image-Text Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.06135) [Code](https:\u002F\u002Fgithub.com\u002FGAIR-NLP\u002Fanole)\n  - **[Arxiv, 2023.11]** **InstructSeq**: Unifying Vision Tasks with Instruction-conditioned Multi-modal Sequence Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.18835) [Code](https:\u002F\u002Fgithub.com\u002Frongyaofang\u002FInstructSeq)\n  - **[ICLR, 2024]** **Kosmos-G**: Generating Images in Context with Multimodal Large Language Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.02992) [Code](https:\u002F\u002Fgithub.com\u002Fxichenpan\u002FKosmos-G)\n  - **[ICLR, 2024]** **LaVIT**: Unified Language-Vision Pretraining in LLM with Dynamic Discrete Visual Tokenization [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.04669) [Code](https:\u002F\u002Fgithub.com\u002Fjy0205\u002FLaVIT)\n  - **[ICLR, 2024]** **SEED-LLaMA**: Making LLaMA SEE and Draw with SEED Tokenizer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01218) [Code](https:\u002F\u002Fgithub.com\u002FAILab-CVC\u002FSEED)\n  - **[ICLR, 2024]** **EMU**: Generative Pretraining in Multimodality [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.05222) [Code](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FEmu)\n  - **[Arxiv, 2023.09]** **CM3Leon:** Scaling Autoregressive Multi-Modal Models: Pretraining and Instruction Tuning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.02591) [Code](https:\u002F\u002Fgithub.com\u002Fkyegomez\u002FCM3Leon)\n  - **[Arxiv, 2023.07]** **SEED**: Planting a SEED of Vision in Large Language Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.08041) [Code](https:\u002F\u002Fgithub.com\u002FAILab-CVC\u002FSEED)\n  - **[NeurIPS, 2023]** **SPAE**: Semantic Pyramid AutoEncoder for Multimodal Generation with Frozen LLMs [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.17842)\n  - **[ICLR, 2023]** **Unified-IO**: A unified model for vision, language, and multi-modal tasks [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.08916) [Code](https:\u002F\u002Funified-io.allenai.org\u002F)\n  - **[ICML, 2023]** Grounding Language Models to Images for Multimodal Inputs and Outputs [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.13823) [Code](https:\u002F\u002Fgithub.com\u002Fkohjingyu\u002Ffromage)\n  - **[NeurIPS, 2022]** **Flamingo**: a Visual Language Model for Few-Shot Learning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.14198)\n  - **[Arxiv, 2021.12]** **ERNIE-ViLG**: Unified Generative Pre-training for Bidirectional Vision-Language Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.15283) \n  - **[KDD, 2021]** **M6:** A Chinese Multimodal Pretrainer [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.00823)\n\n### Personalized Image Generation\n  - **[Arxiv, 2025.10]** TokenAR: Multiple Subject Generation via Autoregressive Token-level enhancement [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.16332) [Code](https:\u002F\u002Fgithub.com\u002Flyrig\u002FTokenAR)\n  - **[Arxiv, 2025.09]** 
EchoGen: Generating Visual Echoes in Any Scene via Feed-Forward Subject-Driven Auto-Regressive Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.26127)\n  - **[Arxiv, 2025.08]** CoAR: Concept Injection into Autoregressive Models for Personalized Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.07341) [Code](https:\u002F\u002Fgithub.com\u002FKZF-kzf\u002FCoAR)\n  - **[ICCV, 2025]** CSD-VAR: Content-Style Decomposition in Visual Autoregressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.13984)\n  - **[Arxiv, 2025.07]** A Training-Free Style-Personalization via Scale-wise Autoregressive Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.04482)\n  - **[Arxiv, 2025.04]** A Training-Free Style-aligned Image Generation with Scale-wise Autoregressive Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.06144) \n  - **[Arxiv, 2025.04]** Personalized Text-to-Image Generation with Auto-Regressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.13162) [Code](https:\u002F\u002Fgithub.com\u002FKaiyueSun98\u002FT2I-Personalization-with-AR)\n  - **[CVPR, 2025]** Zero-Shot Styled Text Image Generation, but Make It Autoregressive [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.17074)\n\n### Other Generation\n  - **[Arxiv, 2025.05]** RestoreVAR: Visual Autoregressive Generation for All-in-One Image Restoration [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.18047) [Code](https:\u002F\u002Fgithub.com\u002Fsudraj2002\u002FRestoreVAR)\n  - **[Arxiv, 2025.04]** TAPNext: Tracking Any Point (TAP) as Next Token Prediction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.05579) \n  - **[Arxiv, 2025.04]** Beyond Words: Advancing Long-Text Image Generation via Multimodal Autoregressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.20198) [Code](https:\u002F\u002Ffingerrec.github.io\u002Flongtextar\u002F)\n  - **[Arxiv, 2025.03]** VARSR: Visual Autoregressive Modeling for Image Super Resolution [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.18993) [Code](https:\u002F\u002Fgithub.com\u002Fquyp2000\u002FVARSR)\n  - **[Arxiv, 2025.03]** Next-Scale Autoregressive Models are Zero-Shot Single-Image Object View Synthesizers [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.13588) [Code](https:\u002F\u002Fgithub.com\u002FShiran-Yuan\u002FArchonView)\n  - **[Arxiv, 2025.03]** Perceive, Understand and Restore: Real-World Image Super-Resolution with Autoregressive Multimodal Generative Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.11073) [Code](https:\u002F\u002Fgithub.com\u002Fnonwhy\u002FPURE)\n  - **[Arxiv, 2025.02]** ARTalk: Speech-Driven 3D Head Animation via Autoregressive Model [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20323) [Code](https:\u002F\u002Fxg-chu.site\u002Fproject_artalk\u002F)\n  - **[Arxiv, 2025.02]** Poly-Autoregressive Prediction for Modeling Interactions [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.08646) \n  - **[Arxiv, 2025.02]** SongGen: A Single Stage Auto-regressive Transformer for Text-to-Song Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13128) [Code](https:\u002F\u002Fgithub.com\u002FLiuZH-19\u002FSongGen)\n  - **[Arxiv, 2024.12]** DriveGPT: Scaling Autoregressive Behavior Models for Driving [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14415) \n  - **[TII, 2025]** VarAD: Lightweight High-Resolution Image Anomaly Detection via Visual Autoregressive Modeling 
[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.17263) [Code](https:\u002F\u002Fgithub.com\u002Fcaoyunkang\u002FVarAD)\n  - **[Arxiv, 2024.12]** **DrivingGPT**: Unifying Driving World Modeling and Planning with Multi-modal Autoregressive Transformers [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.18607) [Page](https:\u002F\u002Frogerchern.github.io\u002FDrivingGPT\u002F)\n  - **[Arxiv, 2024.12]** Advancing Auto-Regressive Continuation for Video Frames [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.03758)\n  - **[Arxiv, 2024.12]** It Takes Two: Real-time Co-Speech Two-person’s Interaction Generation via Reactive Auto-regressive Diffusion Model [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.02419)\n  - **[Arxiv, 2024.12]** **X-Prompt**: Towards Universal In-Context Image Generation in Auto-Regressive Vision Language Foundation Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01824) [Code](https:\u002F\u002Fgithub.com\u002FSunzeY\u002FX-Prompt)\n  - **[Arxiv, 2024.12]** **3D-WAG**: Hierarchical Wavelet-Guided Autoregressive Generation for High-Fidelity 3D Shapes [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.19037) [Code](https:\u002F\u002Fgithub.com\u002FTejaswiniMedi\u002F3DWAG-AR)\n  - **[Arxiv, 2024.11]** **SAR3D**: Autoregressive 3D Object Generation and Understanding via Multi-scale 3D VQVAE [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.16856) [Code](https:\u002F\u002Fgithub.com\u002Fcyw-3d\u002FSAR3D)\n  - **[Arxiv, 2024.11]** Scalable Autoregressive Monocular Depth Estimation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.11361) \n  - **[Arxiv, 2024.11]** LLaMA-Mesh: Unifying 3D Mesh Generation with Language Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.09595) [Code](https:\u002F\u002Fgithub.com\u002Fnv-tlabs\u002FLLaMa-Mesh)\n  - **[Arxiv, 2024.10]** DART: A Diffusion-Based Autoregressive Motion Model for Real-Time Text-Driven Motion Control [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05260)\n  - **[Arxiv, 2024.10]** Autoregressive Action Sequence Learning for Robotic Manipulation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.03132) [Code](https:\u002F\u002Fgithub.com\u002Fmlzxy\u002Farp)\n  - **[Arxiv, 2024.09]** BAD: Bidirectional Auto-regressive Diffusion for Text-to-Motion Generation [Paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2409.10847) [Code](https:\u002F\u002Fgithub.com\u002FRohollahHS\u002FBAD)\n  - **[Arxiv, 2024.07]** Video In-context Learning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.07356)\n  - **[CVPR, 2024]** Sequential Modeling Enables Scalable Learning for Large Vision Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.00785) [Code](https:\u002F\u002Fgithub.com\u002Fytongbai\u002FLVM)\n  - **[AAAI, 2024]** Autoregressive Omni-Aware Outpainting for Open-Vocabulary 360-Degree Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.03467) [Code](https:\u002F\u002Fgithub.com\u002FzhuqiangLu\u002FAOG-NET-360)\n  - **[arxiv, 2024]** **LM4LV**: A Frozen Large Language Model for Low-level Vision Tasks [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15734) [Code](https:\u002F\u002Fgithub.com\u002Fbytetriper\u002FLM4LV)\n  - **[CVPR, 2024]** **ARTrackV2**: Prompting Autoregressive Tracker Where to Look and How to Describe [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.17133) [Code](https:\u002F\u002Fgithub.com\u002FMIV-XJTU\u002FARTrack)\n  - **[CVPR, 2023 Highlight]** Autoregressive Visual Tracking 
[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FWei_Autoregressive_Visual_Tracking_CVPR_2023_paper.pdf) [Code](https:\u002F\u002Fgithub.com\u002FMIV-XJTU\u002FARTrack)\n  - **[CVPR, 2023]** **Visual Chain of Thought**: Bridging Logical Gaps with Multimodal Infillings [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.02317) \n  - **[NeurIPS, 2022]** Visual Prompting via Image Inpainting [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.00647) [Code](https:\u002F\u002Fgithub.com\u002Famirbar\u002Fvisual_prompting)\n  - **[EMNLP, 2022]** **MAGMA** – Multimodal Augmentation of Generative Models through Adapter-based Finetuning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.05253)\n  - **[NeurIPS, 2021]** Multimodal Few-Shot Learning with Frozen Language Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.13884)\n  - **[ECCV, 2020]** Autoregressive Unsupervised Image Segmentation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.08247)\n\n### Benchmark \u002F Analysis\n  - **[Arxiv, 2025.09]** GenExam: A Multidisciplinary Text-to-Image Exam [Paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.14232) [Code](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FGenExam)\n  - **[Arxiv, 2025.09]** The Telephone Game: Evaluating Semantic Drift in Unified Models [Paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.04438) [Code](https:\u002F\u002Fgithub.com\u002Fmollahsabbir\u002FSemantic-Drift-in-Unified-Models)\n  - **[Arxiv, 2025.08]** Echo-4o: Harnessing the Power of GPT-4o Synthetic Images for Improved Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.09987) [Code](https:\u002F\u002Fgithub.com\u002Fyejy53\u002FEcho-4o)\n  - **[Arxiv, 2025.07]** GPT-IMAGE-EDIT-1.5M: A Million-Scale, GPT-Generated Image Dataset [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.21033) [Code](https:\u002F\u002Fgithub.com\u002Fwyhlovecpp\u002FGPT-Image-Edit\u002Ftree\u002Fmain)\n  - **[Arxiv, 2025.05]** ImgEdit: A Unified Image Editing Dataset and Benchmark [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.20275) [Code](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FImgEdit)\n  - **[Arxiv, 2025.05]** RISEBench: Envisioning Beyond the Pixels: Benchmarking Reasoning-Informed Visual Editing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02826) [Code](https:\u002F\u002Fgithub.com\u002FPhoenixZ810\u002FRISEBench)\n  - **[Arxiv, 2025.05]** Are Unified Vision-Language Models Necessary: Generalization Across Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23043) [Code](https:\u002F\u002Fgithub.com\u002FMajorDavidZhang\u002FGeneralization_unified_VLM)\n  - **[Arxiv, 2025.05]** TokBench: Evaluating Your Visual Tokenizer before Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.18142) [Code](https:\u002F\u002Fgithub.com\u002Fwjf5203\u002FTokBench)\n  - **[Arxiv, 2025.05]** VTBench: Evaluating Visual Tokenizers for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.13439) [Code](https:\u002F\u002Fgithub.com\u002Fhuawei-lin\u002FVTBench)\n  - **[Arxiv, 2025.05]** UniEval: Unified Holistic Evaluation for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.10483) [Code](https:\u002F\u002Fgithub.com\u002Fxmed-lab\u002FUniEval)\n  - **[Arxiv, 2025.05]** WorldGenBench: A World-Knowledge-Integrated Benchmark for Reasoning-Driven Text-to-Image Generation 
[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.01490) [Code](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fworldrl\u002FWorldGenBench)\n  - **[Arxiv, 2025.04]** MME-Unify: A Comprehensive Benchmark for Unified Multimodal Understanding and Generation Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.03641) [Code](https:\u002F\u002Fgithub.com\u002FMME-Benchmarks\u002FMME-Unify)\n  - **[Arxiv, 2025.04]** GPT-ImgEval: A Comprehensive Benchmark for Diagnosing GPT4o in Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02782) [Code](https:\u002F\u002Fgithub.com\u002FPicoTrex\u002FGPT-ImgEval)\n  - **[Arxiv, 2025.03]** WISE: A World Knowledge-Informed Semantic Evaluation for Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07265) [Code](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FWISE)\n  - **[Arxiv, 2025.03]** Error Analyses of Auto-Regressive Video Diffusion Models: A Unified Framework [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.10704) [Code](https:\u002F\u002Fgithub.com\u002Fsail-sg\u002FMeta-ARVDM)\n  - **[Arxiv, 2024.10]** Diffusion Beats Autoregressive: An Evaluation of Compositional Generation in Text-to-Image Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.22775) \n\n### Reasoning Alignment \n  - **[Arxiv, 2025.10]** Improving Chain-of-Thought Efficiency for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.05593) \n  - **[Arxiv, 2025.09]** STAGE: Stable and Generalizable GRPO for Autoregressive Image Generation [Paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.25027) [Code](https:\u002F\u002Fgithub.com\u002Fkrennic999\u002FSTAGE)\n  - **[Arxiv, 2025.09]** Group Critical-token Policy Optimization for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.22485) [Code](https:\u002F\u002Fgithub.com\u002Fzghhui\u002FGCPO)\n  - **[Arxiv, 2025.09]** Understanding-in-Generation: Reinforcing Generative Capability of Unified Model via Infusing Understanding into Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.18639) [Code](https:\u002F\u002Fgithub.com\u002FQC-LY\u002FUiG)\n  - **[Arxiv. 2025.09]** Can Understanding and Generation Truly Benefit Together — or Just Coexist? [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.09666) [Code](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FUAE)\n  - **[Arxiv, 2025.08]** AR-GRPO: Training Autoregressive Image Generation Models via Reinforcement Learning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.06924) [Code](https:\u002F\u002Fgithub.com\u002FKwai-Klear\u002FAR-GRPO)\n  - **[Arxiv, 2025.08]** The Promise of RL for Autoregressive Image Editing [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.01119) [Code](https:\u002F\u002Fgithub.com\u002Fmair-lab\u002FEARL)\n  - **[Arxiv, 2025.07]** X-Omni: Reinforcement Learning Makes Discrete Autoregressive Image Generative Models Great Again [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.22058) [Code](https:\u002F\u002Fgithub.com\u002FX-Omni-Team\u002FX-Omni) [Page](https:\u002F\u002Fx-omni-team.github.io\u002F)\n  - **[Arxiv, 2025.07]** CoT-lized Diffusion: Let’s Reinforce T2I Generation Step-by-step [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.04451) \n  - **[Arxiv, 2025.06]** Delving into RL for Image Generation with CoT: A Study on DPO vs. 
GRPO [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.17017) [Code](https:\u002F\u002Fgithub.com\u002FZiyuGuo99\u002FImage-Generation-CoT)\n  - **[Arxiv, 2025.06]** Unlocking Aha Moments via Reinforcement Learning: Advancing Collaborative Visual Comprehension and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.01480) [Code](https:\u002F\u002Fjanus-pro-r1.github.io\u002F)\n  - **[Arxiv, 2025.06]** ReasonGen-R1: CoT for Autoregressive Image Generation model through SFT and RL [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.24875) [Code](https:\u002F\u002Fgithub.com\u002FFranklin-Zhang0\u002FReasonGen-R1)\n  - **[Arxiv, 2025.05]** UniRL: Self-Improving Unified Multimodal Models via Supervised and Reinforcement Learning [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23380) [Code](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FUniRL)\n  - **[Arxiv, 2025.05]** UniGen: Enhanced Training & Test-Time Strategies for Unified Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14682) \n  - **[Arxiv, 2025.04]** SimpleAR: Pushing the Frontier of Autoregressive Visual Generation through Pretraining, SFT, and RL [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.11455) [Code](https:\u002F\u002Fgithub.com\u002Fwdrink\u002FSimpleAR)\n  - **[Arxiv, 2025.03]** **LightGen**: Efficient Image Generation through Knowledge Distillation and Direct Preference Optimization [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.08619) [Code](https:\u002F\u002Fgithub.com\u002FXianfengWu01\u002FLightGen)\n  - **[Arxiv, 2025.02]** Autoregressive Image Generation Guided by Chains of Thought [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.16965) [Code](https:\u002F\u002Fgithub.com\u002FLTH14\u002Ffractalgen)\n  - **[Arxiv, 2025.01]** Can We Generate Images with CoT? 
Let's Verify and Reinforce Image Generation Step by Step [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13926) [Code](https:\u002F\u002Fgithub.com\u002FZiyuGuo99\u002FImage-Generation-CoT)\n\n### Safety\n  - **[Arxiv, 2025.09]** Closing the Safety Gap: Surgical Concept Erasure in Visual Autoregressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.22400)\n  - **[Arxiv, 2025.06]** BitMark for Infinity: Watermarking Bitwise Autoregressive Image Generative Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.21209) \n  - **[Arxiv, 2025.06]** EAR: Erasing Concepts from Unified Autoregressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.20151) [Code](https:\u002F\u002Fgithub.com\u002Fimmc-lab\u002Fear\u002F)\n  - **[Arxiv, 2025.06]** Watermarking Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.16349) [Code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fwmar)\n  - **[Arxiv, 2025.06]** A Watermark for Auto-Regressive Image Generation Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.11371)\n  - **[Arxiv, 2025.05]** Training-Free Watermarking for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14673) [Code](https:\u002F\u002Fgithub.com\u002Fmaifoundations\u002FIndexMark)\n  - **[Arxiv, 2025.02]** Privacy Attacks on Image AutoRegressive Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.02514) [Code](https:\u002F\u002Fgithub.com\u002Fsprintml\u002Fprivacy_attacks_against_iars)\n\n### Accelerating\n  - **[Arxiv, 2025.10]** Hawk: Leveraging Spatial Context for Faster Autoregressive Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.25739)\n  - **[Arxiv, 2025.10]** MC-SJD: Maximal Coupling Speculative Jacobi Decoding for Autoregressive Visual Generation Acceleration [Paper](https:\u002F\u002Farxiv.org\u002Fhtml\u002F2510.24211v1) \n  - **[NeurIPS 2025, Arxiv\u002F2025.10]** Speculative Jacobi-Denoising Decoding for Accelerating Autoregressive Text-to-image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08994) \n  - **[Arxiv, 2025.09]** Hyper-Bagel: A Unified Acceleration Framework for Multimodal Understanding and Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.18824) [Page](https:\u002F\u002Fhyper-bagel.github.io\u002F)\n  - **[Arxiv, 2025.07]** Locality-aware Parallel Decoding for Efficient Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.01957) [Code](https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002Flpd)\n  - **[Arxiv, 2025.05]** DiSA: Diffusion Step Annealing in Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.20297) [Code](https:\u002F\u002Fgithub.com\u002FQinyu-Allen-Zhao\u002FDiSA)\n  - **[Arxiv, 2025.05]** FastCar: Cache Attentive Replay for Fast Auto-Regressive Video Generation on the Edge [Code](https:\u002F\u002Fgithub.com\u002Fshawnricecake\u002Ffast-car) [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14709)\n  - **[Arxiv, 2025.04]** Fast Autoregressive Models for Continuous Latent Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.18391)\n  - **[ICLR, 2025]** Distilled Decoding 1: One-step Sampling of Image Auto-regressive Models with Flow Matching [Paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zKlFXV87Pp) [Code](https:\u002F\u002Fgithub.com\u002Fimagination-research\u002Fdistilled-decoding)\n  - **[CVPR oral, 2025]** Autoregressive Distillation of Diffusion 
Transformers [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.11295) [Code](https:\u002F\u002Fgithub.com\u002Falsdudrla10\u002FARD)\n  - **[Arxiv, 2025.04]** Head-Aware KV Cache Compression for Efficient Visual Autoregressive Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.09261)\n  - **[CVPR, 2025]** From Slow Bidirectional to Fast Autoregressive Video Diffusion Models [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07772) [Code](https:\u002F\u002Fgithub.com\u002Ftianweiy\u002FCausVid)\n  - **[Arxiv, 2025.03]** Fast Autoregressive Video Generation with Diagonal Decoding [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.14070)\n  - **[CVPR 2025, 2025\u002F2024.12]** Parallelized Autoregressive Visual Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.15119) [Code](https:\u002F\u002Fgithub.com\u002FEpiphqny\u002FPAR)\n  - **[Arxiv, 2024.11]** Collaborative Decoding Makes Visual Auto-Regressive Modeling Efficient [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.17787) [Code](https:\u002F\u002Fgithub.com\u002Fczg1225\u002FCoDe)\n  - **[Arxiv, 2024.11]** Continuous Speculative Decoding for Autoregressive Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.11925) [Code](https:\u002F\u002Fgithub.com\u002FMarkXCloud\u002FCSpD)\n  - **[ICLR, 2025\u002F2024.10]** Accelerating Auto-regressive Text-to-Image Generation with Training-free Speculative Jacobi Decoding [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.01699)\n    \n### Stability & Scaling \n  - **[Arxiv, 2025.03]** Improving Autoregressive Image Generation through Coarse-to-Fine Token Prediction [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.16194) [Code](https:\u002F\u002Fgithub.com\u002FGzyAftermath\u002FCTF)\n  - **[Arxiv, 2025.03]** Teaching Metric Distance to Autoregressive Multimodal Foundational Models [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.02379) \n  - **[Arxiv, 2024.12]** 3D representation in 512-Byte: Variational tokenizer is the key for autoregressive 3D generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.02202) [Page](https:\u002F\u002Fsparse-mvs-2.github.io\u002FVAT.IO\u002F)\n  - **[Arxiv, 2024.12]** JetFormer: An autoregressive generative model of raw images and text [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.19722) \n  - **[Arxiv, 2024.10]** Elucidating the Design Space of Language Models for Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.16257) [Code](https:\u002F\u002Fgithub.com\u002FPepper-lll\u002FLMforImageGeneration)\n  - **[NeurIPS, 2024]** Stabilize the Latent Space for Image Autoregressive Modeling: A Unified Perspective [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12490) [Code](https:\u002F\u002Fgithub.com\u002FDAMO-NLP-SG\u002FDiGIT)\n  - **[Arxiv, 2024.09]** Pre-trained Language Models Do Not Help Auto-regressive Text-to-Image Generation [Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.16201)\n  - **[Arxiv, 2020]** Scaling Laws for Autoregressive Generative Modeling [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.14701)\n\n### Tutorial\n-  [MIT 6.S978 Deep Generative Models](https:\u002F\u002Fmit-6s978.github.io\u002Fassets\u002Fpdfs\u002Flec3_ar.pdf) by Kaiming He\n-  [UvA DL Notebooks](https:\u002F\u002Fuvadlc-notebooks.readthedocs.io\u002Fen\u002Flatest\u002Ftutorial_notebooks\u002Ftutorial12\u002FAutoregressive_Image_Modeling.html)\n-  [MSC Deep 
Learning](https:\u002F\u002Fhal.cse.msu.edu\u002Fteaching\u002F2022-fall-deep-learning\u002F18-autoregressive-models\u002F#\u002F2)\n\n### Evaluation Metrics\n| Metric                              | Analysis Type               | Reference                                            |\n|-------------------------------------|-----------------------------|------------------------------------------------------|\n| Inception Score (IS) ↑                | Quantitative              | [Salimans et al., 2016](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1606.03498)                                |\n| Fréchet Inception Distance (FID)  ↓   | Quantitative              | [Heusel et al., 2017](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1706.08500)                                  |\n| Kernel Inception Distance (KID)  ↓    | Quantitative              | [Binkowski et al., 2018](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1801.01401)                               |\n| Precision and Recall ↑                | Quantitative              | [Powers, 2020](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.16061)                                         |\n| CLIP Maximum Mean Discrepancy ↓       | Quantitative              | [Jayasumana et al., 2023](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.09603)                              |\n| CLIP Score ↑                          | Quantitative              | [Hessel et al., 2021](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08718)                                  |\n| R-precision ↑                         | Quantitative              | [Craswell et al., 2009](https:\u002F\u002Fdoi.org\u002F10.1007\u002F978-0-387-39940-9_486)                   |\n| Perceptual Path Length  ↓             | Quantitative              | [Karras et al., 2019](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.04948)                                  |\n| Fréchet Video Distance (FVD) ↓        | Quantitative              | [Unterthiner et al., 2019](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.01717)                             |\n| Aesthetic (Expert Evaluation) ↑       | Qualitative               | Based on domain expertise                                                                |\n| Turing Test                           | Qualitative               | [Turing, 1950](https:\u002F\u002Facademic.oup.com\u002Fmind\u002Farticle\u002FLIX\u002F236\u002F433\u002F986238)                 |\n| User Studies (ratings, satisfaction)↑ | Qualitative               | Various, depending on the user study methodology                                         |\n\n## 👍 Acknowledgement\n- [Awesome Unified Multimodal Models](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FAwesome-Unified-Multimodal-Models), NUS\n- [Awesome Unified Multimodal Models](https:\u002F\u002Fgithub.com\u002FPurshow\u002FAwesome-Unified-Multimodal), PKU\n- [Awesome Unified Multimodal Models](https:\u002F\u002Fgithub.com\u002FAIDC-AI\u002FAwesome-Unified-Multimodal-Models), [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.02567), Alibaba\n\n\n## ♥️ Contributors\n\n\u003Ca href=\"https:\u002F\u002Fgithub.com\u002FChaofanTao\u002FAutoregressive-Models-in-Vision-Survey\u002Fgraphs\u002Fcontributors\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FChaofanTao_Autoregressive-Models-in-Vision-Survey_readme_6e44039432bc.png\" \u002F>\n\u003C\u002Fa>\n\n\n","\u003Cdiv align=center>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FChaofanTao_Autoregressive-Models-in-Vision-Survey_readme_22f7bed6b2c1.png\" 
width=\"160px\">\n\u003C\u002Fdiv>\n\u003Ch2 align=\"center\"> \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05902\">[TMLR 2025] 视觉领域中的优秀自回归模型  \u003Cdiv align=center> \u003C\u002Fa>\u003C\u002Fh2>\n\u003Ch5 align=\"center\"> 如果你喜欢我们的项目，请在 GitHub 上为我们点亮一颗星 ⭐，以获取最新更新。\u003C\u002Fh5>\n\n\u003Ch5 align=\"center\">\n\n   [![Awesome](https:\u002F\u002Fawesome.re\u002Fbadge.svg)](https:\u002F\u002Fawesome.re)\n   [![arxiv](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FArxiv-2411.05902-red)](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.05902.pdf)\n   [![TechBeat](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F机器之心%20-black)](https:\u002F\u002Fmp.weixin.qq.com\u002Fs\u002F_O8W1qgvMZu37IKwgtskMA)\n   ![GitHub Repo stars](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fstars\u002FChaofanTao\u002FAutoregressive-Models-in-Vision-Survey)\n\n\u003C\u002Fh5>\n\n自回归模型通过顺序建模依赖关系，在生成高质量内容方面取得了显著进展。本仓库是一个精心整理的列表，收录了关于视觉领域自回归模型最新进展的相关论文。\n\n> **论文**: [[TMLR 2025🔥]](https:\u002F\u002Fopenreview.net\u002Fforum?id=1BqXkjNEGP) [视觉领域的自回归模型：综述](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05902) | [[中文解读]](https:\u002F\u002Fmp.weixin.qq.com\u002Fs\u002F_O8W1qgvMZu37IKwgtskMA)\n\n> **作者**: *Jing Xiong\u003Csup>1,†\u003C\u002Fsup>, Gongye Liu\u003Csup>2,†\u003C\u002Fsup>, Lun Huang\u003Csup>3\u003C\u002Fsup>, Chengyue Wu\u003Csup>1\u003C\u002Fsup>, Taiqiang Wu\u003Csup>1\u003C\u002Fsup>, Yao Mu\u003Csup>1\u003C\u002Fsup>, Yuan Yao\u003Csup>4\u003C\u002Fsup>, Hui Shen\u003Csup>5\u003C\u002Fsup>, Zhongwei Wan\u003Csup>5\u003C\u002Fsup>, Jinfa Huang\u003Csup>4\u003C\u002Fsup>, Chaofan Tao\u003Csup>1,‡\u003C\u002Fsup>, Shen Yan\u003Csup>6\u003C\u002Fsup>, Huaxiu Yao\u003Csup>7\u003C\u002Fsup>, Lingpeng Kong\u003Csup>1\u003C\u002Fsup>, Hongxia Yang\u003Csup>9\u003C\u002Fsup>, Mi Zhang\u003Csup>5\u003C\u002Fsup>, Guillermo Sapiro\u003Csup>8,10\u003C\u002Fsup>, Jiebo Luo\u003Csup>4\u003C\u002Fsup>, Ping Luo\u003Csup>1\u003C\u002Fsup>, Ngai Wong\u003Csup>1\u003C\u002Fsup>*\n\n> *\u003Csup>1\u003C\u002Fsup>香港大学, \u003Csup>2\u003C\u002Fsup>清华大学, \u003Csup>3\u003C\u002Fsup>杜克大学, \u003Csup>4\u003C\u002Fsup>罗切斯特大学, \u003Csup>5\u003C\u002Fsup>俄亥俄州立大学, \u003Csup>6\u003C\u002Fsup>字节跳动, \u003Csup>7\u003C\u002Fsup>北卡罗来纳大学教堂山分校, \u003Csup>8\u003C\u002Fsup>苹果公司, \u003Csup>9\u003C\u002Fsup>香港理工大学, \u003Csup>10\u003C\u002Fsup>普林斯顿大学*\n\n> *\u003Csup>†\u003C\u002Fsup>核心贡献者, \u003Csup>‡\u003C\u002Fsup>通讯作者*\n\n\u003Cbr>\n\n\u003Cdetails open>\u003Csummary>💡 我们还有其他生成式项目，或许也会引起你的兴趣 ✨。 \u003C\u002Fsummary>\u003Cp>\n\u003C!--  may -->\n\n> [**个性化视频生成：进展、应用与挑战**]() \u003Cbr>\n> Jinfa Huang, Shenghai Yuan, Kunyang Li, and Meng Cao etc. 
\u003Cbr>\n[![github](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-Github-black?logo=github)](https:\u002F\u002Fgithub.com\u002FinFaaa\u002FAwesome-Personalized-Video-Creation)  [![github](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fstars\u002FinFaaa\u002FAwesome-Personalized-Video-Creation.svg?style=social)](https:\u002F\u002Fgithub.com\u002FinFaaa\u002FAwesome-Personalized-Video-Creation) \u003Cbr>\n\n> \u003C\u002Fp>\u003C\u002Fdetails>\n\n\n## 📑 引用\n如果你的工作中使用了本仓库的内容，请考虑引用我们的论文。衷心感谢！\n\n```BibTeX\n@misc{xiong2024autoregressive,\n    title={Autoregressive Models in Vision: A Survey},\n    author={Jing Xiong and Gongye Liu and Lun Huang and Chengyue Wu and Taiqiang Wu and Yao Mu and Yuan Yao and Hui Shen and Zhongwei Wan and Jinfa Huang and Chaofan Tao and Shen Yan and Huaxiu Yao and Lingpeng Kong and Hongxia Yang and Mi Zhang and Guillermo Sapiro and Jiebo Luo and Ping Luo and Ngai Wong},\n    year={2024},\n    eprint={2411.05902},\n    archivePrefix={arXiv},\n    primaryClass={cs.CV}\n}\n```\n\n## 📣 最新动态\n\n`[2025-11-01]` ⏸️ 经过一年自回归视觉生成技术的快速发展，目前该领域已形成两大明确趋势：**统一的多模态模型**和**自回归扩散驱动的视频生成**。我们现有的仓库分类已无法全面反映这一不断演化的格局，因此我们将进入维护模式，并从今天起**暂停主动更新**。不过，仓库仍可作为参考资料使用，我们也欢迎针对新趋势的**定向 PR 提交**（如新增内容、修正错误或重新组织结构）。感谢大家的支持！🙏\n\n`[2025-05-31]` 🔥 我们的综述已在 [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05902) 上进行了修订！修订后的论文精简了内容，并进一步深化了以下方面的讨论：\n   - 连续自回归方法\n   - 计算成本\n   - 更详细的评估指标\n   - 扩展未来应用路线图\n\n`[2025-03-11]` 🔥 我们的综述 [《视觉领域的自回归模型：综述》](https:\u002F\u002Fopenreview.net\u002Fforum?id=1BqXkjNEGP) 已被 TMLR 2025 接受！\n\n`[2024-11-11]` 我们发布了综述：[《视觉领域的自回归模型：综述》](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05902)。\n\n`[2024-10-13]` 我们正式启用了该仓库。\n\n\u003Cdiv align=center>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FChaofanTao_Autoregressive-Models-in-Vision-Survey_readme_15bbafc53273.png\" width=\"800px\">\n\u003C\u002Fdiv>\n\n\n## ⚡ 贡献\n我们欢迎所有反馈、建议以及有助于改进本综述和仓库的贡献，使其成为整个社区的宝贵资源。\n我们将根据新出现的研究成果，持续维护本仓库。如果你对我们的分类体系有任何建议，或者发现遗漏的论文，又或是有已被某个会议或期刊接受的预印本论文，都欢迎随时提出。\n\n如果你想将自己的工作或模型加入本列表，请随时发送邮件至 jhuang90@ur.rochester.edu，或提交 [拉取请求](https:\u002F\u002Fgithub.com\u002FChaofanTao\u002Fautoregressive-vision-survey\u002Fpulls)。Markdown 格式如下：\n\n```markdown\n* [**会议或期刊名称 + 年份**] 论文标题。[论文](链接) [代码](链接)\n```\n\n\n## 📖 目录\n  - [图像生成](#image-generation)\n    - [无条件\u002F类别条件图像生成](#unconditionalclass-conditioned-image-generation)\n    - [文本到图像生成](#text-to-image-generation)\n    - [图像到图像转换](#image-to-image-translation)\n    - [图像编辑](#image-editing)\n  - [视频生成](#video-generation)\n    - [无条件视频生成](#unconditional-video-generation)\n    - [条件视频生成](#conditional-video-generation)\n    - [具身智能](#embodied-ai)\n  - [3D 生成](#3d-generation)\n    - [运动生成](#motion-generation)\n    - [点云生成](#point-cloud-generation)\n    - [3D 医疗生成](#3d-medical-generation)\n  - [多模态生成](#multimodal-generation)\n    - [统一理解与生成的多模态大模型](#unified-understanding-and-generation-multi-modal-llms)\n  - [其他生成](#other-generation)\n  - [基准测试\u002F分析](#benchmark--analysis)\n  - [推理对齐](#reasoning-alignment)\n  - [安全](#safety)\n  - [加速](#accelerating)\n  - [稳定性与扩展](#stability--scaling)\n  - [教程](#tutorial)\n  - [评估指标](#evaluation-metrics)\n-----\n\n### 图像生成\n#### 无条件\u002F类别条件图像生成\n  - ##### 像素级生成\n    - **[ICML, 2021 口头报告]** 通过分布平滑改进自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.15089) [代码](https:\u002F\u002Fgithub.com\u002Fchenlin9\u002FAutoregressive-Modeling-with-Distribution-Smoothing)\n    - **[ICML, 2020]** **ImageGPT：** 
从像素进行生成式预训练 [论文](https:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fchen20s\u002Fchen20s.pdf)\n    - **[ICML, 2018]** **Image Transformer** [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1802.05751) [代码](https:\u002F\u002Fgithub.com\u002Fneocxi\u002Fpixelsnail-public)\n    - **[ICML, 2018]** **PixelSNAIL：** 一种改进的自回归生成模型 [论文](https:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fchen18h\u002Fchen18h.pdf) [代码](https:\u002F\u002Fgithub.com\u002Fneocxi\u002Fpixelsnail-public)\n    - **[ICML, 2017]** 并行多尺度自回归密度估计 [论文](https:\u002F\u002Fproceedings.mlr.press\u002Fv70\u002Freed17a.html)\n    - **[ICLR 工作坊, 2017]** **Gated PixelCNN：** 生成可解释且结构可控的图像 [论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=Hyvw0L9el)\n    - **[ICLR, 2017]** **PixelCNN++：** 通过离散逻辑混合似然及其他改进提升 PixelCNN [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1701.05517) [代码](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpixel-cnn)\n    - **[NeurIPS, 2016]** **PixelCNN** 条件图像生成与 PixelCNN 解码器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1606.05328) [代码](https:\u002F\u002Fgithub.com\u002Fanantzoid\u002FConditional-PixelCNN-decoder)\n    - **[ICML, 2016]** **PixelRNN** 像素递归神经网络 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1601.06759) [代码](https:\u002F\u002Fgithub.com\u002Fj-min\u002FPixelCNN)\n    \n  - ##### 令牌级生成\n    \n    ##### 令牌化器\n    - **[Arxiv, 2025.07]** 视觉基础模型作为自回归生成的有效视觉令牌化器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.08441) [代码](https:\u002F\u002Fgithub.com\u002FCVMI-Lab\u002FVFMTok)\n    - **[Arxiv, 2025.07]** 自回归图像生成的整体令牌化器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.02358) [代码](https:\u002F\u002Fgithub.com\u002FCVMI-Lab\u002FHita)\n    - **[Arxiv, 2025.06]** Instella-T2I：突破一维离散潜在空间图像生成的极限 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.21022)\n    - **[Arxiv, 2025.05]** D-AR：通过自回归模型实现扩散 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23660) [代码](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FD-AR)\n    - **[Arxiv, 2025.05]** 在一维潜在空间中学习自适应且具有时间因果性的视频令牌化 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.17011) [代码](https:\u002F\u002Fgithub.com\u002FVisionXLab\u002FAdapTok)\n    - **[Arxiv, 2025.04]** 为自回归图像生成提炼语义感知顺序 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.17069)\n    - **[Arxiv, 2025.04]** Token-Shuffle：迈向使用自回归模型的高分辨率图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.17789)\n    - **[CVPR, 2025]** 通过面向聚类的令牌预测改进自回归视觉生成 [代码](https:\u002F\u002Fgithub.com\u002Fsjtuplayer\u002FIAR) [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.00880)\n    - **[Arxiv, 2025.03]** 等变图像建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.18948) [代码](https:\u002F\u002Fgithub.com\u002Fdrx-code\u002FEquivariantModeling)\n    - **[Arxiv, 2025.03]** V2Flow：统一视觉令牌化与大型语言模型词汇表，用于自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07493) [代码](https:\u002F\u002Fgithub.com\u002Fzhangguiwei610\u002FV2Flow)\n    - **[Arxiv, 2025.02]** **FlexTok：** 将图像重采样为灵活长度的一维令牌序列 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.13967)\n    - **[Arxiv, 2025.01]** **ARFlow：** 混合线性注意力的自回归流 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.16085) [代码](https:\u002F\u002Fgithub.com\u002FTheFllood\u002FARFlow)\n    - **[Arxiv, 2024.12]** **TokenFlow：** 多模态理解与生成的统一图像令牌化器 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03069) [代码](https:\u002F\u002Fgithub.com\u002FByteFlow-AI\u002FTokenFlow)\n    - **[Arxiv, 2024.12]** 自回归视觉生成的下一补丁预测 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.15321) [代码](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FNext-Patch-Prediction)\n    - **[Arxiv, 2024.12]** 
XQ-GAN：一个用于自回归生成的开源图像令牌化框架 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01762) [代码](https:\u002F\u002Fgithub.com\u002Flxa9867\u002FImageFolder)\n    - **[Arxiv, 2024.12]** RandAR：仅解码器的随机顺序自回归视觉生成。 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01827) [代码](https:\u002F\u002Fgithub.com\u002Fziqipang\u002FRandAR) [项目](https:\u002F\u002Frand-ar.github.io\u002F)\n    - **[Arxiv, 2024.11]** 随机自回归视觉生成。 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.00776) [代码](https:\u002F\u002Fgithub.com\u002Fbytedance\u002F1d-tokenizer) [项目](https:\u002F\u002Fyucornetto.github.io\u002Fprojects\u002Frar.html)\n    - **[Arxiv, 2024.09]** **Open-MAGVIT2：** 民主化自回归视觉生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.04410) [代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FOpen-MAGVIT2)\n    - **[Arxiv, 2024.06]** **OmniTokenizer：** 用于视觉生成的图像-视频联合令牌化器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09399) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FOmniTokenizer)\n    - **[Arxiv, 2024.06]** 将 VQGAN 的码本大小扩展至 10 万，利用率高达 99% [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11837) [代码](https:\u002F\u002Fgithub.com\u002Fzh460045050\u002FVQGAN-LC)\n    - **[Arxiv, 2024.06]** **Titok** 一张图像在重建和生成中价值 32 个令牌 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.07550) [代码](https:\u002F\u002Fgithub.com\u002Fbytedance\u002F1d-tokenizer)\n    - **[Arxiv, 2024.06]** 小波变换是自回归图像生成所需的全部 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.19997)\n    - **[Arxiv, 2024.06]** **LlamaGen** 自回归模型胜过扩散模型：Llama 实现规模化图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.06525) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FLlamaGen)\n    - **[ICLR, 2024]** **MAGVIT-v2** 语言模型胜过扩散模型——令牌化器是视觉生成的关键 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.05737)\n    - **[ICLR, 2024]** **FSQ** 有限标量量化：让 VQ-VAE 变得简单 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.15505) [代码](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fgoogle-research\u002Ftree\u002Fmaster\u002Ffsq)\n    - **[ICCV, 2023]** **Efficient-VQGAN：** 以高效的视觉 Transformer 实现高分辨率图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.05400)\n    - **[CVPR, 2023]** 迈向精确的图像编码：利用动态矢量量化改进自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.11718) [代码](https:\u002F\u002Fgithub.com\u002FCrossmodalGroup\u002FDynamicVectorQuantization)\n    - **[CVPR, 2023，亮点]** **MAGVIT：** 掩码生成式视频 Transformer [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.05199)\n    - **[NeurIPS, 2023]** **MoVQ：** 调制量化向量以实现高保真图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.09002)\n    - **[BMVC, 2022]** 使用多模态交叉量化器进行无条件图文对生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07537) [代码](https:\u002F\u002Fgithub.com\u002Fttumyche\u002FMXQ-VAE)\n    - **[CVPR, 2022]** **RQ-VAE** 利用残差量化进行自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.01941) [代码](https:\u002F\u002Fgithub.com\u002Fkakaobrain\u002Frq-vae-transformer)\n    - **[ICLR, 2022]** **ViT-VQGAN** 改进的 VQGAN 实现矢量量化图像建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.04627)\n    - **[PMLR, 2021]** 利用稀疏表示生成图像 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.03841)\n    - **[CVPR, 2021]** **VQGAN** 用变压器驾驭高分辨率图像合成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.09841) [代码](https:\u002F\u002Fgithub.com\u002FCompVis\u002Ftaming-transformers)\n    - **[NeurIPS, 2019]** 利用 VQ-VAE-2 生成多样且高保真的图像 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1906.00446) [代码](https:\u002F\u002Fgithub.com\u002Frosinality\u002Fvq-vae-2-pytorch)\n    - **[NeurIPS, 2017]** **VQ-VAE** 神经离散表征学习 
[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1711.00937)\n\n##### 自回归建模\n    - **[Arxiv, 2025.11]** InfinityStar：用于视觉生成的统一时空自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.04675) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FInfinityStar)\n    - **[Arxiv, 2025.10]** FARMER：基于像素的流自回归Transformer [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.23588)\n    - **[Arxiv, 2025.10]** SSD：空间-语义头解耦以实现高效的自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.18716)\n    - **[NeurIPS, 2025]** 视觉自回归模型在推理时间扩展性上超越扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.16751)\n    - **[NeurIPS, 2025]** 更好更快的自回归图像生成：从熵的角度 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.09012) [代码](https:\u002F\u002Fgithub.com\u002Fkrennic999\u002FARsample)\n    - **[Arxiv, 2025.09]** 超球形潜在变量提升连续标记自回归生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.24335) [代码](https:\u002F\u002Fgithub.com\u002Fguolinke\u002FSphereAR)\n    - **[Arxiv, 2025.09]** 随心而行：为自回归图像生成扩展置信度 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.26376) [代码](https:\u002F\u002Fgithub.com\u002FEnVision-Research\u002FScalingAR)\n    - **[NeurIPS, 2025]** 先理解再生成：自引导训练用于自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.15185) [代码](https:\u002F\u002Fgithub.com\u002Fyuexy\u002FST-AR)\n    - **[Arxiv, 2025.08]** 利用判别码本先验进行自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.10719)\n    - **[Arxiv, 2025.08]** NextStep-1：迈向大规模连续标记自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.10711) [代码](https:\u002F\u002Fgithub.com\u002Fstepfun-ai\u002FNextStep-1)\n    - **[Arxiv, 2025.07]** 频率感知自回归建模用于高效高分辨率图像合成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.20454) [代码](https:\u002F\u002Fgithub.com\u002FCaesarhhh\u002FSparseVAR)\n    - **[Arxiv, 2025.07]** TTS-VAR：视觉自回归生成的测试时缩放框架 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.18537) [代码](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002FTTS-VAR)\n    - **[Arxiv, 2025.07]** 转移匹配：可扩展且灵活的生成式建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23589)\n    - **[Arxiv, 2025.07]** 重新思考离散标记：将其视为连续自回归图像合成的条件 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.01756)\n    - **[CVPR, 2025]** OmniGen：统一图像生成 [论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FXiao_OmniGen_Unified_Image_Generation_CVPR_2025_paper.html) [代码](https:\u002F\u002Fgithub.com\u002FVectorSpaceLab\u002FOmniGen)\n    - **[Arxiv, 2025.06]** AR-RAG：用于图像生成的自回归检索增强 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.06962) [代码](https:\u002F\u002Fgithub.com\u002FPLUM-Lab\u002FAR-RAG)\n    - **[Arxiv, 2025.06]** 将自回归Transformer与扩散模型结合，采用多参考自回归 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09482) [代码](https:\u002F\u002Fgithub.com\u002FTransDiff\u002FTransDiff)\n    - **[Arxiv, 2025.06]** MADFormer：混合自回归与扩散Transformer用于连续图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.07999)\n    - **[Arxiv, 2025.06]** SpectralAR：频谱自回归视觉生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.10962) [代码](https:\u002F\u002Fgithub.com\u002Fhuang-yh\u002FSpectralAR)\n    - **[Arxiv, 2025.06]** AliTok：迈向标记器与自回归模型之间序列建模的一致性 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.05289) [代码](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002Falitok)\n    - **[Arxiv, 2025.05]** DetailFlow：通过下一细节预测实现的1D粗细结合自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.21473) [代码](https:\u002F\u002Fgithub.com\u002FByteFlow-AI\u002FDetailFlow)\n    - **[Arxiv, 2025.05]** TensorAR：自回归图像生成中只需精细化即可 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16324)\n    - **[Arxiv, 2025.05]** 
MVAR：具有尺度和空间马尔可夫条件的视觉自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.12742) [代码](https:\u002F\u002Fgithub.com\u002FLabShuHangGU\u002FMVAR)\n    - **[ICML, 2025]** 通过分数最大化实现连续视觉自回归生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07812) [代码](https:\u002F\u002Fgithub.com\u002Fshaochenze\u002FEAR)\n    - **[Arxiv, 2025.04]** GigaTok：将视觉标记器扩展至30亿参数以支持自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.08736) [代码](https:\u002F\u002Fgithub.com\u002FSilentView\u002FGigaTok)\n    - **[Arxiv, 2025.03]** D2C：利用离散标记释放连续自回归图像生成的潜力 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.17155)\n    - **[Arxiv, 2025.03]** 桥接连续与离散标记以实现自回归视觉生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.16430) [代码](https:\u002F\u002Fgithub.com\u002Fyuqingwang1029\u002FTokenBridge)\n    - **[Arxiv, 2025.03]** 带有随机并行解码的自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.10568) [代码](https:\u002F\u002Fgithub.com\u002Fhp-l33\u002FARPG)\n    - **[Arxiv, 2025.03]** 方向感知的对角线自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.11129)\n    - **[Arxiv, 2025.03]** 邻近自回归建模用于高效视觉生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.10696) [代码](https:\u002F\u002Fgithub.com\u002FThisisBillhe\u002FNAR)\n    - **[Arxiv, 2025.03]** NFIG：带有下一频率预测的自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.07076)\n    - **[Arxiv, 2025.03]** 带有连续标记的频率自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.05305) [代码](https:\u002F\u002Fgithub.com\u002FyuhuUSTC\u002FFAR)\n    - **[Arxiv, 2025.03]** ARINAR：双层自回归逐特征生成模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.02883) [代码](https:\u002F\u002Fgithub.com\u002FQinyu-Allen-Zhao\u002FArinar)\n    - **[Arxiv, 2025.02]** 不止于下一个标记：自回归视觉生成中的下一-X预测 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20388) [代码](https:\u002F\u002Fgithub.com\u002FOliverRensu\u002FxAR) [项目](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20388)\n    - **[Arxiv, 2025.02]** 分形生成模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17437) [代码](https:\u002F\u002Fgithub.com\u002FLTH14\u002Ffractalgen)\n    - **[Arxiv, 2025.01]** 关于从视频中进行自回归预训练的实证研究 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.05453)\n    - **[Arxiv, 2024.12]** E-CAR：通过多阶段建模实现高效的连续自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.14170)\n    - **[Arxiv, 2024.12]** 驯服可扩展的视觉标记器以支持自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.02692) [代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FSEED-Voken)\n    - **[Arxiv, 2024.11]** 样本和参数高效的自回归图像模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.15648) [代码](https:\u002F\u002Fgithub.com\u002Felad-amrani\u002Fxtra)\n    - **[Arxiv, 2024.01]** 大型自回归图像模型的可扩展预训练 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.08541) [代码](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-aim)\n    - **[Arxiv, 2024.10]** ImageFolder：带有折叠标记的自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.01756) [代码](https:\u002F\u002Fgithub.com\u002Flxa9867\u002FImageFolder)\n    - **[Arxiv, 2024.10]** **SAR** 使用集合自回归建模定制你的视觉自回归配方 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.10511) [代码](https:\u002F\u002Fgithub.com\u002Fpoppuppy\u002FSAR)\n    - **[Arxiv, 2024.08]** **AiM** 使用Mamba实现可扩展的自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12245) [代码](https:\u002F\u002Fgithub.com\u002Fhp-l33\u002FAiM)\n    - **[Arxiv, 2024.06]** **ARM** 在视觉领域使用Mamba进行自回归预训练 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07537) [代码](https:\u002F\u002Fgithub.com\u002FOliverRensu\u002FARM)\n    - **[Arxiv, 2024.06]** **MAR** 无需向量量化即可进行自回归图像生成 
[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11838) [代码](https:\u002F\u002Fgithub.com\u002FLTH14\u002Fmar)\n    - **[Arxiv, 2024.06]** **LlamaGen** 自回归模型击败扩散模型：Llama用于可扩展图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.06525) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FLlamaGen)\n    - **[ICML, 2024]** **DARL**：去噪自回归表征学习 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.05196)\n    - **[ICML, 2024]** **DisCo-Diff**：用离散潜在变量增强连续扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03300) [代码](https:\u002F\u002Fgithub.com\u002Fgcorso\u002Fdisco-diffdock)\n    - **[ICML, 2024]** **DeLVM**：通过顺序自回归实现数据高效的大型视觉模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.04841) [代码](https:\u002F\u002Fgithub.com\u002Fggjy\u002FDeLVM)\n    - **[AAAI, 2023]** **SAIM** 探索用于视觉表征的随机自回归图像建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.01610) [代码](https:\u002F\u002Fgithub.com\u002Fqiy20\u002FSAIM)\n    - **[NeurIPS, 2021]** **ImageBART**：使用多项式扩散提供上下文以进行自回归图像合成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.08827) [代码](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fimagebart)\n    - **[CVPR, 2021]** **VQGAN**：驯服Transformer以实现高分辨率图像合成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.09841) [代码](https:\u002F\u002Fgithub.com\u002FCompVis\u002Ftaming-transformers)\n    - **[ECCV, 2020]** **RAL**：在自回归图像生成中引入强化对抗学习 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.09923)\n    - **[NeurIPS, 2019]** 使用VQ-VAE-2生成多样且高保真图像 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1906.00446) [代码](https:\u002F\u002Fgithub.com\u002Frosinality\u002Fvq-vae-2-pytorch)\n    - **[NeurIPS, 2017]** **VQ-VAE**：神经离散表征学习 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1711.00937)\n\n  - ##### 分尺度自回归生成\n    - **[Arxiv, 2025.10]** 用于视觉自回归模型的动态专家混合 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08629)\n    - **[Arxiv, 2025.09]** SoftCFG：不确定性引导的稳定指导用于视觉自回归模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.00996)\n    - **[Arxiv, 2025.09]** 并非所有标记都同等重要：改进视觉自回归模型中的指导 [论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.23876)\n    - **[Arxiv, 2025.09]** 分尺度VAR实际上是离散扩散 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.22636) [代码](https:\u002F\u002Fgithub.com\u002FVIROBO-15\u002FSRDD) [项目](https:\u002F\u002Fvirobo-15.github.io\u002Fsrdd.github.io\u002F)\n    - **[Arxiv, 2025.05]** 用于模型无关联邦MRI重建的生成式自回归Transformer [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04521) [代码](https:\u002F\u002Fgithub.com\u002Ficon-lab\u002FFedGAT)\n    - **[ICML, 2025]** 连续视觉自回归生成通过分数最大化 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07812) [代码](https:\u002F\u002Fgithub.com\u002Fshaochenze\u002FEAR)\n    - **[Arxiv, 2025.02]** **FlexVAR**：无残差预测的灵活视觉自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20313) [代码](https:\u002F\u002Fgithub.com\u002Fjiaosiyu1999\u002FFlexVAR)\n    - **[Arxiv, 2024.12]** **FlowAR**：分尺度自回归图像生成与流匹配相结合 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.15205) [代码](https:\u002F\u002Fgithub.com\u002FOliverRensu\u002FFlowAR)\n    - **[Arxiv, 2024.11]** **M-VAR**：解耦的分尺度自回归建模用于高质量图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.10433) [代码](https:\u002F\u002Fgithub.com\u002FOliverRensu\u002FMVAR)\n    - **[NeurIPS 2024最佳论文]** **视觉自回归建模**：通过下一尺度预测实现可扩展图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.02905) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FVAR)\n\n#### 文本到图像生成\n- ##### 逐标记生成\n     - **[ICML, 2025]** 离散JEPA：无需重建即可学习离散标记表示 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.14373)\n     - **[Arxiv, 2025.04]** Lumina-mGPT 2.0：独立的自回归图像建模 
[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.17801) [代码](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-mGPT-2.0)\n     - **[Arxiv, 2025.03]** Lumina-Image 2.0：统一且高效的图像生成框架 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21758?) [代码](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-Image-2.0)\n     - **[Arxiv, 2024.12]** Liquid：语言模型是可扩展的多模态生成器 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.04332) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FLiquid)\n     - **[Arxiv, 2024.12]** Infinity：面向高分辨率图像合成的位级自回归建模扩展 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.04431) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FInfinity)\n     - **[Arxiv, 2024.12]** TokenFlow：用于多模态理解和生成的统一图像标记器 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03069) [代码](https:\u002F\u002Fgithub.com\u002FByteFlow-AI\u002FTokenFlow)\n     - **[Arxiv, 2024.11]** 通过预测下一个标记实现高分辨率图像合成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.14808) [代码](https:\u002F\u002Fd-jepa.github.io\u002Ft2i\u002F)\n     - **[Arxiv, 2024.10]** **Fluid**：利用连续标记扩展自回归文本到图像生成模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.13863)\n     - **[Arxiv, 2024.10]** **DART**：去噪自回归Transformer，用于可扩展的文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.08159) [代码](https:\u002F\u002Fgithub.com\u002Fdaixiangzi\u002FVAR-CLIP)\n     - **[Arxiv, 2024.10]** **DnD-Transformer**：视觉—语言智能的火花：用于高效细粒度图像生成的二维自回归Transformer [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.01912) [代码](https:\u002F\u002Fgithub.com\u002Fchenllliang\u002FDnD-Transformer)\n     - **[Arxiv, 2024.08]** **Lumina-mGPT**：通过多模态生成式预训练点亮灵活的逼真文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.02657) [代码](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-mGPT)\n     - **[Arxiv, 2024.07]** **MARS**：自回归模型混合，用于细粒度文本到图像合成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.07614) [代码](https:\u002F\u002Fgithub.com\u002Ffusiming3\u002FMARS)\n     - **[Arxiv, 2024.06]** **LLM4GEN**：利用LLM的语义表示进行文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00737) [代码](https:\u002F\u002Fgithub.com\u002FYUHANG-Ma\u002FLLM4GEN)\n     - **[Arxiv, 2024.06]** **STAR**：基于尺度的文本到图像生成，利用自回归表示 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.10797) [代码](https:\u002F\u002Fgithub.com\u002Fkrennic999\u002FSTAR)\n     - **[Arxiv, 2024.05]** **Kaleido Diffusion**：通过自回归潜在建模改进条件扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.21048)\n     - **[CVPR, 2024]** **超越文本**：冻结大型语言模型在视觉信号理解中的应用 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.07874) [代码](https:\u002F\u002Fgithub.com\u002Fzh460045050\u002FV2L-Tokenizer)\n     - **[TOG, 2023]** **IconShop**：基于文本引导的向量图标合成，使用自回归Transformer（*svg图像*） [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.14400) [代码](https:\u002F\u002Fgithub.com\u002Fkingnobro\u002FIconShop)\n     - **[NeurIPS, 2023]** **LQAE** 语言量化自编码器：迈向无监督文本—图像对齐 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.00902) [代码](https:\u002F\u002Fgithub.com\u002Flhao499\u002Flanguage-quantized-autoencoders)\n     - **[TMLR, 2022.06]** **Parti**：扩展自回归模型以实现内容丰富的文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.10789) [代码](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fparti)\n     - **[NeurIPS, 2022]** **CogView2**：通过层次化Transformer实现更快更好的文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.14217) [代码](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCogView2)\n     - **[ECCV, 2022]** **Make-A-Scene**：基于场景的文本到图像生成，结合人类先验知识 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.13131)\n     - **[CVPR, 2022]** 
**VQ-Diffusion**：用于文本到图像合成的向量量化扩散模型 [论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FGu_Vector_Quantized_Diffusion_Model_for_Text-to-Image_Synthesis_CVPR_2022_paper.html) [代码](https:\u002F\u002Fgithub.com\u002Fcientgu\u002FVQ-Diffusion)\n     - **[CVPR, 2022]** **Make-A-Story**：视觉记忆条件下的连贯故事生成（*讲故事*） [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.13319)\n     - **[NeurIPS, 2021]** **CogView**：通过Transformer掌握文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.13290) [代码](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCogView)\n     - **[Arxiv, 2021.02]** **DALL-E 1**：零样本文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.12092)\n\n- ##### 按尺度生成\n     - **[Arxiv, 2024.12]** Infinity：面向高分辨率图像合成的位级自回归建模扩展 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.04431) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FInfinity)\n     - **[Arxiv, 2024.12]** SWITTI：为文本到图像生成设计按尺度划分的Transformer [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01819) [代码](https:\u002F\u002Fgithub.com\u002Fyandex-research\u002Fswitti) [页面](https:\u002F\u002Fyandex-research.github.io\u002Fswitti\u002F)\n     - **[Arxiv, 2024.10]** **HART**：利用混合自回归Transformer实现高效视觉生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.10812) [代码](https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002Fhart)\n     - **[Arxiv, 2024.08]** **VAR-CLIP**：带有视觉自回归建模的文本到图像生成器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.01181) [代码](https:\u002F\u002Fgithub.com\u002Fdaixiangzi\u002FVAR-CLIP)\n     - **[Arxiv, 2024.06]** **STAR**：基于尺度的文本到图像生成，利用自回归表示 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.10797) [代码](https:\u002F\u002Fgithub.com\u002Fkrennic999\u002FSTAR)\n\n#### 图像到图像转换\n  - **[ICCV, 2025]** **CycleVAR**：将自回归模型重新用于无监督的一步式图像转换 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23347v1)\n  - **[ICML研讨会, 2024]** **MIS** 多对多图像生成，使用自回归扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.03109)\n  - **[Arxiv, 2024.03]** SceneScript：用自回归结构化语言模型重建场景 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13064) [项目](https:\u002F\u002Fwww.projectaria.com\u002Fscenescript\u002F)\n  - **[CVPR, 2024]** 序列建模使大型视觉模型能够进行可扩展学习 [论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FBai_Sequential_Modeling_Enables_Scalable_Learning_for_Large_Vision_Models_CVPR_2024_paper.html)\n  - **[ECCV, 2022]** **QueryOTR**：通过查询进行外延绘画 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.05312) [代码](https:\u002F\u002Fgithub.com\u002FKaiseem\u002FQueryOTR)\n  - **[NeurIPS, 2022]** 通过图像修复进行视觉提示 [论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Fhash\u002F9f09f316a3eaf59d9ced5ffaefe97e0f-Abstract-Conference.html)\n  - **[MM, 2021]** 多样化的图像修复，采用双向和自回归Transformer [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.12335)\n\n#### 图像编辑\n  - **[Arxiv, 2025.09]** 面向下一尺度自回归文本驱动图像编辑的离散噪声反演 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.01984v1) \n  - **[Arxiv, 2025.08]** 基于指令引导的视觉自回归模型用于图像编辑 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.15772) [代码](https:\u002F\u002Fgithub.com\u002FHiDream-ai\u002FVAREdit) \n  - **[Arxiv, 2025.08]** NEP：通过预测下一个编辑标记进行自回归图像编辑 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.06044) [代码](https:\u002F\u002Fnep-bigai.github.io\u002F)\n  - **[Arxiv, 2025.07]** SCALAR：尺度可控的视觉自回归学习 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.19946) \n  - **[Arxiv, 2025.04]** 锚定标记匹配：无需训练的AR图像编辑中的隐式结构锁定 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.10434) 
[代码](https:\u002F\u002Fgithub.com\u002FhutaiHang\u002FATM)\n  - **[ICCV, 2025]** 基于视觉自回归模型的免训练文本引导图像编辑 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.23897) [代码](https:\u002F\u002Fgithub.com\u002Fwyf0912\u002FAREdit)\n  - **[Arxiv, 2025.01]** EditAR：基于自回归模型的统一条件生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.04699) [代码](https:\u002F\u002Fgithub.com\u002FJitengMu\u002FEditAR)\n  - **[Arxiv, 2024.06]** CAR：用于视觉生成的可控自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.04671) [代码](https:\u002F\u002Fgithub.com\u002FMiracleDance\u002FCAR)\n  - **[ICLR, 2025]** **ControlAR**：基于自回归模型的可控图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.02705) [代码](https:\u002F\u002Fgithub.com\u002Fhustvl\u002FControlAR)\n  - **[Arxiv, 2024.06]** **ControlVAR**：探索可控的视觉自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09750) [代码](https:\u002F\u002Fgithub.com\u002Flxa9867\u002FControlVAR)\n  - **[Arxiv, 2024.06]** 医学视觉通用模型：在上下文中统一医学影像任务 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.05565)\n  - **[Arxiv, 2024.04]** **M2M** 多对多图像生成与自回归扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.03109)\n  - **[ECCV, 2022]** **VQGAN-CLIP**：自然语言指导下的开放域图像生成与编辑 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.08583)\n  - **[ECCV, 2022]** **Make-A-Scene**：基于场景的文本到图像生成，结合人类先验知识 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.13131)\n  - **[ICIP, 2021]** **MSGNet**：生成包含多个连贯物体的高保真标注图像 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.12150)\n\n\n\n### 视频生成\n#### 无条件视频生成\n   - **[Arxiv, 2025.03]** FAR：适用于短时和长时上下文视频建模的帧自回归模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.19325) [代码](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FFAR)\n   - **[Arxiv, 2025.03]** HiTVideo：用于增强文本到视频生成的分层标记器，结合自回归大型语言模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.11513) \n   - **[Arxiv, 2025.03]** AR-Diffusion：异步自回归扩散视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07418) [代码](https:\u002F\u002Fgithub.com\u002Fiva-mzsun\u002FAR-Diffusion)\n   - **[Arxiv, 2025.02]** 下一个区块预测：半自回归建模的视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.07737) [代码](https:\u002F\u002Fgithub.com\u002FRenShuhuai-Andy\u002FNBP)\n   - **[Arxiv, 2025.01]** 用于掩码自回归视频生成的驯服教师强迫法 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.12389) [代码](https:\u002F\u002Fmagivideogen.github.io\u002F)\n   - **[Arxiv, 2024.10]** **LARP**：使用学习型自回归生成先验对视频进行标记化 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.21264)\n   - **[ECCV 2024]** **ST-LLM**：大型语言模型是高效的时间序列学习者 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.00308) [代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FST-LLM)\n   - **[ICLR, 2024]** **MAGVIT-v2** 语言模型胜过扩散模型——标记化是视觉生成的关键 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.05737)\n   - **[CVPR, 2023]** **PVDM** 投影潜在空间中的视频概率扩散模型 [论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FYu_Video_Probabilistic_Diffusion_Models_in_Projected_Latent_Space_CVPR_2023_paper.html)\n   - **[ECCV, 2022]** 使用时间无关的VQGAN和时间敏感的Transformer进行长视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.03638) [代码](https:\u002F\u002Fgithub.com\u002FSongweiGe\u002FTATS)\n   - **[Arxiv, 2021.04]** **VideoGPT**：利用VQ-VAE和Transformer进行视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.07143)\n   - **[Arxiv, 2020.06]** 潜在空间视频Transformer [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.10704) [代码](https:\u002F\u002Fgithub.com\u002Frakhimovv\u002Flvt)\n   - **[ICLR, 2020]** 自回归视频模型的扩展 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1906.02634)\n   - **[CVPR, 2018]** **MoCoGAN**：分解运动与内容以进行视频生成 
[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FTulyakov_MoCoGAN_Decomposing_Motion_CVPR_2018_paper.pdf) [代码](https:\u002F\u002Fgithub.com\u002Fsergeytulyakov\u002Fmocogan)\n   - **[ICML, 2017]** 视频像素网络 [论文](https:\u002F\u002Fproceedings.mlr.press\u002Fv70\u002Fkalchbrenner17a.html?ref=https:\u002F\u002Fgithubhelp.com)\n\n#### 条件视频生成\n   - ##### 文本到视频生成\n    - **[Arxiv, 2025.10]** 均匀离散扩散与度量路径用于视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.24717) [代码](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FURSA) [页面](https:\u002F\u002Fbitterdhg.github.io\u002FURSA_page\u002F)\n     - **[Arxiv, 2025.10]** 超越下一帧预测的自回归视频生成 [论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.24081) \n     - **[Arxiv, 2025.07]** Lumos-1：从统一模型视角看自回归视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.08801) [代码](https:\u002F\u002Fgithub.com\u002Falibaba-damo-academy\u002FLumos)\n     - **[Arxiv, 2025.05]** 生成式预训练自回归扩散Transformer [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07344) \n     - **[Arxiv, 2024.12]** **DiCoDe**：用于语言模型自回归视频生成的扩散压缩深度标记 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.04446) [页面](https:\u002F\u002Fliyizhuo.com\u002FDiCoDe\u002F)\n     - **[Arxiv, 2024.11]** Ca2-VDM：具有因果生成和缓存共享的高效自回归视频扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.16375) [代码](https:\u002F\u002Fgithub.com\u002FDawn-LX\u002FCausalCache-VDM\u002F)\n     - **[Arxiv, 2024.10]** **ARLON**：利用自回归模型增强扩散Transformer以生成长视频 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.20502) [代码](http:\u002F\u002Faka.ms\u002Farlon)\n     - **[Arxiv, 2024.10]** 渐进式自回归视频扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.08151) [代码](https:\u002F\u002Fgithub.com\u002Fdesaixie\u002Fpa_vdm)\n     - **[Arxiv, 2024.10]** **Pyramid Flow**：用于高效视频生成建模的金字塔流匹配 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05954) [代码](https:\u002F\u002Fgithub.com\u002Fjy0205\u002FPyramid-Flow)\n     - **[Arxiv, 2024.10]** **Loong**：使用自回归语言模型生成分钟级长视频 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.02757)\n     - **[Arxiv, 2024.06]** **Pandora**：迈向具备自然语言动作与视频状态的通用世界模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09455) [代码](https:\u002F\u002Fgithub.com\u002Fmaitrix-org\u002FPandora)\n     - **[Arxiv, 2024.06]** **iVideoGPT**：交互式VideoGPT是可扩展的世界模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15223) [代码](https:\u002F\u002Fgithub.com\u002Fthuml\u002FiVideoGPT)\n     - **[Arxiv, 2024.06]** **ViD-GPT**：在视频扩散模型中引入GPT风格的自回归生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.10981) [代码](https:\u002F\u002Fgithub.com\u002FDawn-LX\u002FCausal-VideoGen)\n     - **[Arxiv, 2024.02]** **LWM**：基于分块环形注意力的百万长度视频与语言世界模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.08268) [代码](https:\u002F\u002Fgithub.com\u002FLargeWorldModel\u002FLWM)\n     - **[CVPR, 2024]** **ART-V**：利用扩散模型进行自回归文本到视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.18834)\n     - **[NeurIPS, 2022]** **NUWA-Infinity**：通过自回归叠加实现无限视觉合成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.09814) [代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)\n     - **[ECCV, 2022]** **NÜWA**：用于神经视觉世界创造的视觉合成预训练 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.12417) [代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)\n     - **[Arxiv, 2022.05]** **CogVideo**：通过Transformer进行大规模文本到视频生成预训练 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2205.15868) [代码](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCogVideo)\n     - **[Arxiv, 2022.05]** **GODIVA**：根据自然描述生成开放领域视频 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.14806)\n 
    - **[IJCAI, 2021]** **IRC-GAN**：用于文本到视频生成的内省递归卷积GAN。 [论文](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2019\u002F0307.pdf)\n    \n   - ##### 视觉条件视频生成\n     - **[Arxiv, 2025.06]** VideoMAR：使用连续标记的自回归视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.14168) [代码](https:\u002F\u002Fyuhuustc.github.io\u002F\u002Fprojects\u002FVideoMAR.html)\n     - **[Arxiv, 2025.06]** DeepVerse：作为世界模型的4D自回归视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.01103) [代码](https:\u002F\u002Fgithub.com\u002FSOTAMak1r\u002FDeepVerse)\n     - **[Arxiv, 2025.05]** 通过下一片段扩散的Video-GPT [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.12489) [代码](https:\u002F\u002Fgithub.com\u002Fzhuangshaobin\u002FVideo-GPT)\n     - **[Arxiv, 2025.04]** GenDoP：作为摄影指导的自回归摄像机轨迹生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.07083) [代码](https:\u002F\u002Fgithub.com\u002F3DTopia\u002FGenDoP)\n     - **[Arxiv, 2024.10]** **MarDini**：用于规模化视频生成的掩码自回归扩散 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.20502)\n     - **[CVPR, 2024]** **LVM** 序列建模使大型视觉模型能够进行可扩展学习 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.00785) [代码](https:\u002F\u002Fgithub.com\u002Fytongbai\u002FLVM)\n     - **[ICIP, 2022]** **HARP**：具有高保真图像生成器的自回归潜在视频预测 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.07143)\n     - **[Arxiv, 2021.03]** 使用VQVAE预测视频 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.01950)\n     - **[CVPR, 2021]** 利用cINNs进行随机图像到视频合成 [论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FDorkenwald_Stochastic_Image-to-Video_Synthesis_Using_cINNs_CVPR_2021_paper.pdf) [代码](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fimage2video-synthesis-using-cINNs)\n     - **[ICLR, 2019]** Eidetic 3d lstm：一种用于视频预测及更多用途的模型 [论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=B1lKS2AqtX)\n     - **[ICLR, 2018]** 随机变分视频预测 [论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=rk49Mg-CW)\n     - **[NeurIPS, 2017]** **Predrnn**：利用时空LSTM进行预测性学习的循环神经网络 [论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2017\u002Ffile\u002Fe5f6ad6ce374177eef023bf5d0c018b6-Paper.pdf)\n     - **[NeurIPS, 2015]** 卷积LSTM网络：一种用于降水临近预报的机器学习方法 [论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2015\u002Ffile\u002F07563a3fe3bbe7e3ba84431ad9d055af-Paper.pdf)\n\n- ##### 多模态条件视频生成\n      - **[Arxiv, 2025.01]** VideoAuteur: 通往长篇叙事视频生成之路  [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.06173) [代码](https:\u002F\u002Fgithub.com\u002Flambert-x\u002FVideoAuteur)\n      - **[Arxiv, 2024.12]** 无需向量量化自回归视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.14169) [代码](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FNOVA)\n      - **[ICML, 2024]** **Video-LaVIT**: 基于解耦视觉-动作标记化的统一视频-语言预训练 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.03161) [代码](https:\u002F\u002Fgithub.com\u002Fjy0205\u002FLaVIT)\n      - **[ICML, 2024]** **VideoPoet**: 用于零样本视频生成的大型语言模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.14125)\n      - **[CVPR, 2023]** **MAGVIT**: 掩码生成式视频Transformer [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.05199)\n      - **[CVPR, 2022]** 让它动起来：基于文本描述的可控图像到视频生成 [论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FHu_Make_It_Move_Controllable_Image-to-Video_Generation_With_Text_Descriptions_CVPR_2022_paper.pdf) [代码](https:\u002F\u002Fgithub.com\u002FYouncy-Hu\u002FMAGE)\n\n#### 自回归扩散驱动视频生成\n  - **[Arxiv, 2025.10]** 实时运动可控自回归视频扩散 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08131) 
[代码](https:\u002F\u002Fkesenzhao.github.io\u002FAR-Drag.github.io\u002F)\n  - **[Arxiv, 2025.10]** 自我驱动++：迈向分钟级高质量视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.02283) [代码](https:\u002F\u002Fgithub.com\u002Fjustincui03\u002FSelf-Forcing-Plus-Plus)\n  - **[Arxiv, 2025.10]** 打包并驱动你的记忆：长篇且一致的视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.01784) [代码](https:\u002F\u002Fgithub.com\u002Fwuxiaofei01\u002FPFVG)\n  - **[Arxiv, 2025.09]** LongLive：实时交互式长视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.22622) [代码](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FLongLive)\n  - **[Arxiv, 2025.09]** 滚动驱动：实时自回归长视频扩散 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.25161v1) [网页](https:\u002F\u002Fkunhao-liu.github.io\u002FRolling_Forcing_Webpage\u002F) [代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FRollingForcing)\n  - **[Arxiv, 2025.08]** MIDAS：通过实时自回归视频生成实现多模态交互式数字人合成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.19320) [项目](https:\u002F\u002Fchenmingthu.github.io\u002Fmilm\u002F)\n  - **[Arxiv, 2025.08]** 高质量与并行化自回归长视频生成的宏观-微观规划 [论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2508.03334) [项目](https:\u002F\u002Fnju-xunzhixiang.github.io\u002FAnchor-Forcing-Page\u002F) [代码](https:\u002F\u002Fgithub.com\u002Fxbxsxp9\u002FMMPL)\n  - **[Arxiv, 2025.05]** 自我驱动：弥合自回归视频扩散中的训练-测试差距 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.08009) [代码](https:\u002F\u002Fgithub.com\u002Fguandeh17\u002FSelf-Forcing)\n  - **[Arxiv, 2025.04]** **MAGI-1**: 规模化自回归视频生成 [论文](https:\u002F\u002Fstatic.magi.world\u002Fstatic\u002Ffiles\u002FMAGI_1.pdf) [代码](https:\u002F\u002Fgithub.com\u002FSandAI-org\u002FMAGI-1)\n  - **[Arxiv, 2025.04]** SkyReels-V2：无限长度电影生成模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.13074) [代码](https:\u002F\u002Fgithub.com\u002FSkyworkAI\u002FSkyReels-V2)\n  - **[Arxiv, 2025.04]** 在下一帧预测模型中打包输入帧上下文以进行视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.12626) [代码](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.12626)\n  - **[CVPR, 2025]** AR-Diffusion：基于自回归扩散的异步视频生成 [论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FSun_AR-Diffusion_Asynchronous_Video_Generation_with_Auto-Regressive_Diffusion_CVPR_2025_paper.html) [代码](https:\u002F\u002Fgithub.com\u002Fiva-mzsun\u002FAR-Diffusion)\n  - **[CVPR, 2025]** 从慢速双向到快速自回归视频扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07772) [代码](https:\u002F\u002Fgithub.com\u002Ftianweiy\u002FCausVid)\n  - **[NeurIPS, 2024]** FIFO-Diffusion：无需训练即可从文本生成无限视频 [论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2024\u002Fhash\u002Fa397986e0f34d4b1f0b640686ceaeff7-Abstract-Conference.html) [代码](https:\u002F\u002Fgithub.com\u002Fjjihwan\u002FFIFO-Diffusion_public)\n\n#### 身体化AI\n   - **[Arxiv, 2025.03]** HybridVLA：在统一的视觉-语言-行动模型中实现扩散与自回归的协同 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.10631) [代码](https:\u002F\u002Fgithub.com\u002FPKU-HMI-Lab\u002FHybrid-VLA)\n   - **[Arxiv, 2024.12]** **Diffusion-VLA**：通过统一的扩散与自回归扩展机器人基础模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03293) [页面](https:\u002F\u002Fdiffusion-vla.github.io\u002F)\n   - **[Arxiv, 2024.10]** **Gr-2**：具有网络规模知识的生成式视频-语言-行动模型，用于机器人操作 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.06158)\n   - **[Arxiv, 2024.05]** **iVideoGPT**：交互式VideoGPT是可扩展的世界模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15223)\n   - **[ICML, 2024]** **Genie**：生成式交互环境 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15223)\n   - **[ICLR, 2024]** **GR-1**：释放大规模视频生成预训练以用于视觉机器人操作 
[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.13139)\n   - **[ICLR, 2023]** **IRIS** Transformer是样本高效的世界模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.05199)\n\n### 3D生成\n#### 动作生成\n  - **[Arxiv, 2025.06]** 自回归表面切割 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.18017) [代码](https:\u002F\u002Fvictorcheung12.github.io\u002Fseamgpt\u002F)\n  - **[CVPR, 2025]** **Teller**: 基于自回归动作生成的实时流式音频驱动肖像动画 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.18429) [页面](https:\u002F\u002Fteller-avatar.github.io\u002F)\n  - **[CVPR, 2025]** **ScaMo**: 探索自回归动作生成模型中的规模定律 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14559) [代码](https:\u002F\u002Fgithub.com\u002Fshunlinlu\u002FScaMo_code)\n  - **[AAAI, 2024]** **AMD**: 自回归动作扩散 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.09381) [代码](https:\u002F\u002Fgithub.com\u002Ffluide1022\u002FAMD)\n  - **[ECCV, 2024]** **BAMM**: 双向自回归动作模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19435) [代码](https:\u002F\u002Fgithub.com\u002Fexitudio\u002FBAMM\u002F?tab=readme-ov-file)\n  - **[CVPR, 2023]** **T2M-GPT**: 基于离散表示从文本描述生成人体动作 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.06052)\n  - **[Arxiv, 2022]** **HiT-DVAE**: 基于层次化Transformer动力学VAE的人体动作生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.01565)\n  - **[ICCV, 2021口头报告]** **HuMoR**: 用于鲁棒姿态估计的3D人体动作模型 [论文](https:\u002F\u002Fgeometry.stanford.edu\u002Fprojects\u002Fhumor\u002Fdocs\u002Fhumor.pdf) [代码](https:\u002F\u002Fgithub.com\u002Fdavrempe\u002Fhumor)\n\n#### 点云生成\n   - **[Arxiv, 2025.06]** ShapeLLM-Omni: 用于3D生成与理解的原生多模态LLM [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.01853) [代码](https:\u002F\u002Fgithub.com\u002FJAMESYJL\u002FShapeLLM-Omni\u002F)\n   - **[Siggraph, 2025]** OctGPT: 基于八叉树的多尺度自回归模型用于3D形状生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.09975) [代码](https:\u002F\u002Fgithub.com\u002Foctree-nn\u002Foctgpt)\n   - **[Arxiv, 2025.04]** 基于八叉树的自适应标记化实现高效自回归形状生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02817) [页面](https:\u002F\u002Foat-3d.github.io\u002F)\n   - **[CVPR, 2025]** TreeMeshGPT: 基于自回归树序列的艺术化网格生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.11629) [代码](https:\u002F\u002Fgithub.com\u002Fsail-sg\u002FTreeMeshGPT)\n   - **[Arxiv, 2025]** 通过自回归上采样生成3D点云 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08594)\n   - **[Arxiv, 2024.02]** 将自回归模型在3D形状生成领域的容量与可扩展性推向极限 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12225)\n   - **[ECCV, 2022]** 通过规范映射进行自回归3D形状生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.01955)\n   - **[CVPR研讨会, 2023]** 八叉树Transformer：基于层次结构序列的自回归3D形状生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.12480)\n\n#### 3D医学生成\n  - **[Arxiv, 2024]** 用于3D医学图像表示的自回归序列建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.08691v1) \n  - **[Arxiv, 2024]** 医学视觉通用模型：在上下文中统一医学影像任务 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.05565) [代码](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.05565)\n  - **[MIDL, 2024]** 基于VQGAN和时序无关掩码Transformer的条件化3D脑肿瘤ROI生成 [论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LLoSHPorlM)\n  - **[NMI, 2024]** 脑部的真实感形态保持生成建模 [论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs42256-024-00864-0) [代码](https:\u002F\u002Fgithub.com\u002FAmigoLab\u002FBrainSynth)\n  - **[Arxiv, 2023]** 利用向量量化生成对抗网络在MRI中生成3D脑肿瘤区域 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01251)\n  - **[ICCV, 2023]** 使用条件向量量化编码扩散结合Transformer实现未对齐的2D到3D转换 
[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FCorona-Figueroa_Unaligned_2D_to_3D_Translation_with_Conditional_Vector-Quantized_Code_Diffusion_ICCV_2023_paper.pdf) [代码](https:\u002F\u002Fgithub.com\u002Fsamb-t\u002Fx2ct-vqvae)\n  - **[MICCAI, 2022]** 形态保持的自回归3D脑部生成建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.03177) [代码](https:\u002F\u002Fgithub.com\u002FAmigoLab\u002FSynthAnatomy)\n\n### 多模态生成\n#### 统一理解与生成的多模态大语言模型\n  - **[NeurIPS, 2025]** JavisGPT：用于音视频理解和生成的统一多模态大语言模型 [论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=MZoOpD9NHV) \n  - **[Arxiv, 2025.10]** 波粒二象性（连续-离散）双重视觉标记化，用于统一理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.01593) [代码](https:\u002F\u002Fgithub.com\u002FCHEN-YIZHU\u002FWPIT)\n  - **[Arxiv, 2025.10]** NExT-OMNI：迈向任意到任意的全模态基础模型，采用离散流匹配 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.13721) \n  - **[Arxiv, 2025.10]** Emu3.5：原生多模态模型是世界学习者 [论文](https:\u002F\u002Femu.world\u002FEmu35_tech_report.pdf) [代码](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FEmu3.5)\n  - **[Arxiv, 2025.10]** PairUni：用于统一多模态语言模型的成对训练 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.25682) [代码](https:\u002F\u002Fgithub.com\u002FHaochen-Wang409\u002FPairUni)\n  - **[Arxiv. 2025.10]** LightBagel：一种轻量级、双重融合框架，用于统一多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.22946) [页面](https:\u002F\u002Fucsc-vlaa.github.io\u002FLightBagel\u002F)\n  - **[Arxiv. 2025.10]** 用相机思考：一种以相机为中心的统一多模态模型，用于理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.08673) [代码](https:\u002F\u002Fgithub.com\u002FKangLiao929\u002FPuffin)\n  - **[Arxiv, 2025.10]** SRUM：面向统一多模态模型的细粒度自我奖励机制 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.12784) [代码](https:\u002F\u002Fgithub.com\u002FWayneJin0918\u002FSRUM)\n  - **[Arxiv, 2025.10]** UniFlow：一种用于视觉理解和生成的统一像素流标记器 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.10575) [代码](https:\u002F\u002Fgithub.com\u002FZhengrongYue\u002FUniFlow)\n  - **[Arxiv, 2025.10]** UniVideo：用于视频的统一理解、生成和编辑 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.08377) [页面](https:\u002F\u002Fcongwei1230.github.io\u002FUniVideo\u002F)\n  - **[Arxiv, 2025.10]** Ming-UniVision：使用统一的连续标记器进行图像理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.06590) [代码](https:\u002F\u002Fgithub.com\u002FinclusionAI\u002FMing-UniVision)\n  - **[Arxiv, 2025.10]** Lumina-DiMOO：一种全模态扩散大型语言模型，用于多模态生成和理解 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.06308) [代码](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-DiMOO) [页面](https:\u002F\u002Fsynbol.github.io\u002FLumina-DiMOO\u002F)\n  - **[Arxiv, 2025.09]** Query-Kontext：一种用于图像生成和编辑的统一多模态模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.26641) \n  - **[Arxiv, 2025.09]** Lavida-O：弹性大型掩码扩散模型，用于统一多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.19244) \n  - **[Arxiv, 2025.09]** MANZANO：一种简单且可扩展的统一多模态模型，配备混合视觉标记器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.16197) \n  - **[Arxiv, 2025.09]** RecA：重建对齐提升统一多模态模型性能 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.07295) [代码](https:\u002F\u002Fgithub.com\u002FHorizonWind2004\u002Freconstruction-alignment)\n  - **[Arxiv, 2025.09]** 交错推理以改善文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.06945) [代码](https:\u002F\u002Fgithub.com\u002FOsilly\u002FInterleaving-Reasoning-Generation)\n  - **[Arxiv, 2025.09]** OneCAT：仅解码器的自回归模型，用于统一理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.03498) [代码](https:\u002F\u002Fgithub.com\u002Fonecat-ai\u002Fonecat) 
[页面](https:\u002F\u002Fgithub.com\u002Fonecat-ai\u002Fonecat)\n  - **[Arxiv, 2025.08]** TBAC-UniImage：通过阶梯式扩散调优实现统一理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.08098) [代码](https:\u002F\u002Fgithub.com\u002FDruryXu\u002FTBAC-UniImage)\n  - **[Arxiv, 2025.08]** Bifrost-1：通过补丁级CLIP潜在表示连接多模态大语言模型和扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.05954) [代码](https:\u002F\u002Fgithub.com\u002FHL-hanlin\u002FBifrost-1)\n  - **[Arxiv, 2025.08]** Uni-COT：迈向跨文本和视觉的统一思维链推理 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.05606) [代码](https:\u002F\u002Fgithub.com\u002FFr0zenCrane\u002FUniCoT)\n  - **[Arxiv, 2025.08]** UniEdit-I：通过迭代理解、编辑和验证，实现无需训练的统一VLM图像编辑 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03142) \n  - **[Arxiv, 2025.08]** Skywork UniPic：用于视觉理解和生成的统一自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03320) [代码](https:\u002F\u002Fgithub.com\u002FSkyworkAI\u002FUniPic)\n  - **[Arxiv, 2025.07]** Omni-Video：民主化统一视频理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.06119) [代码](https:\u002F\u002Fgithub.com\u002FSAIS-FUXI\u002FOmni-Video)\n  - **[Arxiv, 2025.07]** Ovis-U1技术报告 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23044) [代码](https:\u002F\u002Fgithub.com\u002FAIDC-AI\u002FOvis-U1)\n  - **[Qwen, 2025.07]** Qwen VLo：从“理解”世界到“描绘”世界 [博客](https:\u002F\u002Fqwenlm.github.io\u002Fblog\u002Fqwen-vlo\u002F) \n  - **[ICCV, 2025]** USP：用于图像生成和理解的统一自监督预训练 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06132) [代码](https:\u002F\u002Fgithub.com\u002FAMAP-ML\u002FUSP)\n  - **[Arxiv, 2025.06]** UniCode²：用于统一多模态理解和生成的级联大规模编码本 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.20214) \n  - **[Arxiv, 2025.06]** OmniGen2：探索先进的多模态生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.18871) [代码](https:\u002F\u002Fgithub.com\u002FVectorSpaceLab\u002FOmniGen2)\n  - **[Arxiv, 2025.06]** 视觉作为一种方言：通过文本对齐的表征统一视觉理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.18898) [代码](https:\u002F\u002Fgithub.com\u002Fcsuhan\u002FTar)\n  - **[Arxiv, 2025.06]** UniFork：探索模态对齐以实现统一多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.17202) [代码](https:\u002F\u002Fgithub.com\u002Ftliby\u002FUniFork)\n  - **[Arxiv, 2025.06]** Show-o2：改进的原生统一多模态模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.15564) [代码](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FShow-o)\n  - **[Arxiv, 2025.06]** Ming-Omni：一种用于感知和生成的统一多模态模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09344) [代码](https:\u002F\u002Fgithub.com\u002FinclusionAI\u002FMing\u002Ftree\u002Fmain)\n  - **[Arxiv, 2025.06]** Pisces：一个用于图像理解和生成的自回归基础模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.10395) \n  - **[Arxiv, 2025.06]** UniWorld：高分辨率语义编码器，用于统一视觉理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.03147) [代码](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FUniWorld-V1)\n  - **[Arxiv, 2025.06]** ShapeLLM-Omni：一个用于3D生成和理解的原生多模态大语言模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.01853) [代码](https:\u002F\u002Fgithub.com\u002FJAMESYJL\u002FShapeLLM-Omni\u002F)\n  - **[Arxiv, 2025.05]** Muddit：借助统一的离散扩散模型，将生成能力从文本到图像扩展到更广泛领域 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23606) [代码](https:\u002F\u002Fgithub.com\u002FM-E-AGI-Lab\u002FMuddit)\n  - **[Arxiv, 2025.05]** OpenUni：一个用于统一多模态理解和生成的简单基线 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23661) [代码](https:\u002F\u002Fgithub.com\u002Fwusize\u002FOpenUni)\n  - **[Arxiv, 2025.05]** FUDOKI：基于离散流的统一理解和生成，通过动力学最优速度实现 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.20147)\n  - **[Arxiv, 2025.05]** MMaDA：多模态大型扩散语言模型 
[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.15809) [代码](https:\u002F\u002Fgithub.com\u002FGen-Verse\u002FMMaDA)\n  - **[Arxiv, 2025.05]** Ming-Lite-Uni：自然多模态交互的统一架构进展 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.02471) [代码](https:\u002F\u002Fgithub.com\u002FinclusionAI\u002FMing\u002Ftree\u002Fmain\u002FMing-unify)\n  - **[Arxiv, 2025.05]** 统一多模态预训练中的新兴属性 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14683) [代码](https:\u002F\u002Fgithub.com\u002Fbytedance-seed\u002FBAGEL)\n  - **[Arxiv, 2025.05]** BLIP3-o：一个完全开放的统一多模态模型家族——架构、训练和数据集 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.09568) [代码](https:\u002F\u002Fgithub.com\u002FJiuhaiChen\u002FBLIP3o)\n  - **[Arxiv, 2025.05]** Selftok：自回归、扩散及推理用的离散视觉标记 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07538) [项目](https:\u002F\u002Fselftok-team.github.io\u002Freport\u002F)\n  - **[Arxiv, 2025.05]** Nexus-Gen：一个用于图像理解、生成和编辑的统一模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.21356) [代码](https:\u002F\u002Fgithub.com\u002Fmodelscope\u002FNexus-Gen)\n  - **[Arxiv, 2025.05]** TokLIP：将视觉标记嫁接到CLIP上，用于多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.05422) [代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FTokLIP)\n  - **[Arxiv, 2025.05]** Mogao：一个用于交错多模态生成的全模态基础模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.05472) \n  - **[Arxiv, 2025.04]** **VARGPT-v1.1**：通过迭代指令调优和强化学习改进视觉自回归大型统一模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02949) [代码](https:\u002F\u002Fgithub.com\u002FVARGPT-family\u002FVARGPT-v1.1)\n  - **[Arxiv, 2025.04]** ILLUME+：通过双重视觉标记化和扩散精炼照亮统一MLLM [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.01934) [代码](https:\u002F\u002Fgithub.com\u002Fillume-unified-mllm\u002FILLUME_plus)\n  - **[OpenAI, 2025.03]** GPT-4o系统卡附录：原生图像生成 [论文](https:\u002F\u002Fcdn.openai.com\u002F11998be9-5319-4302-bfbf-1167e093f1fb\u002FNative_Image_Generation_System_Card.pdf)\n  - **[Arxiv, 2025.03]** 为统一多模态理解和生成协调视觉表征 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21979) [代码](https:\u002F\u002Fgithub.com\u002Fwusize\u002FHarmon)\n  - **[Arxiv, 2025.03]** 使用连续标记进行统一的自回归视觉生成和理解 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.13436) \n  - **[Arxiv, 2025.03]** DualToken：迈向通过双重视觉词汇统一视觉理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.14324) \n  - **[Arxiv, 2025.03]** OmniMamba：通过状态空间模型实现高效且统一的多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08686) [代码](https:\u002F\u002Fgithub.com\u002Fhustvl\u002FOmniMamba)\n  - **[Arxiv, 2025.02]** UniTok：用于视觉生成和理解的统一标记器 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20321) [代码](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FUniTok)\n  - **[Arxiv, 2025.02]** **HermesFlow**：无缝弥合多模态理解和生成之间的差距 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.12148) [代码](https:\u002F\u002Fgithub.com\u002FGen-Verse\u002FHermesFlow)\n  - **[Arxiv, 2025.02]** **QLIP**：文本对齐的视觉标记化统一了自回归多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.05178) [代码](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FQLIP)\n  - **[Arxiv, 2025.01]** **Janus-Pro**：通过数据和模型扩展实现统一多模态理解和生成 [论文](https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FJanus\u002Fblob\u002Fmain\u002Fjanus_pro_tech_report.pdf) [代码](https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FJanus)\n  - **[Arxiv, 2025.01]** **VARGPT**：在视觉自回归多模态大型语言模型中实现统一理解和生成 [论文](https:\u002F\u002Fpdf.arxiv.org\u002Fabs\u002F2501.12327) [代码](https:\u002F\u002Fgithub.com\u002FVARGPT-family\u002FVARGPT)\n  - **[Arxiv, 2024.12]** **LlamaFusion**：将预训练语言模型适配用于多模态生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.15188) \n  
- **[Arxiv, 2024.12]** **MetaMorph**：通过指令调优实现多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.14164) [页面](https:\u002F\u002Ftsb0601.github.io\u002Fmetamorph\u002F)\n  - **[Arxiv, 2024.12]** **Orthus**：具有模态特异性头部的自回归交错图像-文本生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.00127)\n  - **[Arxiv, 2024.12]** 多模态潜在语言建模与下一个标记扩散。 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.08635) \n  - **[Arxiv, 2024.12]** **ILLUME**：点亮您的LLM，使其能够看见、绘制并自我增强。 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.06673) \n  - **[Arxiv, 2024.11]** **JanusFlow**：调和自回归与修正流，以实现统一多模态理解和生成。 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.07975) [项目](https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FJanus)\n  - **[Arxiv, 2024.11]** 针对多模态大型语言模型的统一生成与判别训练。 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.00304) [项目](https:\u002F\u002Fsugar-mllm.github.io\u002F)\n  - **[Arxiv, 2024.10]** **D-JEPA**：采用联合嵌入预测架构进行去噪 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.03755) [项目](https:\u002F\u002Fd-jepa.github.io\u002F)\n  - **[Arxiv, 2024.10]** **Janus**：解耦视觉编码，以实现统一多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13848) [代码](https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FJanus)\n  - **[Arxiv, 2024.10]** **MMAR**：迈向无损多模态自回归概率建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.10798) [代码](https:\u002F\u002Fgithub.com\u002FydcUstc\u002FMMAR)\n  - **[Arxiv, 2024.10]** **ACDC**：利用扩散校正进行自回归连贯多模态生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.04721) [代码](https:\u002F\u002Facdc2025.github.io\u002F)\n  - **[Arxiv, 2024.09]** **Emu3**：下一个标记预测就是你所需要的 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18869) [代码](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FEmu3) [项目](https:\u002F\u002Femu.baai.ac.cn\u002Fabout)\n  - **[Arxiv, 2024.09]** **VILA-U**：一个整合视觉理解和生成的统一基础模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.04429) [代码](https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002Fvila-u)\n  - **[Arxiv, 2024.09]** **MIO**：一个基于多模态标记的基础模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.17692) \n  - **[Arxiv, 2024.08]** **Show-o**：一个单一的Transformer即可统一多模态理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12528) [代码](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FShow-o)\n  - **[Arxiv, 2024.08]** **Transfusion**：用一个多模态模型预测下一个标记并扩散图像 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.11039) [代码](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Ftransfusion-pytorch)\n  - **[Arxiv, 2024.07]** **SEED-Story**：使用大型语言模型进行多模态长篇故事生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.08683) [代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FSEED-Story)\n  - **[Arxiv, 2024.05]** **Chameleon**：混合模态早期融合基础模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.09818) [代码](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fchameleon)\n  - **[Arxiv, 2024.04]** **SEED-X**：具有统一多粒度理解和生成能力的多模态模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.14396) [代码](https:\u002F\u002Fgithub.com\u002FAILab-CVC\u002FSEED-X)\n  - **[ICML, 2024]** **Libra**：在大型语言模型上构建解耦的视觉系统 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.10140) [代码](https:\u002F\u002Fgithub.com\u002FYifanXu74\u002FLibra)\n  - **[CVPR, 2024]** **Unified-IO 2**：通过视觉、语言、音频和动作扩展自回归多模态模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.17172) [代码](https:\u002F\u002Fgithub.com\u002Fallenai\u002Funified-io-2)\n  - **[CVPR, 2024]** **Anole**：一个开放、自回归且原生的多模态模型，用于交错图像-文本生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.06135) [代码](https:\u002F\u002Fgithub.com\u002FGAIR-NLP\u002Fanole)\n  - **[Arxiv, 2023.11]** 
**InstructSeq**：通过指令条件下的多模态序列生成统一视觉任务 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.18835) [代码](https:\u002F\u002Fgithub.com\u002Frongyaofang\u002FInstructSeq)\n  - **[ICLR, 2024]** **Kosmos-G**：使用多模态大型语言模型在上下文中生成图像 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.02992) [代码](https:\u002F\u002Fgithub.com\u002Fxichenpan\u002FKosmos-G)\n  - **[ICLR, 2024]** **LaVIT**：在LLM中进行动态离散视觉标记化的统一语言-视觉预训练 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.04669) [代码](https:\u002F\u002Fgithub.com\u002Fjy0205\u002FLaVIT)\n  - **[ICLR, 2024]** **SEED-LLaMA**：通过SEED标记使LLaMA能够看见和绘画 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01218) [代码](https:\u002F\u002Fgithub.com\u002FAILab-CVC\u002FSEED)\n  - **[ICLR, 2024]** **EMU**：在多模态中进行生成式预训练 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.05222) [代码](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FEmu)\n  - **[Arxiv, 2023.09]** **CM3Leon**：扩展自回归多模态模型：预训练和指令调优 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.02591) [代码](https:\u002F\u002Fgithub.com\u002Fkyegomez\u002FCM3Leon)\n  - **[Arxiv, 2023.07]** **SEED**：在大型语言模型中播下视觉的种子 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.08041) [代码](https:\u002F\u002Fgithub.com\u002FAILab-CVC\u002FSEED)\n  - **[NeurIPS, 2023]** **SPAE**：语义金字塔自编码器，用于在冻结LLM上进行多模态生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.17842)\n  - **[ICLR, 2023]** **Unified-IO**：一个用于视觉、语言和多模态任务的统一模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.08916) [代码](https:\u002F\u002Funified-io.allenai.org\u002F)\n  - **[ICML, 2023]** 将语言模型与图像结合，用于多模态输入和输出 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.13823) [代码](https:\u002F\u002Fgithub.com\u002Fkohjingyu\u002Ffromage)\n  - **[NeurIPS, 2022]** **Flamingo**：一种用于少样本学习的视觉语言模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.14198)\n  - **[Arxiv, 2021.12]** **ERNIE-ViLG**：用于双向视觉-语言生成的统一生成式预训练 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.15283) \n  - **[KDD, 2021]** **M6**：一个中文多模态预训练器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.00823)\n\n### 个性化图像生成\n  - **[Arxiv, 2025.10]** TokenAR：通过自回归的标记级增强实现多主体生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.16332) [代码](https:\u002F\u002Fgithub.com\u002Flyrig\u002FTokenAR)\n  - **[Arxiv, 2025.09]** EchoGen：利用前馈式主体驱动的自回归模型在任意场景中生成视觉回声 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.26127)\n  - **[Arxiv, 2025.08]** CoAR：将概念注入自回归模型以实现个性化文生图 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.07341) [代码](https:\u002F\u002Fgithub.com\u002FKZF-kzf\u002FCoAR)\n  - **[ICCV, 2025]** CSD-VAR：视觉自回归模型中的内容-风格分解 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.13984)\n  - **[Arxiv, 2025.07]** 基于尺度自回归模型的无训练风格个性化方法 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.04482)\n  - **[Arxiv, 2025.04]** 基于尺度自回归模型的无训练风格对齐图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.06144)\n  - **[Arxiv, 2025.04]** 利用自回归模型进行个性化文生图 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.13162) [代码](https:\u002F\u002Fgithub.com\u002FKaiyueSun98\u002FT2I-Personalization-with-AR)\n  - **[CVPR, 2025]** 零样本风格化文生图，但要采用自回归方式 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.17074)\n\n### 其他生成\n  - **[Arxiv, 2025.05]** RestoreVAR：用于一体化图像修复的视觉自回归生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.18047) [代码](https:\u002F\u002Fgithub.com\u002Fsudraj2002\u002FRestoreVAR)\n  - **[Arxiv, 2025.04]** TAPNext：将任意点跟踪（TAP）视为下一个标记预测 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.05579)\n  - **[Arxiv, 2025.04]** 超越文字：通过多模态自回归模型推进长文本图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.20198) 
[代码](https:\u002F\u002Ffingerrec.github.io\u002Flongtextar\u002F)\n  - **[Arxiv, 2025.03]** VARSR：用于图像超分辨率的视觉自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.18993) [代码](https:\u002F\u002Fgithub.com\u002Fquyp2000\u002FVARSR)\n  - **[Arxiv, 2025.03]** 下一尺度自回归模型是零样本单张图像对象视图合成器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.13588) [代码](https:\u002F\u002Fgithub.com\u002FShiran-Yuan\u002FArchonView)\n  - **[Arxiv, 2025.03]** 感知、理解与修复：基于自回归多模态生成模型的真实世界图像超分辨率 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.11073) [代码](https:\u002F\u002Fgithub.com\u002Fnonwhy\u002FPURE)\n  - **[Arxiv, 2025.02]** ARTalk：通过自回归模型实现语音驱动的3D头部动画 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20323) [代码](https:\u002F\u002Fxg-chu.site\u002Fproject_artalk\u002F)\n  - **[Arxiv, 2025.02]** 多自回归预测用于建模交互作用 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.08646)\n  - **[Arxiv, 2025.02]** SongGen：一种用于文本到歌曲生成的单阶段自回归Transformer [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13128) [代码](https:\u002F\u002Fgithub.com\u002FLiuZH-19\u002FSongGen)\n  - **[Arxiv, 2024.12]** DriveGPT：扩展用于驾驶的自回归行为模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14415)\n  - **[TII, 2025]** VarAD：通过视觉自回归建模实现轻量级高分辨率图像异常检测 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.17263) [代码](https:\u002F\u002Fgithub.com\u002Fcaoyunkang\u002FVarAD)\n  - **[Arxiv, 2024.12]** **DrivingGPT**：利用多模态自回归Transformer统一驾驶世界建模与规划 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.18607) [页面](https:\u002F\u002Frogerchern.github.io\u002FDrivingGPT\u002F)\n  - **[Arxiv, 2024.12]** 推进视频帧的自回归续写 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.03758)\n  - **[Arxiv, 2024.12]** 需要两个人：通过反应式自回归扩散模型实时生成两人对话互动 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.02419)\n  - **[Arxiv, 2024.12]** **X-Prompt**：迈向自回归视觉语言基础模型中的通用上下文图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01824) [代码](https:\u002F\u002Fgithub.com\u002FSunzeY\u002FX-Prompt)\n  - **[Arxiv, 2024.12]** **3D-WAG**：用于高保真3D形状的分层小波引导自回归生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.19037) [代码](https:\u002F\u002Fgithub.com\u002FTejaswiniMedi\u002F3DWAG-AR)\n  - **[Arxiv, 2024.11]** **SAR3D**：通过多尺度3D VQVAE实现自回归3D对象生成与理解 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.16856) [代码](https:\u002F\u002Fgithub.com\u002Fcyw-3d\u002FSAR3D)\n  - **[Arxiv, 2024.11]** 可扩展的自回归单目深度估计 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.11361)\n  - **[Arxiv, 2024.11]** LLaMA-Mesh：用语言模型统一3D网格生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.09595) [代码](https:\u002F\u002Fgithub.com\u002Fnv-tlabs\u002FLLaMa-Mesh)\n  - **[Arxiv, 2024.10]** DART：一种基于扩散的自回归运动模型，用于实时文本驱动的运动控制 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05260)\n  - **[Arxiv, 2024.10]** 用于机器人操作的自回归动作序列学习 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.03132) [代码](https:\u002F\u002Fgithub.com\u002Fmlzxy\u002Farp)\n  - **[Arxiv, 2024.09]** BAD：用于文本到运动生成的双向自回归扩散 [论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2409.10847) [代码](https:\u002F\u002Fgithub.com\u002FRohollahHS\u002FBAD)\n  - **[Arxiv, 2024.07]** 视频上下文学习 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.07356)\n  - **[CVPR, 2024]** 序列建模使大型视觉模型实现可扩展学习 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.00785) [代码](https:\u002F\u002Fgithub.com\u002Fytongbai\u002FLVM)\n  - **[AAAI, 2024]** 自回归全感知外延填充用于开放词汇360度图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.03467) [代码](https:\u002F\u002Fgithub.com\u002FzhuqiangLu\u002FAOG-NET-360)\n  - **[arxiv, 2024]** **LM4LV**：用于低级视觉任务的冻结大型语言模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15734) 
[代码](https:\u002F\u002Fgithub.com\u002Fbytetriper\u002FLM4LV)\n  - **[CVPR, 2024]** **ARTrackV2**：通过提示指导自回归跟踪器关注何处及如何描述 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.17133) [代码](https:\u002F\u002Fgithub.com\u002FMIV-XJTU\u002FARTrack)\n  - **[CVPR, 2023亮点]** 自回归视觉跟踪 [论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FWei_Autoregressive_Visual_Tracking_CVPR_2023_paper.pdf) [代码](https:\u002F\u002Fgithub.com\u002FMIV-XJTU\u002FARTrack)\n  - **[CVPR, 2023]** **视觉思维链**：通过多模态补全弥合逻辑空白 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.02317)\n  - **[NeurIPS, 2022]** 通过图像修复进行视觉提示 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.00647) [代码](https:\u002F\u002Fgithub.com\u002Famirbar\u002Fvisual_prompting)\n  - **[EMNLP, 2022]** **MAGMA**——通过基于适配器的微调增强生成模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.05253)\n  - **[NeurIPS, 2021]** 使用冻结语言模型进行多模态少样本学习 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.13884)\n  - **[ECCV, 2020]** 自回归无监督图像分割 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.08247)\n\n### 基准测试 \u002F 分析\n  - **[Arxiv, 2025.09]** GenExam：多学科文本到图像考试 [论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.14232) [代码](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FGenExam)\n  - **[Arxiv, 2025.09]** 传话游戏：评估统一模型中的语义漂移 [论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.04438) [代码](https:\u002F\u002Fgithub.com\u002Fmollahsabbir\u002FSemantic-Drift-in-Unified-Models)\n  - **[Arxiv, 2025.08]** Echo-4o：利用GPT-4o合成图像的力量提升图像生成质量 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.09987) [代码](https:\u002F\u002Fgithub.com\u002Fyejy53\u002FEcho-4o)\n  - **[Arxiv, 2025.07]** GPT-IMAGE-EDIT-1.5M：百万规模的GPT生成图像数据集 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.21033) [代码](https:\u002F\u002Fgithub.com\u002Fwyhlovecpp\u002FGPT-Image-Edit\u002Ftree\u002Fmain)\n  - **[Arxiv, 2025.05]** ImgEdit：统一的图像编辑数据集和基准测试 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.20275) [代码](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FImgEdit)\n  - **[Arxiv, 2025.05]** RISEBench：超越像素的构想：基于推理的视觉编辑基准测试 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02826) [代码](https:\u002F\u002Fgithub.com\u002FPhoenixZ810\u002FRISEBench)\n  - **[Arxiv, 2025.05]** 统一视觉-语言模型是否必要：理解与生成之间的泛化能力 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23043) [代码](https:\u002F\u002Fgithub.com\u002FMajorDavidZhang\u002FGeneralization_unified_VLM)\n  - **[Arxiv, 2025.05]** TokBench：在进行视觉生成之前评估你的视觉分词器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.18142) [代码](https:\u002F\u002Fgithub.com\u002Fwjf5203\u002FTokBench)\n  - **[Arxiv, 2025.05]** VTBench：评估用于自回归图像生成的视觉分词器 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.13439) [代码](https:\u002F\u002Fgithub.com\u002Fhuawei-lin\u002FVTBench)\n  - **[Arxiv, 2025.05]** UniEval：针对统一多模态理解和生成的综合评估 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.10483) [代码](https:\u002F\u002Fgithub.com\u002Fxmed-lab\u002FUniEval)\n  - **[Arxiv, 2025.05]** WorldGenBench：一个融合世界知识的基准测试，用于推理驱动的文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.01490) [代码](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fworldrl\u002FWorldGenBench)\n  - **[Arxiv, 2025.04]** MME-Unify：针对统一多模态理解和生成模型的全面基准测试 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.03641) [代码](https:\u002F\u002Fgithub.com\u002FMME-Benchmarks\u002FMME-Unify)\n  - **[Arxiv, 2025.04]** GPT-ImgEval：诊断GPT4o在图像生成中表现的全面基准测试 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02782) [代码](https:\u002F\u002Fgithub.com\u002FPicoTrex\u002FGPT-ImgEval)\n  - **[Arxiv, 2025.03]** 
WISE：一种受世界知识启发的文本到图像生成语义评估 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07265) [代码](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FWISE)\n  - **[Arxiv, 2025.03]** 自回归视频扩散模型的错误分析：一个统一的框架 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.10704) [代码](https:\u002F\u002Fgithub.com\u002Fsail-sg\u002FMeta-ARVDM)\n  - **[Arxiv, 2024.10]** 扩散模型胜过自回归模型：对文本到图像模型中组合生成的评估 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.22775)\n\n### 推理对齐\n  - **[Arxiv, 2025.10]** 提升自回归图像生成的思维链效率 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.05593)\n  - **[Arxiv, 2025.09]** STAGE：稳定且可推广的GRPO，用于自回归图像生成 [论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2509.25027) [代码](https:\u002F\u002Fgithub.com\u002Fkrennic999\u002FSTAGE)\n  - **[Arxiv, 2025.09]** 针对自回归图像生成的关键标记策略优化 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.22485) [代码](https:\u002F\u002Fgithub.com\u002Fzghhui\u002FGCPO)\n  - **[Arxiv, 2025.09]** 理解融入生成：通过将理解注入生成来强化统一模型的生成能力 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.18639) [代码](https:\u002F\u002Fgithub.com\u002FQC-LY\u002FUiG)\n  - **[Arxiv, 2025.09]** 理解与生成能否真正相互促进——还是仅仅共存？ [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.09666) [代码](https:\u002F\u002Fgithub.com\u002FPKU-YuanGroup\u002FUAE)\n  - **[Arxiv, 2025.08]** AR-GRPO：通过强化学习训练自回归图像生成模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.06924) [代码](https:\u002F\u002Fgithub.com\u002FKwai-Klear\u002FAR-GRPO)\n  - **[Arxiv, 2025.08]** 强化学习在自回归图像编辑中的潜力 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.01119) [代码](https:\u002F\u002Fgithub.com\u002Fmair-lab\u002FEARL)\n  - **[Arxiv, 2025.07]** X-Omni：强化学习让离散自回归图像生成模型再次焕发生机 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.22058) [代码](https:\u002F\u002Fgithub.com\u002FX-Omni-Team\u002FX-Omni) [页面](https:\u002F\u002Fx-omni-team.github.io\u002F)\n  - **[Arxiv, 2025.07]** 思维链化的扩散模型：让我们逐步强化T2I生成过程 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.04451)\n  - **[Arxiv, 2025.06]** 深入研究带有思维链的强化学习在图像生成中的应用：DPO与GRPO的比较 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.17017) [代码](https:\u002F\u002Fgithub.com\u002FZiyuGuo99\u002FImage-Generation-CoT)\n  - **[Arxiv, 2025.06]** 通过强化学习解锁顿悟时刻：推进协作式视觉理解和生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.01480) [代码](https:\u002F\u002Fjanus-pro-r1.github.io\u002F)\n  - **[Arxiv, 2025.06]** ReasonGen-R1：通过SFT和RL为自回归图像生成模型提供思维链支持 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.24875) [代码](https:\u002F\u002Fgithub.com\u002FFranklin-Zhang0\u002FReasonGen-R1)\n  - **[Arxiv, 2025.05]** UniRL：通过监督学习和强化学习实现自我改进的统一多模态模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23380) [代码](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FUniRL)\n  - **[Arxiv, 2025.05]** UniGen：增强统一多模态理解和生成的训练与测试阶段策略 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14682)\n  - **[Arxiv, 2025.04]** SimpleAR：通过预训练、SFT和强化学习推动自回归视觉生成的前沿 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.11455) [代码](https:\u002F\u002Fgithub.com\u002Fwdrink\u002FSimpleAR)\n  - **[Arxiv, 2025.03]** **LightGen**：通过知识蒸馏和直接偏好优化实现高效图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.08619) [代码](https:\u002F\u002Fgithub.com\u002FXianfengWu01\u002FLightGen)\n  - **[Arxiv, 2025.02]** 以思维链引导的自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.16965) [代码](https:\u002F\u002Fgithub.com\u002FLTH14\u002Ffractalgen)\n  - **[Arxiv, 2025.01]** 我们能用思维链生成图像吗？让我们逐步验证并强化图像生成过程 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13926) [代码](https:\u002F\u002Fgithub.com\u002FZiyuGuo99\u002FImage-Generation-CoT)\n\n### 安全性\n  - **[Arxiv, 2025.09]** 弥合安全差距：视觉自回归模型中的手术级概念擦除 
[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.22400)\n  - **[Arxiv, 2025.06]** 无限的BitMark：基于位级自回归图像生成模型的水印技术 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.21209) \n  - **[Arxiv, 2025.06]** EAR：从统一自回归模型中擦除概念 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.20151) [代码](https:\u002F\u002Fgithub.com\u002Fimmc-lab\u002Fear\u002F)\n  - **[Arxiv, 2025.06]** 自回归图像生成的水印技术 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.16349) [代码](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fwmar)\n  - **[Arxiv, 2025.06]** 自回归图像生成模型的水印方案 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.11371)\n  - **[Arxiv, 2025.05]** 针对自回归图像生成的免训练水印技术 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14673) [代码](https:\u002F\u002Fgithub.com\u002Fmaifoundations\u002FIndexMark)\n  - **[Arxiv, 2025.02]** 图像自回归模型的隐私攻击 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.02514) [代码](https:\u002F\u002Fgithub.com\u002Fsprintml\u002Fprivacy_attacks_against_iars)\n\n### 加速\n  - **[Arxiv, 2025.10]** Hawk：利用空间上下文加速自回归文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.25739)\n  - **[Arxiv, 2025.10]** MC-SJD：最大耦合推测式雅可比解码用于加速自回归视觉生成 [论文](https:\u002F\u002Farxiv.org\u002Fhtml\u002F2510.24211v1) \n  - **[NeurIPS 2025, Arxiv\u002F2025.10]** 推测式雅可比去噪解码用于加速自回归文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08994) \n  - **[Arxiv, 2025.09]** Hyper-Bagel：多模态理解与生成的统一加速框架 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.18824) [页面](https:\u002F\u002Fhyper-bagel.github.io\u002F)\n  - **[Arxiv, 2025.07]** 局部感知并行解码用于高效自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.01957) [代码](https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002Flpd)\n  - **[Arxiv, 2025.05]** DiSA：自回归图像生成中的扩散步退火 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.20297) [代码](https:\u002F\u002Fgithub.com\u002FQinyu-Allen-Zhao\u002FDiSA)\n  - **[Arxiv, 2025.05]** FastCar：缓存注意力重放用于边缘端快速自回归视频生成 [代码](https:\u002F\u002Fgithub.com\u002Fshawnricecake\u002Ffast-car) [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14709)\n  - **[Arxiv, 2025.04]** 用于连续潜在空间生成的快速自回归模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.18391)\n  - **[ICLR, 2025]** 蒸馏解码1：通过流匹配实现图像自回归模型的一步采样 [论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zKlFXV87Pp) [代码](https:\u002F\u002Fgithub.com\u002Fimagination-research\u002Fdistilled-decoding)\n  - **[CVPR口头报告, 2025]** 扩散Transformer的自回归蒸馏 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.11295) [代码](https:\u002F\u002Fgithub.com\u002Falsdudrla10\u002FARD)\n  - **[Arxiv, 2025.04]** 头部感知KV缓存压缩用于高效视觉自回归建模 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.09261)\n  - **[CVPR, 2025]** 从慢速双向到快速自回归视频扩散模型 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07772) [代码](https:\u002F\u002Fgithub.com\u002Ftianweiy\u002FCausVid)\n  - **[Arxiv, 2025.03]** 对角线解码实现快速自回归视频生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.14070)\n  - **[CVPR 2025, 2025\u002F2024.12]** 并行化自回归视觉生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.15119) [代码](https:\u002F\u002Fgithub.com\u002FEpiphqny\u002FPAR)\n  - **[Arxiv, 2024.11]** 协作解码使视觉自回归建模更高效 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.17787) [代码](https:\u002F\u002Fgithub.com\u002Fczg1225\u002FCoDe)\n  - **[Arxiv, 2024.11]** 连续推测式解码用于自回归图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.11925) [代码](https:\u002F\u002Fgithub.com\u002FMarkXCloud\u002FCSpD)\n  - **[ICLR, 2025\u002F2024.10]** 使用免训练推测式雅可比解码加速自回归文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.01699)\n\n### 稳定性与扩展性\n  - **[Arxiv, 2025.03]** 通过粗粒度到细粒度的标记预测改进自回归图像生成 
[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.16194) [代码](https:\u002F\u002Fgithub.com\u002FGzyAftermath\u002FCTF)\n  - **[Arxiv, 2025.03]** 向自回归多模态基础模型教授度量距离 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.02379) \n  - **[Arxiv, 2024.12]** 512字节内的3D表示：变分分词器是自回归3D生成的关键 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.02202) [页面](https:\u002F\u002Fsparse-mvs-2.github.io\u002FVAT.IO\u002F)\n  - **[Arxiv, 2024.12]** JetFormer：一种原始图像和文本的自回归生成模型 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.19722) \n  - **[Arxiv, 2024.10]** 阐明用于图像生成的语言模型设计空间 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.16257) [代码](https:\u002F\u002Fgithub.com\u002FPepper-lll\u002FLMforImageGeneration)\n  - **[NeurIPS, 2024]** 稳定图像自回归建模的潜在空间：统一视角 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12490) [代码](https:\u002F\u002Fgithub.com\u002FDAMO-NLP-SG\u002FDiGIT)\n  - **[Arxiv, 2024.09]** 预训练语言模型无助于自回归文本到图像生成 [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.16201)\n  - **[Arxiv, 2020]** 自回归生成建模的规模定律 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.14701)\n\n### 教程\n-  [MIT 6.S978 深度生成模型](https:\u002F\u002Fmit-6s978.github.io\u002Fassets\u002Fpdfs\u002Flec3_ar.pdf) 由何恺明主讲\n-  [UvA DL笔记](https:\u002F\u002Fuvadlc-notebooks.readthedocs.io\u002Fen\u002Flatest\u002Ftutorial_notebooks\u002Ftutorial12\u002FAutoregressive_Image_Modeling.html)\n-  [MSC深度学习](https:\u002F\u002Fhal.cse.msu.edu\u002Fteaching\u002F2022-fall-deep-learning\u002F18-autoregressive-models\u002F#\u002F2)\n\n### 评估指标\n| 指标                              | 分析类型               | 参考文献                                            |\n|-------------------------------------|-----------------------------|------------------------------------------------------|\n| Inception Score (IS) ↑                | 定量              | [Salimans et al., 2016](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1606.03498)                                |\n| Fréchet Inception Distance (FID)  ↓   | 定量              | [Heusel et al., 2017](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1706.08500)                                  |\n| Kernel Inception Distance (KID)  ↓    | 定量              | [Binkowski et al., 2018](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1801.01401)                               |\n| Precision and Recall ↑                | 定量              | [Powers, 2020](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.16061)                                         |\n| CLIP Maximum Mean Discrepancy ↓       | 定量              | [Jayasumana et al., 2023](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.09603)                              |\n| CLIP Score ↑                          | 定量              | [Hessel et al., 2021](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08718)                                  |\n| R-precision ↑                         | 定量              | [Craswell et al., 2009](https:\u002F\u002Fdoi.org\u002F10.1007\u002F978-0-387-39940-9_486)                   |\n| Perceptual Path Length  ↓             | 定量              | [Karras et al., 2019](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.04948)                                  |\n| Fréchet Video Distance (FVD) ↓        | 定量              | [Unterthiner et al., 2019](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.01717)                             |\n| 美学（专家评价）↑       | 定性               | 基于领域专业知识                                                                |\n| 图灵测试                           | 定性               | [Turing, 
1950](https:\u002F\u002Facademic.oup.com\u002Fmind\u002Farticle\u002FLIX\u002F236\u002F433\u002F986238)                 |\n| 用户研究（评分、满意度）↑ | 定性               | 多种多样，取决于用户研究的方法学                                         |\n\n## 👍 致谢\n- [Awesome Unified Multimodal Models](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FAwesome-Unified-Multimodal-Models), 新加坡国立大学\n- [Awesome Unified Multimodal Models](https:\u002F\u002Fgithub.com\u002FPurshow\u002FAwesome-Unified-Multimodal), 北京大学\n- [Awesome Unified Multimodal Models](https:\u002F\u002Fgithub.com\u002FAIDC-AI\u002FAwesome-Unified-Multimodal-Models), [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.02567), 阿里巴巴\n\n\n## ♥️ 贡献者\n\n\u003Ca href=\"https:\u002F\u002Fgithub.com\u002FChaofanTao\u002FAutoregressive-Models-in-Vision-Survey\u002Fgraphs\u002Fcontributors\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FChaofanTao_Autoregressive-Models-in-Vision-Survey_readme_6e44039432bc.png\" \u002F>\n\u003C\u002Fa>","# Autoregressive-Models-in-Vision-Survey 快速上手指南\n\n本项目并非一个可直接运行的软件库，而是一个**精选的视觉自回归模型（Autoregressive Models in Vision）论文与代码资源列表**。它旨在为研究者和开发者提供该领域最新的研究进展、分类整理及开源实现链接。\n\n以下是如何高效利用本资源库的指南：\n\n## 1. 环境准备\n\n由于本项目是文献综述列表，本身无系统依赖。但若要运行列表中具体的模型代码，通常需要以下基础环境：\n\n*   **操作系统**: Linux (推荐 Ubuntu 20.04+) 或 macOS\n*   **Python**: 3.8 或更高版本\n*   **深度学习框架**: PyTorch (大多数列出项目的首选框架)\n*   **包管理工具**: `pip` 或 `conda`\n*   **网络环境**: 访问 GitHub 和 arXiv 需要稳定的网络连接（建议配置科学上网或使用国内镜像源加速 Python 包下载）。\n\n## 2. 获取资源与安装\n\n本仓库无需传统意义上的“安装”，只需克隆仓库以获取最新的论文列表和分类索引。\n\n### 克隆仓库\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FChaofanTao\u002FAutoregressive-Models-in-Vision-Survey.git\ncd Autoregressive-Models-in-Vision-Survey\n```\n\n### 浏览资源\n克隆后，直接在本地打开 `README.md` 文件，或通过 GitHub 网页版浏览。目录结构涵盖了：\n*   **图像生成** (Image Generation)\n*   **视频生成** (Video Generation)\n*   **3D 生成** (3D Generation)\n*   **多模态生成** (Multimodal Generation)\n*   **基准测试与分析** (Benchmark \u002F Analysis) 等类别。\n\n> **注意**：列表中的每个条目都包含 `[Paper]` (论文链接) 和 `[Code]` (代码仓库链接)。你需要点击具体的 `[Code]` 链接跳转到对应项目的仓库，按照该项目各自的 `README` 进行环境配置和安装。\n\n## 3. 基本使用\n\n使用本项目的核心流程是：**查找论文 -> 定位代码 -> 复现模型**。\n\n### 步骤一：查找目标模型\n在 `README.md` 中根据你的需求查找类别。例如，若你需要寻找基于 **Token-wise** 的图像生成最新工作，可定位到 `Image Generation` -> `Token-wise Generation` 部分。\n\n*示例条目：*\n> **[Arxiv, 2024.12]** **TokenFlow**: Unified Image Tokenizer for Multimodal Understanding and Generation [Paper](link) [Code](link)\n\n### 步骤二：访问代码仓库\n点击条目后的 `[Code]` 链接（例如 TokenFlow 的 GitHub 地址）。\n\n### 步骤三：安装具体模型依赖\n进入具体模型的仓库后，通常执行以下标准操作（以典型的 PyTorch 项目为例）：\n\n```bash\n# 1. 创建虚拟环境 (推荐)\nconda create -n ar_vision python=3.9\nconda activate ar_vision\n\n# 2. 安装 PyTorch (根据 CUDA 版本选择，推荐使用国内清华源加速)\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\n\n# 3. 安装该项目特定依赖\npip install -r requirements.txt\n```\n\n### 步骤四：运行推理或训练\n参照具体项目仓库中的 `Usage` 或 `Quick Start` 部分运行代码。例如：\n\n```bash\npython inference.py --config configs\u002Ftokenflow.yaml --checkpoint pretrained_model.pth\n```\n\n## 4. 贡献与更新\n\n*   **状态说明**: 截至 2025 年 11 月，该仓库已进入**维护模式**，不再主动进行大规模更新，但仍接受针对新趋势（如统一多模态模型、自回归扩散视频生成）的 Pull Requests。\n*   **提交新作**: 如果你希望将自己的工作或模型加入列表，可以通过发送邮件至 `jhuang90@ur.rochester.edu` 或提交 PR。\n    *   PR 格式要求：\n    ```markdown\n    * [**Conference\u002FJournal Year**] Paper Name. [Paper](link) [Code](link)\n    ```\n\n## 5. 
引用\n\n如果在你的研究中使用了本列表提供的资源，请引用以下论文：\n\n```bibtex\n@misc{xiong2024autoregressive,\n    title={Autoregressive Models in Vision: A Survey},\n    author={Jing Xiong and Gongye Liu and Lun Huang and Chengyue Wu and Taiqiang Wu and Yao Mu and Yuan Yao and Hui Shen and Zhongwei Wan and Jinfa Huang and Chaofan Tao and Shen Yan and Huaxiu Yao and Lingpeng Kong and Hongxia Yang and Mi Zhang and Guillermo Sapiro and Jiebo Luo and Ping Luo and Ngai Wong},\n    year={2024},\n    eprint={2411.05902},\n    archivePrefix={arXiv},\n    primaryClass={cs.CV}\n}\n```","某高校计算机视觉实验室的研究团队正计划开展一项关于“基于自回归架构的高清视频生成”的前沿课题研究，急需全面掌握该领域的最新技术路线。\n\n### 没有 Autoregressive-Models-in-Vision-Survey 时\n- **文献检索效率低下**：研究人员需要在 arXiv、Google Scholar 等多个平台手动搜索关键词，耗费数周时间才能拼凑出零散的论文列表，且极易遗漏重要成果。\n- **技术脉络模糊不清**：面对海量论文，难以理清自回归模型在视觉领域从图像生成到视频生成的演进逻辑，无法快速识别哪些是开创性工作，哪些是改进型方案。\n- **复现选型困难**：缺乏对各类模型架构、数据集及训练策略的系统性对比，导致在确定实验基线（Baseline）时盲目尝试，增加了试错成本和时间消耗。\n- **前沿趋势滞后**：难以及时捕捉如“统一多模态模型”或“自回归扩散强制视频生成”等最新涌现的技术趋势，可能导致研究选题在开题时已显过时。\n\n### 使用 Autoregressive-Models-in-Vision-Survey 后\n- **一站式获取权威清单**：直接利用该仓库整理的精选论文列表，几分钟内即可获取涵盖最新进展的完整文献库，大幅缩短前期调研周期。\n- **清晰构建知识图谱**：通过仓库中对技术发展的系统性梳理，快速掌握自回归视觉模型的核心分类与演进路径，精准定位关键里程碑论文。\n- **高效确定实验方案**：参考文中对各类方法的详细对比与总结，迅速锁定最适合当前任务的最优基线模型和配套资源，显著降低实验启动门槛。\n- **紧跟领域最新动态**：借助仓库持续的更新机制（直至维护模式前）及对新趋势的敏锐洞察，确保研究方向始终对标国际最前沿，提升创新成功率。\n\nAutoregressive-Models-in-Vision-Survey 将原本耗时数周的碎片化调研工作压缩为小时级的系统化学习，成为视觉生成领域研究者不可或缺的导航图。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FChaofanTao_Autoregressive-Models-in-Vision-Survey_22f7bed6.png","ChaofanTao","Chaofan Tao","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FChaofanTao_deee289f.jpg","Ph.D. @ The University of Hong Kong (HKU)",null,"Hong Kong","tcftrees@gmail.com","https:\u002F\u002Fchaofantao.top\u002F","https:\u002F\u002Fgithub.com\u002FChaofanTao",792,23,"2026-04-11T14:47:22",1,"","未说明",{"notes":88,"python":86,"dependencies":89},"该项目是一个关于视觉自回归模型的论文综述列表（Awesome List），并非可执行的软件工具或代码库，因此没有具体的运行环境、依赖库或硬件需求。用户主要使用该仓库查阅论文链接、代码库链接及相关研究进展。",[],[91,92,15,14],"视频","其他",[94,95,96,97,98,99,100,101,102,103,104,105,106,107],"autoregressive","diffusion","embodied-ai","image-generation","medical-ai","multimodal","survey","text-to-image","video-generation","acceleration","motion-prediction","point-cloud","deep-learning","computer-vision","2026-03-27T02:49:30.150509","2026-04-12T16:49:06.363017",[111,116],{"id":112,"question_zh":113,"answer_zh":114,"source_url":115},30820,"与非自回归模型（如典型的扩散模型）相比，视觉自回归模型有哪些独特优势？","视觉自回归模型相比非自回归模型主要有三大独特优势：\n1. **缩放定律（Scaling Laws）**：NLP 中下一个 token 预测范式的成功很大程度上归功于成熟的缩放定律。虽然扩散模型和 GAN 的缩放定律尚待探索，但自回归视觉模型有望迁移 NLP 的成功经验，实现视觉生成框架的高效扩展（参考论文：LlamaGen 和 FLUID）。\n2. **部署效率**：自回归生成模型可以利用专为语言模型设计的现有部署技术。例如，VLLM 等框架提供了自回归加速功能，显著提高了生成效率。\n3. **连接语言与视觉**：自回归模型代表了统一多模态理解与生成的潜在里程碑。","https:\u002F\u002Fgithub.com\u002FChaofanTao\u002FAutoregressive-Models-in-Vision-Survey\u002Fissues\u002F4",{"id":117,"question_zh":118,"answer_zh":119,"source_url":120},30821,"如何提交新的相关论文以便被收录到该综述仓库中？","用户可以通过在 GitHub 仓库中开启一个 Issue 来推荐新的论文。在 Issue 中提供论文的标题、链接以及简要介绍（例如属于哪个类别），维护者审核后会将其添加到仓库的相应分类中以增加其可见性。","https:\u002F\u002Fgithub.com\u002FChaofanTao\u002FAutoregressive-Models-in-Vision-Survey\u002Fissues\u002F6",[]]